repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
xinyu7/tornado | tornado/test/concurrent_test.py | 63 | 11014 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import logging
import re
import socket
import sys
import traceback
from tornado.concurrent import Future, return_future, ReturnValueIgnoredError
from tornado.escape import utf8, to_unicode
from tornado import gen
from tornado.iostream import IOStream
from tornado import stack_context
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncTestCase, LogTrapTestCase, bind_unused_port, gen_test
try:
from concurrent import futures
except ImportError:
futures = None
class ReturnFutureTest(AsyncTestCase):
    """Tests for the ``@return_future`` decorator.

    The ``*_future`` helper methods wrap callback-style functions so they
    return a ``Future``; the tests cover synchronous and asynchronous
    resolution, error propagation, and callback/Future interoperation.
    """

    @return_future
    def sync_future(self, callback):
        # Resolves before the wrapped function returns.
        callback(42)

    @return_future
    def async_future(self, callback):
        # Resolves on a later IOLoop iteration.
        self.io_loop.add_callback(callback, 42)

    @return_future
    def immediate_failure(self, callback):
        # Raises synchronously; the callback never runs.
        1 / 0

    @return_future
    def delayed_failure(self, callback):
        # Raises later, from inside the IOLoop.
        self.io_loop.add_callback(lambda: 1 / 0)

    @return_future
    def return_value(self, callback):
        # Note that the result of both running the callback and returning
        # a value (or raising an exception) is unspecified; with current
        # implementations the last event prior to callback resolution wins.
        return 42

    @return_future
    def no_result_future(self, callback):
        # Callback invoked with no arguments.
        callback()

    def test_immediate_failure(self):
        with self.assertRaises(ZeroDivisionError):
            # The caller sees the error just like a normal function.
            self.immediate_failure(callback=self.stop)
        # The callback is not run because the function failed synchronously.
        self.io_loop.add_timeout(self.io_loop.time() + 0.05, self.stop)
        result = self.wait()
        self.assertIs(result, None)

    def test_return_value(self):
        with self.assertRaises(ReturnValueIgnoredError):
            self.return_value(callback=self.stop)

    def test_callback_kw(self):
        future = self.sync_future(callback=self.stop)
        result = self.wait()
        self.assertEqual(result, 42)
        self.assertEqual(future.result(), 42)

    def test_callback_positional(self):
        # When the callback is passed in positionally, future_wrap shouldn't
        # add another callback in the kwargs.
        future = self.sync_future(self.stop)
        result = self.wait()
        self.assertEqual(result, 42)
        self.assertEqual(future.result(), 42)

    def test_no_callback(self):
        future = self.sync_future()
        self.assertEqual(future.result(), 42)

    def test_none_callback_kw(self):
        # explicitly pass None as callback
        future = self.sync_future(callback=None)
        self.assertEqual(future.result(), 42)

    def test_none_callback_pos(self):
        future = self.sync_future(None)
        self.assertEqual(future.result(), 42)

    def test_async_future(self):
        future = self.async_future()
        self.assertFalse(future.done())
        self.io_loop.add_future(future, self.stop)
        future2 = self.wait()
        # add_future delivers the same Future object to the callback.
        self.assertIs(future, future2)
        self.assertEqual(future.result(), 42)

    @gen_test
    def test_async_future_gen(self):
        result = yield self.async_future()
        self.assertEqual(result, 42)

    def test_delayed_failure(self):
        future = self.delayed_failure()
        self.io_loop.add_future(future, self.stop)
        future2 = self.wait()
        self.assertIs(future, future2)
        with self.assertRaises(ZeroDivisionError):
            future.result()

    def test_kw_only_callback(self):
        # The decorator must locate the callback even when the wrapped
        # function only accepts **kwargs.
        @return_future
        def f(**kwargs):
            kwargs['callback'](42)
        future = f()
        self.assertEqual(future.result(), 42)

    def test_error_in_callback(self):
        self.sync_future(callback=lambda future: 1 / 0)
        # The exception gets caught by our StackContext and will be re-raised
        # when we wait.
        self.assertRaises(ZeroDivisionError, self.wait)

    def test_no_result_future(self):
        future = self.no_result_future(self.stop)
        result = self.wait()
        self.assertIs(result, None)
        # result of this future is undefined, but not an error
        future.result()

    def test_no_result_future_callback(self):
        future = self.no_result_future(callback=lambda: self.stop())
        result = self.wait()
        self.assertIs(result, None)
        future.result()

    @gen_test
    def test_future_traceback(self):
        @return_future
        @gen.engine
        def f(callback):
            yield gen.Task(self.io_loop.add_callback)
            try:
                1 / 0
            except ZeroDivisionError:
                # Remember the frame where the error was raised so we can
                # verify it survives into the re-raised traceback below.
                self.expected_frame = traceback.extract_tb(
                    sys.exc_info()[2], limit=1)[0]
                raise
        try:
            yield f()
            self.fail("didn't get expected exception")
        except ZeroDivisionError:
            tb = traceback.extract_tb(sys.exc_info()[2])
            self.assertIn(self.expected_frame, tb)
# The following series of classes demonstrate and test various styles
# of use, with and without generators and futures.
class CapServer(TCPServer):
    """Toy line-based server that upper-cases each line it receives.

    Replies ``ok\\t<DATA>`` on success, or ``error\\talready capitalized``
    if the input was already all upper-case, then closes the connection.
    """

    def handle_stream(self, stream, address):
        logging.info("handle_stream")
        self.stream = stream
        self.stream.read_until(b"\n", self.handle_read)

    def handle_read(self, data):
        logging.info("handle_read")
        data = to_unicode(data)
        if data == data.upper():
            self.stream.write(b"error\talready capitalized\n")
        else:
            # data already has \n
            self.stream.write(utf8("ok\t%s" % data.upper()))
        self.stream.close()
class CapError(Exception):
    """Raised by the cap clients when the server reports an error line."""
    pass
class BaseCapClient(object):
    """Shared plumbing for the capitalization test clients below."""

    def __init__(self, port, io_loop):
        self.port = port
        self.io_loop = io_loop

    def process_response(self, data):
        """Parse a server reply; return the payload or raise CapError."""
        match = re.match('(.*)\t(.*)\n', to_unicode(data))
        status, message = match.groups()
        if status != 'ok':
            raise CapError(message)
        return message
class ManualCapClient(BaseCapClient):
    """Client that creates and resolves its Future by hand."""

    def capitalize(self, request_data, callback=None):
        logging.info("capitalize")
        self.request_data = request_data
        self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
        self.stream.connect(('127.0.0.1', self.port),
                            callback=self.handle_connect)
        self.future = Future()
        if callback is not None:
            # Bridge the Future back to callback style for callers that
            # passed one; wrap to preserve the caller's StackContext.
            self.future.add_done_callback(
                stack_context.wrap(lambda future: callback(future.result())))
        return self.future

    def handle_connect(self):
        logging.info("handle_connect")
        self.stream.write(utf8(self.request_data + "\n"))
        self.stream.read_until(b'\n', callback=self.handle_read)

    def handle_read(self, data):
        logging.info("handle_read")
        self.stream.close()
        try:
            self.future.set_result(self.process_response(data))
        except CapError as e:
            self.future.set_exception(e)
class DecoratorCapClient(BaseCapClient):
    """Client whose Future is produced by the @return_future decorator."""

    @return_future
    def capitalize(self, request_data, callback):
        logging.info("capitalize")
        self.request_data = request_data
        self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
        self.stream.connect(('127.0.0.1', self.port),
                            callback=self.handle_connect)
        # The decorator-supplied callback resolves the Future when invoked.
        self.callback = callback

    def handle_connect(self):
        logging.info("handle_connect")
        self.stream.write(utf8(self.request_data + "\n"))
        self.stream.read_until(b'\n', callback=self.handle_read)

    def handle_read(self, data):
        logging.info("handle_read")
        self.stream.close()
        self.callback(self.process_response(data))
class GeneratorCapClient(BaseCapClient):
    """Client written as a gen.engine coroutine wrapped by @return_future."""

    @return_future
    @gen.engine
    def capitalize(self, request_data, callback):
        logging.info('capitalize')
        stream = IOStream(socket.socket(), io_loop=self.io_loop)
        logging.info('connecting')
        yield gen.Task(stream.connect, ('127.0.0.1', self.port))
        stream.write(utf8(request_data + '\n'))
        logging.info('reading')
        data = yield gen.Task(stream.read_until, b'\n')
        logging.info('returning')
        stream.close()
        callback(self.process_response(data))
class ClientTestMixin(object):
    """Shared tests run against each CapClient implementation.

    Concrete subclasses set ``client_class``; a CapServer is started on
    an unused port for every test.
    """

    def setUp(self):
        super(ClientTestMixin, self).setUp()
        self.server = CapServer(io_loop=self.io_loop)
        sock, port = bind_unused_port()
        self.server.add_sockets([sock])
        self.client = self.client_class(io_loop=self.io_loop, port=port)

    def tearDown(self):
        self.server.stop()
        super(ClientTestMixin, self).tearDown()

    def test_callback(self):
        self.client.capitalize("hello", callback=self.stop)
        result = self.wait()
        self.assertEqual(result, "HELLO")

    def test_callback_error(self):
        self.client.capitalize("HELLO", callback=self.stop)
        # The server-side error surfaces as a CapError from wait().
        self.assertRaisesRegexp(CapError, "already capitalized", self.wait)

    def test_future(self):
        future = self.client.capitalize("hello")
        self.io_loop.add_future(future, self.stop)
        self.wait()
        self.assertEqual(future.result(), "HELLO")

    def test_future_error(self):
        future = self.client.capitalize("HELLO")
        self.io_loop.add_future(future, self.stop)
        self.wait()
        self.assertRaisesRegexp(CapError, "already capitalized", future.result)

    def test_generator(self):
        @gen.engine
        def f():
            result = yield self.client.capitalize("hello")
            self.assertEqual(result, "HELLO")
            self.stop()
        f()
        self.wait()

    def test_generator_error(self):
        @gen.engine
        def f():
            with self.assertRaisesRegexp(CapError, "already capitalized"):
                yield self.client.capitalize("HELLO")
            self.stop()
        f()
        self.wait()
class ManualClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
    # Run the shared client tests against the hand-rolled Future client.
    client_class = ManualCapClient
class DecoratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
    # Run the shared client tests against the @return_future client.
    client_class = DecoratorCapClient
class GeneratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
    # Run the shared client tests against the coroutine-based client.
    client_class = GeneratorCapClient
| apache-2.0 |
yukisakurai/hhntup | higgstautau/pileup.py | 1 | 6320 | from rootpy.tree.filtering import EventFilter
from externaltools import PileupReweighting
from ROOT import Root
from . import datasets
from . import log; log = log[__name__]
# https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/InDetTrackingPerformanceGuidelines
# Per-year pileup (mu) rescale factor and its absolute uncertainty;
# applied as 1/rescale via SetDataScaleFactors and as a multiplier on
# MC mu values in PileupScale.
PU_RESCALE = {
    2011: (0.97, 0.01),
    2012: (1.09, 0.04),
}

# Registry of every tool created by get_pileup_reweighting_tool.
PILEUP_TOOLS = []
def get_pileup_reweighting_tool(year, use_defaults=True, systematic=None):
    """Create and initialize a TPileupReweighting tool for ``year``.

    :param year: data-taking year, 2011 or 2012 (anything else raises
        ValueError).
    :param use_defaults: if True use the default prw config file bundled
        with the PileupReweighting package, otherwise the locally
        generated file under ``lumi/``.
    :param systematic: None for the nominal data scale factor, or
        'high'/'low' to shift the rescale factor up/down by its
        uncertainty (anything else raises ValueError).
    :returns: an initialized ``Root.TPileupReweighting`` instance, also
        appended to the module-level ``PILEUP_TOOLS`` list.
    """
    # https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/ExtendedPileupReweighting
    # Initialize the pileup reweighting tool
    pileup_tool = Root.TPileupReweighting()
    if year == 2011:
        if use_defaults:
            pileup_tool.AddConfigFile(
                PileupReweighting.get_resource(
                    'mc11b_defaults.prw.root'))
        else:
            pileup_tool.AddConfigFile(
                'lumi/2011/'
                'TPileupReweighting.mc11.prw.root')
        lumicalc_file = 'lumi/2011/ilumicalc_histograms_None_178044-191933.root'
    elif year == 2012:
        if use_defaults:
            pileup_tool.AddConfigFile(
                PileupReweighting.get_resource(
                    'mc12ab_defaults.prw.root'))
        else:
            pileup_tool.AddConfigFile(
                'lumi/2012/'
                'TPileupReweighting.mc12.prw.root')
        lumicalc_file = 'lumi/2012/ilumicalc_histograms_None_200842-215643.root'
    else:
        raise ValueError(
            'No pileup reweighting defined for year %d' % year)
    # The data scale factor is the inverse of the mu rescale factor,
    # optionally shifted by its uncertainty for systematics.
    rescale, rescale_error = PU_RESCALE[year]
    if systematic is None:
        pileup_tool.SetDataScaleFactors(1. / rescale)
    elif systematic == 'high':
        pileup_tool.SetDataScaleFactors(1. / (rescale + rescale_error))
    elif systematic == 'low':
        pileup_tool.SetDataScaleFactors(1. / (rescale - rescale_error))
    else:
        raise ValueError(
            "pileup systematic '{0}' not understood".format(systematic))
    pileup_tool.AddLumiCalcFile(lumicalc_file)
    # discard unrepresented data (with mu not simulated in MC)
    pileup_tool.SetUnrepresentedDataAction(2)
    pileup_tool.Initialize()
    # set the random seed used by the GetRandomRunNumber and
    # GetRandomPeriodNumber methods
    pileup_tool.SetRandomSeed(1777)
    # register
    PILEUP_TOOLS.append(pileup_tool)
    return pileup_tool
class PileupTemplates(EventFilter):
    """Event filter that fills the MC pileup (mu) templates used to
    generate a pileup reweighting config file (see finalize)."""

    def __init__(self, year, passthrough=False, **kwargs):
        if not passthrough:
            # initialize the pileup reweighting tool
            self.pileup_tool = Root.TPileupReweighting()
            if year == 2011:
                self.pileup_tool.UsePeriodConfig("MC11b")
            elif year == 2012:
                self.pileup_tool.UsePeriodConfig("MC12a")
            # NOTE(review): other years fall through with no period config
            # set -- presumably callers only pass 2011/2012; confirm.
            self.pileup_tool.Initialize()
        super(PileupTemplates, self).__init__(
            passthrough=passthrough,
            **kwargs)

    def passes(self, event):
        # Fill the mu distribution for this event's run/channel; always
        # accepts the event (this filter only observes).
        self.pileup_tool.Fill(
            event.RunNumber,
            event.mc_channel_number,
            event.mc_event_weight,
            event.averageIntPerXing)
        return True

    def finalize(self):
        if not self.passthrough:
            # write the pileup reweighting file
            self.pileup_tool.WriteToFile()
class PileupReweight(EventFilter):
    """
    Currently only implements hadhad reweighting

    Stores the nominal pileup weight plus its high/low systematic
    variations on the output tree for every event.
    """

    def __init__(self, year, tool, tool_high, tool_low,
                 tree, passthrough=False, **kwargs):
        # NOTE(review): ``year`` is accepted but never used here; kept for
        # signature symmetry with the other pileup filters -- confirm.
        if not passthrough:
            self.tree = tree
            self.tool = tool
            self.tool_high = tool_high
            self.tool_low = tool_low
        super(PileupReweight, self).__init__(
            passthrough=passthrough,
            **kwargs)

    def passes(self, event):
        # set the pileup weights: nominal plus the high/low variations,
        # one per pre-configured TPileupReweighting tool.
        self.tree.pileup_weight = self.tool.GetCombinedWeight(
            event.RunNumber,
            event.mc_channel_number,
            event.averageIntPerXing)
        self.tree.pileup_weight_high = self.tool_high.GetCombinedWeight(
            event.RunNumber,
            event.mc_channel_number,
            event.averageIntPerXing)
        self.tree.pileup_weight_low = self.tool_low.GetCombinedWeight(
            event.RunNumber,
            event.mc_channel_number,
            event.averageIntPerXing)
        #log.info("Run: {0}".format(event.RunNumber))
        #log.info("Channel: {0}".format(event.mc_channel_number))
        #log.info("mu: {0}".format(event.averageIntPerXing))
        #log.info("Weight: {0}".format(self.tree.pileup_weight))
        return True
class PileupScale(EventFilter):
    """Copy the pileup mu values onto the output tree, rescaling them
    for MC by the nominal PU_RESCALE factor for the year."""

    def __init__(self, tree, year, datatype, **kwargs):
        self.tree = tree
        # Nominal rescale factor only; the uncertainty is not used here.
        self.scale = PU_RESCALE[year][0]
        super(PileupScale, self).__init__(**kwargs)
        # Bind the appropriate passes implementation once, by data type.
        if datatype in (datasets.DATA, datasets.EMBED):
            self.passes = self.passes_data
        elif datatype in (datasets.MC, datasets.MCEMBED):
            self.passes = self.passes_mc
        else:
            raise ValueError("no pileup scale defined for datatype %d" %
                             datatype)

    def passes_data(self, event):
        # Data (and embedding) is copied through unscaled.
        self.tree.averageIntPerXing = event.averageIntPerXing
        self.tree.actualIntPerXing = event.actualIntPerXing
        return True

    def passes_mc(self, event):
        # MC mu values are rescaled to match data.
        self.tree.averageIntPerXing = event.averageIntPerXing * self.scale
        self.tree.actualIntPerXing = event.actualIntPerXing * self.scale
        return True
class averageIntPerXingPatch(EventFilter):
    """
    https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/ExtendedPileupReweighting:

    NOTE (23/01/2013): A bug has been found in the d3pd making code, causing
    all MC12 samples to have a few of the averageIntPerXing values incorrectly set
    (some should be 0 but are set to 1). The bug does not affect data. To resolve
    this, when reading this branch, for both prw file generating and for when
    retrieving pileup weights, you should amend the value with the following line
    of code:

    averageIntPerXing = (isSimulation && lbn==1 && int(averageIntPerXing+0.5)==1) ? 0. : averageIntPerXing;
    """

    def passes(self, event):
        # Zero out the buggy mu value for lumiblock 1 (see class docstring).
        # NOTE(review): the isSimulation condition from the recipe is not
        # checked here -- presumably this filter is only applied to MC;
        # confirm against the filter chain.
        if event.lbn == 1 and int(event.averageIntPerXing + 0.5) == 1:
            event.averageIntPerXing = 0.
        return True
| gpl-3.0 |
alexandre/quokka | quokka/tests/test_text_utils.py | 12 | 3042 | #!/usr/bin/env python
# coding: utf-8
import unittest
from quokka.utils.text import slugify, abbreviate
class TestSlug(unittest.TestCase):
    """Tests for quokka.utils.text.slugify.

    Uses ``assertEqual`` rather than the deprecated ``assertEquals``
    alias (removed in modern Python versions).
    """

    def test_should_always_return_lowercase_words(self):
        self.assertEqual(slugify('ALVAROJUSTEN'), 'alvarojusten')

    def test_should_replace_space_with_dash(self):
        self.assertEqual(slugify('Alvaro Justen'), 'alvaro-justen')

    def test_should_ignore_unecessary_spaces(self):
        self.assertEqual(slugify(' alvaro justen '), 'alvaro-justen')

    def test_should_replace_nonascii_chars_with_corresponding_ascii_chrs(self):
        # Each accented variant must map to its plain-ASCII counterpart.
        self.assertEqual(slugify(u'áÁàÀãÃâÂäÄ'), 'aaaaaaaaaa')
        self.assertEqual(slugify(u'éÉèÈẽẼêÊëË'), 'eeeeeeeeee')
        self.assertEqual(slugify(u'íÍìÌĩĨîÎïÏ'), 'iiiiiiiiii')
        self.assertEqual(slugify(u'óÓòÒõÕôÔöÖ'), 'oooooooooo')
        self.assertEqual(slugify(u'úÚùÙũŨûÛüÜ'), 'uuuuuuuuuu')
        self.assertEqual(slugify(u'ćĆĉĈçÇ'), 'cccccc')

    def test_should_accept_unicode_text(self):
        self.assertEqual(slugify(u'Álvaro Justen'), 'alvaro-justen')

    def test_should_accept_other_input_encodings(self):
        slugged_text = slugify(u'Álvaro Justen')
        self.assertEqual(slugged_text, 'alvaro-justen')

    def test_should_accept_only_ascii_letters_and_numbers(self):
        # All punctuation and symbols must be stripped.
        slugged_text = slugify('''qwerty123456"'@#$%*()_+\|<>,.;:/?]~[`{}^ ''')
        self.assertEqual(slugged_text, 'qwerty123456')
class TestAbbreviate(unittest.TestCase):
    """Tests for quokka.utils.text.abbreviate.

    Uses ``assertEqual`` rather than the deprecated ``assertEquals``
    alias (removed in modern Python versions).
    """

    def test_name_and_last_name_should_return_equal(self):
        name = 'Álvaro Justen'
        expected = 'Álvaro Justen'
        self.assertEqual(abbreviate(name), expected)

    def test_name_with_two_surnames_should_abbreviate_the_middle_one(self):
        name = 'Álvaro Fernandes Justen'
        expected = 'Álvaro F. Justen'
        self.assertEqual(abbreviate(name), expected)

    def test_three_surnames_should_abbreviate_the_two_in_the_middle(self):
        name = 'Álvaro Fernandes Abreu Justen'
        expected = 'Álvaro F. A. Justen'
        self.assertEqual(abbreviate(name), expected)

    def test_should_not_abbreviate_tiny_words(self):
        # Connectives such as "de", "da", "dos" must be kept verbatim.
        name = 'Álvaro Fernandes de Abreu Justen'
        expected = 'Álvaro F. de A. Justen'
        self.assertEqual(abbreviate(name), expected)
        name = 'Fulano da Costa e Silva'
        expected = 'Fulano da C. e Silva'
        self.assertEqual(abbreviate(name), expected)
        name = 'Fulano dos Santos'
        expected = 'Fulano dos Santos'
        self.assertEqual(abbreviate(name), expected)

    def test_should_not_abbreviate_next_surname_if_pretty_is_true(self):
        name = 'Álvaro Fernandes de Abreu Justen'
        expected = 'Álvaro F. de Abreu Justen'
        self.assertEqual(abbreviate(name, pretty=True), expected)
        name = 'Rafael da Costa Rodrigues Silva'
        expected = 'Rafael da Costa R. Silva'
        self.assertEqual(abbreviate(name, pretty=True), expected)
| mit |
amisrs/one-eighty | angular_flask/lib/python2.7/site-packages/sqlalchemy/sql/expression.py | 6 | 219350 | # sql/expression.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the base components of SQL expression trees.
All components are derived from a common base class
:class:`.ClauseElement`. Common behaviors are organized
based on class hierarchies, in some cases via mixins.
All object construction from this package occurs via functions which
in some cases will construct composite :class:`.ClauseElement` structures
together, and in other cases simply return a single :class:`.ClauseElement`
constructed directly. The function interface affords a more "DSL-ish"
feel to constructing SQL expressions and also allows future class
reorganizations.
Even though classes are not constructed directly from the outside,
most classes which have additional public methods are considered to be
public (i.e. have no leading underscore). Other classes which are
"semi-public" are marked with a single leading underscore; these
classes usually have few or no public methods and are less guaranteed
to stay the same in future releases.
"""
import itertools
import re
from operator import attrgetter
from .. import util, exc, inspection
from . import operators
from .operators import ColumnOperators
from .visitors import Visitable, cloned_traverse
import operator
functions = util.importlater("sqlalchemy.sql", "functions")
sqlutil = util.importlater("sqlalchemy.sql", "util")
sqltypes = util.importlater("sqlalchemy", "types")
default = util.importlater("sqlalchemy.engine", "default")
__all__ = [
'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
'table', 'text',
'tuple_', 'type_coerce', 'union', 'union_all', 'update', ]
PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT')
NO_ARG = util.symbol('NO_ARG')
def nullsfirst(column):
    """Return a NULLS FIRST ``ORDER BY`` clause element.

    e.g.::

        someselect.order_by(desc(table1.mycol).nullsfirst())

    produces::

        ORDER BY mycol DESC NULLS FIRST

    """
    # Rendered by the compiler as "<column> NULLS FIRST".
    return UnaryExpression(column, modifier=operators.nullsfirst_op)
def nullslast(column):
    """Return a NULLS LAST ``ORDER BY`` clause element.

    e.g.::

        someselect.order_by(desc(table1.mycol).nullslast())

    produces::

        ORDER BY mycol DESC NULLS LAST

    """
    # Rendered by the compiler as "<column> NULLS LAST".
    return UnaryExpression(column, modifier=operators.nullslast_op)
def desc(column):
    """Return a descending ``ORDER BY`` clause element.

    e.g.::

        someselect.order_by(desc(table1.mycol))

    produces::

        ORDER BY mycol DESC

    """
    # Rendered by the compiler as "<column> DESC".
    return UnaryExpression(column, modifier=operators.desc_op)
def asc(column):
    """Return an ascending ``ORDER BY`` clause element.

    e.g.::

        someselect.order_by(asc(table1.mycol))

    produces::

        ORDER BY mycol ASC

    """
    # Rendered by the compiler as "<column> ASC".
    return UnaryExpression(column, modifier=operators.asc_op)
def outerjoin(left, right, onclause=None):
    """Return an ``OUTER JOIN`` clause element.

    The returned object is an instance of :class:`.Join`.

    Similar functionality is also available via the
    :meth:`~.FromClause.outerjoin()` method on any
    :class:`.FromClause`.

    :param left: The left side of the join.

    :param right: The right side of the join.

    :param onclause: Optional criterion for the ``ON`` clause, is
      derived from foreign key relationships established between
      left and right otherwise.

    To chain joins together, use the :meth:`.FromClause.join` or
    :meth:`.FromClause.outerjoin` methods on the resulting
    :class:`.Join` object.

    """
    return Join(left, right, onclause, isouter=True)
def join(left, right, onclause=None, isouter=False):
    """Return a ``JOIN`` clause element (regular inner join).

    The returned object is an instance of :class:`.Join`.

    Similar functionality is also available via the
    :meth:`~.FromClause.join()` method on any
    :class:`.FromClause`.

    :param left: The left side of the join.

    :param right: The right side of the join.

    :param onclause: Optional criterion for the ``ON`` clause, is
      derived from foreign key relationships established between
      left and right otherwise.

    To chain joins together, use the :meth:`.FromClause.join` or
    :meth:`.FromClause.outerjoin` methods on the resulting
    :class:`.Join` object.

    """
    return Join(left, right, onclause, isouter)
def select(columns=None, whereclause=None, from_obj=[], **kwargs):
    """Return a new :class:`.Select` construct, i.e. a ``SELECT`` statement.

    Similar functionality is also available via the :func:`select()`
    method on any :class:`.FromClause`.  String arguments are accepted
    wherever :class:`.ClauseElement` arguments are, and are coerced via
    :func:`text()` or :func:`literal_column()` as appropriate.

    :param columns: a list of :class:`.ClauseElement` objects (typically
      :class:`.ColumnElement` subclasses) forming the columns clause of
      the resulting statement; each member that is a
      :class:`.Selectable` contributes its individual
      :class:`.ColumnElement` members.

    :param whereclause: a :class:`.ClauseElement` expression forming the
      ``WHERE`` clause.

    :param from_obj: a list of additional :class:`.ClauseElement`
      objects for the ``FROM`` clause.  "From" objects are located
      automatically within the columns and whereclause, so this is only
      needed for those that are not otherwise locatable.

    Remaining keyword arguments are accepted by :class:`.Select`,
    including ``autocommit`` (deprecated; use
    ``.execution_options(autocommit=<True|False>)``), ``bind``,
    ``correlate``, ``distinct`` (which may also be a column expression
    or list of them for the Postgresql ``DISTINCT ON`` syntax),
    ``for_update`` (``True``, or dialect-specific values such as
    ``"read"``, ``"nowait"``, ``"read_nowait"``), ``group_by``,
    ``having``, ``limit``, ``offset``, ``order_by`` and ``use_labels``.

    .. seealso::

        :ref:`coretutorial_selecting` - Core Tutorial description of
        :func:`.select`.

    """
    # NOTE: from_obj=[] is a shared mutable default; it is only
    # forwarded here (never mutated), so this is safe in practice.
    stmt = Select(columns, whereclause=whereclause,
                  from_obj=from_obj, **kwargs)
    return stmt
def subquery(alias, *args, **kwargs):
    """Return an :class:`.Alias` object derived from a :class:`.Select`.

    :param alias: the alias name.

    All remaining positional and keyword arguments are delivered to the
    :func:`select` function.

    """
    stmt = Select(*args, **kwargs)
    return stmt.alias(alias)
def insert(table, values=None, inline=False, **kwargs):
    """Represent an ``INSERT`` statement via the :class:`.Insert` SQL
    construct.

    Similar functionality is available via the
    :meth:`~.TableClause.insert` method on :class:`~.schema.Table`.

    :param table: :class:`.TableClause` which is the subject of the
      insert.

    :param values: collection of values to be inserted; see
      :meth:`.Insert.values` for a description of allowed formats.  May
      be omitted entirely, in which case the VALUES clause is rendered
      at execution time from the parameters passed to
      :meth:`.Connection.execute`.

    :param inline: if True, SQL defaults will be compiled 'inline' into
      the statement and not pre-executed.

    If both ``values`` and compile-time bind parameters are present, the
    compile-time bind parameters override the information specified
    within ``values`` on a per-key basis.  Keys within ``values`` may be
    :class:`~sqlalchemy.schema.Column` objects or their string
    identifiers; each value may be a literal, a Column object, or a
    SELECT statement (which is correlated against the INSERT's table if
    it refers to it).

    .. seealso::

        :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial

        :ref:`inserts_and_updates` - SQL Expression Tutorial

    """
    stmt = Insert(table, values, inline=inline, **kwargs)
    return stmt
def update(table, whereclause=None, values=None, inline=False, **kwargs):
    """Represent an ``UPDATE`` statement via the :class:`.Update` SQL
    construct.

    E.g.::

        from sqlalchemy import update

        stmt = update(users).where(users.c.id==5).\\
                values(name='user #5')

    Similar functionality is available via the
    :meth:`~.TableClause.update` method on :class:`.Table`.

    :param table: a :class:`.Table` object representing the database
      table to be updated.

    :param whereclause: optional SQL expression describing the ``WHERE``
      condition; the generative :meth:`~Update.where()` method may be
      used instead.  The WHERE clause may refer to multiple tables
      (since 0.7.4); backends with support render an ``UPDATE FROM``
      clause (or, on MySQL, a multi-table update), and a correlated
      subquery is the SQL-standard alternative.

    :param values: optional dictionary specifying the ``SET`` conditions.
      If left as ``None``, the ``SET`` conditions are determined from
      the parameters passed at execution and/or compilation time; the
      generative :meth:`.Update.values` method may also be used.  Keys
      may be :class:`.Column` objects or their string identifiers (the
      Column "key"); values may be literals or SQL expressions such as a
      related :class:`.Column` or a *correlated* scalar
      :func:`.select` construct.

    :param inline: if True, SQL defaults present on :class:`.Column`
      objects via the ``default`` keyword are compiled 'inline' into the
      statement rather than pre-executed, and so will not appear in
      :meth:`.ResultProxy.last_updated_params`.

    If both ``values`` and compile-time bind parameters are present, the
    compile-time bind parameters override the information specified
    within ``values`` on a per-key basis.

    .. seealso::

        :ref:`inserts_and_updates` - SQL Expression
        Language Tutorial

    """
    stmt = Update(table,
                  whereclause=whereclause,
                  values=values,
                  inline=inline,
                  **kwargs)
    return stmt
def delete(table, whereclause=None, **kwargs):
    """Represent a ``DELETE`` statement via the :class:`.Delete` SQL
    construct.

    Similar functionality is available via the
    :meth:`~.TableClause.delete` method on
    :class:`~.schema.Table`.

    :param table: The table to be deleted from.

    :param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
      condition of the ``DELETE`` statement. Note that the
      :meth:`~Delete.where()` generative method may be used instead.

    .. seealso::

        :ref:`deletes` - SQL Expression Tutorial

    """
    return Delete(table, whereclause, **kwargs)
def and_(*clauses):
    """Join the given clauses together using the SQL ``AND`` operator.

    The ``&`` operator is likewise overloaded on all
    :class:`.ColumnElement` subclasses to produce the same result.
    """
    # a single clause needs no conjunction wrapper
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
    """Join the given clauses together using the SQL ``OR`` operator.

    The ``|`` operator is likewise overloaded on all
    :class:`.ColumnElement` subclasses to produce the same result.
    """
    # a single clause needs no disjunction wrapper
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
    """Return a negation of the given clause, i.e. ``NOT(clause)``.

    The ``~`` operator is likewise overloaded on all
    :class:`.ColumnElement` subclasses to produce the same result.
    """
    # literals are coerced to bind parameters before negation
    coerced = _literal_as_binds(clause)
    return operators.inv(coerced)
def distinct(expr):
    """Return a ``DISTINCT`` clause.

    e.g.::

        distinct(a)

    renders ``DISTINCT a``.
    """
    coerced = _literal_as_binds(expr)
    # the unary expression inherits the type of its operand
    return UnaryExpression(coerced,
                           operator=operators.distinct_op,
                           type_=coerced.type)
def between(ctest, cleft, cright):
    """Return a ``BETWEEN`` predicate clause.

    Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.
    The :func:`between()` method available on all
    :class:`.ColumnElement` subclasses provides similar functionality.
    """
    # coerce the tested expression, then delegate to its between() method
    return _literal_as_binds(ctest).between(cleft, cright)
def case(whens, value=None, else_=None):
    """Produce a ``CASE`` statement.

    :param whens: a sequence of pairs, or alternatively a dict,
      to be translated into "WHEN / THEN" clauses.

    :param value: optional; for simple case statements, produces
      a column expression as in "CASE <expr> WHEN ...".

    :param else\_: optional; produces the "ELSE" portion of the
      "CASE" statement.

    The expressions used for THEN and ELSE, when specified as strings,
    will be interpreted as bound values. To specify textual SQL
    expressions for these, use the :func:`literal_column` construct.

    The expressions used for the WHEN criterion may only be literal
    strings when "value" is present, i.e. CASE table.somecol WHEN "x"
    THEN "y". Otherwise, literal strings are not accepted in this
    position, and either the text(<string>) or literal(<string>)
    constructs must be used to interpret raw string values.

    Usage examples::

        case([(orderline.c.qty > 100, item.c.specialprice),
              (orderline.c.qty > 10, item.c.bulkprice)
            ], else_=item.c.regularprice)

        case(value=emp.c.type, whens={
                'engineer': emp.c.salary * 1.1,
                'manager': emp.c.salary * 3,
            })

    Using :func:`literal_column()`, to allow for databases that
    do not support bind parameters in the ``then`` clause; the type
    can be specified which determines the type of the :func:`case()`
    construct overall::

        case([(orderline.c.qty > 100,
                literal_column("'greaterthan100'", String)),
              (orderline.c.qty > 10, literal_column("'greaterthan10'",
                String))
            ], else_=literal_column("'lethan10'", String))

    """
    return Case(whens, value=value, else_=else_)
def cast(clause, totype, **kwargs):
    """Return a ``CAST`` function.

    Equivalent of SQL ``CAST(clause AS totype)``.

    Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e::

        cast(table.c.unit_price * table.c.qty, Numeric(10,4))

    or::

        cast(table.c.timestamp, DATE)

    """
    return Cast(clause, totype, **kwargs)
def extract(field, expr):
    """Return the clause ``extract(field FROM expr)``."""
    return Extract(field, expr)
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``.

    e.g.::

        collate(mycolumn, 'utf8_bin')

    produces ``mycolumn COLLATE utf8_bin``.
    """
    # the left side is coerced to binds, the collation name to raw text
    lhs = _literal_as_binds(expression)
    rhs = _literal_as_text(collation)
    # the resulting expression carries the type of the collated operand
    return BinaryExpression(lhs, rhs, operators.collate, type_=lhs.type)
def exists(*args, **kwargs):
    """Return an ``EXISTS`` clause as applied to a :class:`.Select` object.

    Calling styles are of the following forms::

        # use on an existing select()
        s = select([table.c.col1]).where(table.c.col2==5)
        s = exists(s)

        # construct a select() at once
        exists(['*'], **select_arguments).where(criterion)

        # columns argument is optional, generates "EXISTS (SELECT *)"
        # by default.
        exists().where(table.c.col2==5)

    """
    return Exists(*args, **kwargs)
def union(*selects, **kwargs):
    """Return a ``UNION`` of multiple selectables.

    The returned object is an instance of :class:`.CompoundSelect`;
    a similar :func:`union()` method is available on all
    :class:`.FromClause` subclasses.

    \*selects
      a list of :class:`.Select` instances.

    \**kwargs
      available keyword arguments are the same as those of
      :func:`select`.

    """
    keyword = CompoundSelect.UNION
    return CompoundSelect(keyword, *selects, **kwargs)
def union_all(*selects, **kwargs):
    """Return a ``UNION ALL`` of multiple selectables.

    The returned object is an instance of :class:`.CompoundSelect`;
    a similar :func:`union_all()` method is available on all
    :class:`.FromClause` subclasses.

    \*selects
      a list of :class:`.Select` instances.

    \**kwargs
      available keyword arguments are the same as those of
      :func:`select`.

    """
    keyword = CompoundSelect.UNION_ALL
    return CompoundSelect(keyword, *selects, **kwargs)
def except_(*selects, **kwargs):
    """Return an ``EXCEPT`` of multiple selectables.

    The returned object is an instance of :class:`.CompoundSelect`.

    \*selects
      a list of :class:`.Select` instances.

    \**kwargs
      available keyword arguments are the same as those of
      :func:`select`.

    """
    keyword = CompoundSelect.EXCEPT
    return CompoundSelect(keyword, *selects, **kwargs)
def except_all(*selects, **kwargs):
    """Return an ``EXCEPT ALL`` of multiple selectables.

    The returned object is an instance of :class:`.CompoundSelect`.

    \*selects
      a list of :class:`.Select` instances.

    \**kwargs
      available keyword arguments are the same as those of
      :func:`select`.

    """
    keyword = CompoundSelect.EXCEPT_ALL
    return CompoundSelect(keyword, *selects, **kwargs)
def intersect(*selects, **kwargs):
    """Return an ``INTERSECT`` of multiple selectables.

    The returned object is an instance of :class:`.CompoundSelect`.

    \*selects
      a list of :class:`.Select` instances.

    \**kwargs
      available keyword arguments are the same as those of
      :func:`select`.

    """
    keyword = CompoundSelect.INTERSECT
    return CompoundSelect(keyword, *selects, **kwargs)
def intersect_all(*selects, **kwargs):
    """Return an ``INTERSECT ALL`` of multiple selectables.

    The returned object is an instance of :class:`.CompoundSelect`.

    \*selects
      a list of :class:`.Select` instances.

    \**kwargs
      available keyword arguments are the same as those of
      :func:`select`.

    """
    keyword = CompoundSelect.INTERSECT_ALL
    return CompoundSelect(keyword, *selects, **kwargs)
def alias(selectable, name=None):
    """Return an :class:`.Alias` object.

    An :class:`.Alias` represents any :class:`.FromClause` with an
    alternate name assigned within SQL, typically using the ``AS``
    clause when generated, e.g. ``SELECT * FROM table AS aliasname``.
    Similar functionality is available via the
    :meth:`~.FromClause.alias` method on all :class:`.FromClause`
    subclasses.

    When applied to a :class:`.Table`, the table is rendered as
    ``tablename AS aliasname`` in a SELECT statement; applied to a
    :func:`.select` construct, the effect is that of a named subquery,
    i.e. ``(select ...) AS aliasname``.

    :param selectable: any :class:`.FromClause` subclass,
        such as a table, select statement, etc.

    :param name: string name to be assigned as the alias.
        If ``None``, a deterministic "anonymous" name is generated at
        compile time: it is guaranteed unique against other constructs
        in the same statement, and is stable across successive
        compilations of the same statement object.

    """
    return Alias(selectable, name=name)
def literal(value, type_=None):
    """Return a literal clause, bound to a bind parameter.

    Literal clauses are created automatically when
    non-:class:`.ClauseElement` objects (such as strings, ints, dates,
    etc.) are used in a comparison operation with a
    :class:`.ColumnElement` subclass, such as a
    :class:`~sqlalchemy.schema.Column` object. Use this function to
    force the generation of a literal clause, which will be created as
    a :class:`BindParameter` with a bound value.

    :param value: the value to be bound. Can be any Python object
      supported by the underlying DB-API, or is translatable via the
      given type argument.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      which will provide bind-parameter translation for this literal.

    """
    # key is None: an anonymous, unique parameter name is generated
    return BindParameter(None, value, type_=type_, unique=True)
def tuple_(*expr):
    """Return a SQL tuple.

    Main usage is to produce a composite IN construct::

        tuple_(table.c.col1, table.c.col2).in_(
            [(1, 2), (5, 12), (10, 19)]
        )

    .. warning::

        The composite IN construct is not supported by all backends,
        and is currently known to work on Postgresql and MySQL,
        but not SQLite. Unsupported backends will raise
        a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
        an expression is invoked.

    """
    return Tuple(*expr)
def type_coerce(expr, type_):
    """Coerce the given expression into the given type,
    on the Python side only.

    :func:`.type_coerce` is roughly similar to :func:`.cast`, except no
    "CAST" expression is rendered - the given type is only applied towards
    expression typing and against received result values.

    e.g.::

        from sqlalchemy.types import TypeDecorator
        import uuid

        class AsGuid(TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                if value is not None:
                    return str(value)
                else:
                    return None

            def process_result_value(self, value, dialect):
                if value is not None:
                    return uuid.UUID(value)
                else:
                    return None

        conn.execute(
            select([type_coerce(mytable.c.ident, AsGuid)]).\\
                    where(
                    type_coerce(mytable.c.ident, AsGuid) ==
                    uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
                )
        )

    :param expr: the expression or value to be coerced.  May be a plain
      Python value, a :class:`.BindParameter`, any clause element, or an
      object providing ``__clause_element__()``.

    :param type\_: a :class:`~sqlalchemy.types.TypeEngine` class or
      instance to apply.
    """
    type_ = sqltypes.to_instance(type_)
    if hasattr(expr, '__clause_element__'):
        # Unwrap adapting objects via the standard __clause_element__()
        # protocol used throughout this module, carrying type_ through
        # the recursive call.  The previous revision checked for a
        # nonexistent '__clause_expr__' attribute and omitted the
        # required type_ argument, which raised TypeError.
        return type_coerce(expr.__clause_element__(), type_)
    elif isinstance(expr, BindParameter):
        # re-type an existing bind parameter on a copy, leaving the
        # original untouched
        bp = expr._clone()
        bp.type = type_
        return bp
    elif not isinstance(expr, Visitable):
        if expr is None:
            return null()
        else:
            # plain Python value: bind it with the requested type
            return literal(expr, type_=type_)
    else:
        # clause element: wrap in an anonymous Label carrying the type
        return Label(None, expr, type_=type_)
def label(name, obj):
    """Return a :class:`Label` object for the
    given :class:`.ColumnElement`.

    A label changes the name of an element in the columns clause of a
    ``SELECT`` statement, typically via the ``AS`` SQL keyword.

    This functionality is more conveniently available via the
    :func:`label()` method on :class:`.ColumnElement`.

    :param name: label name

    :param obj: a :class:`.ColumnElement`.

    """
    return Label(name, obj)
def column(text, type_=None):
    """Return a textual column clause, as would be in the columns clause of a
    ``SELECT`` statement.

    The object returned is an instance of :class:`.ColumnClause`, which
    represents the "syntactical" portion of the schema-level
    :class:`~sqlalchemy.schema.Column` object. It is often used directly
    within :func:`~.expression.select` constructs or with lightweight
    :func:`~.expression.table` constructs.

    Note that the :func:`~.expression.column` function is not part of
    the ``sqlalchemy`` namespace. It must be imported from the
    ``sql`` package::

        from sqlalchemy.sql import table, column

    :param text: the name of the column. Quoting rules will be applied
      to the clause like any other column name. For textual column
      constructs that are not to be quoted, use the
      :func:`literal_column` function.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      object which will provide result-set translation for this column.

    See :class:`.ColumnClause` for further examples.
    """
    return ColumnClause(text, type_=type_)
def literal_column(text, type_=None):
    """Return a textual column expression, as would be in the columns
    clause of a ``SELECT`` statement.

    The object returned supports further expressions in the same way as
    any other column object, including comparison, math and string
    operations. The type\_ parameter is important to determine proper
    expression behavior (such as, '+' means string concatenation or
    numerical addition based on the type).

    :param text: the text of the expression; can be any SQL expression.
      Quoting rules will not be applied. To specify a column-name
      expression which should be subject to quoting rules, use the
      :func:`column` function.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      object which will provide result-set translation and additional
      expression semantics for this column. If left as None the type
      will be NullType.
    """
    # is_literal=True suppresses identifier quoting on the given text
    return ColumnClause(text, type_=type_, is_literal=True)
def table(name, *columns):
    """Represent a textual table clause.

    The object returned is an instance of :class:`.TableClause`, which
    represents the "syntactical" portion of the schema-level
    :class:`~.schema.Table` object.  It may be used to construct
    lightweight table constructs.

    Note that the :func:`~.expression.table` function is not part of
    the ``sqlalchemy`` namespace. It must be imported from the
    ``sql`` package::

        from sqlalchemy.sql import table, column

    :param name: Name of the table.

    :param columns: A collection of :func:`~.expression.column`
      constructs.

    See :class:`.TableClause` for further examples.
    """
    return TableClause(name, *columns)
def bindparam(key, value=NO_ARG, type_=None, unique=False, required=NO_ARG,
                quote=None, callable_=None):
    """Create a bind parameter clause with the given key.

    :param key:
      the key for this bind param. Will be used in the generated
      SQL statement for dialects that use named parameters. This
      value may be modified when part of a compilation operation,
      if other :class:`BindParameter` objects exist with the same
      key, or if its length is too long and truncation is
      required.  A :class:`.ColumnClause` may also be passed, in
      which case its key and type are used.

    :param value:
      Initial value for this bind param. This value may be
      overridden by the dictionary of parameters sent to statement
      compilation/execution.  Defaults to ``None``; however, if
      neither ``value`` nor ``callable`` are passed explicitly, the
      ``required`` flag will be set to ``True``, which has the effect
      of requiring a value be present when the statement is actually
      executed.

      .. versionchanged:: 0.8 The ``required`` flag is set to ``True``
         automatically if ``value`` or ``callable`` is not passed.

    :param callable\_:
      A callable function that takes the place of "value". The function
      will be called at statement execution time to determine the
      ultimate value. Used for scenarios where the actual bind
      value cannot be determined at the point at which the clause
      construct is created, but embedded bind values are still
      desirable.

    :param type\_:
      A ``TypeEngine`` object that will be used to pre-process the
      value corresponding to this :class:`BindParameter` at
      execution time.

    :param unique:
      if True, the key name of this BindParamClause will be
      modified if another :class:`BindParameter` of the same name
      already has been located within the containing
      :class:`.ClauseElement`.

    :param required:
      If ``True``, a value is required at execution time. If not
      passed, it is set to ``True`` or ``False`` based on whether or
      not one of ``value`` or ``callable`` were passed.

      .. versionchanged:: 0.8 If the ``required`` flag is not
         specified, it will be set automatically to ``True`` or
         ``False`` depending on whether or not the ``value`` or
         ``callable`` parameters were specified.

    :param quote:
      True if this parameter name requires quoting and is not
      currently known as a SQLAlchemy reserved word; this currently
      only applies to the Oracle backend.

    """
    # a ColumnClause supplies both the key and the type
    if isinstance(key, ColumnClause):
        type_ = key.type
        key = key.name

    # with no value and no callable, default to requiring a value
    # at execution time
    if required is NO_ARG:
        required = value is NO_ARG and callable_ is None
    value = None if value is NO_ARG else value

    return BindParameter(key, value,
                         type_=type_,
                         callable_=callable_,
                         unique=unique,
                         required=required,
                         quote=quote)
def outparam(key, type_=None):
    """Create an 'OUT' parameter for usage in functions (stored
    procedures), for databases which support them.

    The ``outparam`` can be used like a regular function parameter.
    The "output" value will be available from the
    :class:`~sqlalchemy.engine.ResultProxy` object via its
    ``out_parameters`` attribute, which returns a dictionary
    containing the values.
    """
    # isoutparam marks the parameter as OUT; no initial value is bound
    return BindParameter(key, None,
                         type_=type_,
                         unique=False,
                         isoutparam=True)
def text(text, bind=None, *args, **kwargs):
    """Create a SQL construct that is represented by a literal string.

    E.g.::

        t = text("SELECT * FROM users")
        result = connection.execute(t)

    The advantages :func:`text` provides over a plain string are
    backend-neutral support for bind parameters, per-statement
    execution options, as well as bind parameter and result-column
    typing behavior, allowing SQLAlchemy type constructs to play a
    role when executing a statement that is specified literally.

    Bind parameters are specified by name, using the format ``:name``.
    E.g.::

        t = text("SELECT * FROM users WHERE id=:user_id")
        result = connection.execute(t, user_id=12)

    To invoke SQLAlchemy typing logic for bind parameters, the
    ``bindparams`` list allows specification of :func:`bindparam`
    constructs which specify the type for a given name::

        t = text("SELECT id FROM users WHERE updated_at>:updated",
                    bindparams=[bindparam('updated', DateTime())]
                )

    Typing during result row processing is also an important concern.
    Result column types are specified using the ``typemap`` dictionary,
    where the keys match the names of columns. These names are taken
    from what the DBAPI returns as ``cursor.description``::

        t = text("SELECT id, name FROM users",
                typemap={
                    'id':Integer,
                    'name':Unicode
                }
        )

    The :func:`text` construct is used internally for most cases when
    a literal string is specified for part of a larger query, such as
    within :func:`select()`, :func:`update()`, :func:`insert()` or
    :func:`delete()`. In those cases, the same bind parameter syntax
    is applied::

        s = select([users.c.id, users.c.name]).where("id=:user_id")
        result = connection.execute(s, user_id=12)

    Using :func:`text` explicitly usually implies the construction
    of a full, standalone statement. As such, SQLAlchemy refers
    to it as an :class:`.Executable` object, and it supports
    the :meth:`Executable.execution_options` method. For example,
    a :func:`text` construct that should be subject to "autocommit"
    can be set explicitly so using the ``autocommit`` option::

        t = text("EXEC my_procedural_thing()").\\
                execution_options(autocommit=True)

    Note that SQLAlchemy's usual "autocommit" behavior applies to
    :func:`text` constructs - that is, statements which begin
    with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
    or a variety of other phrases specific to certain backends, will
    be eligible for autocommit if no transaction is in progress.

    :param text:
      the text of the SQL statement to be created. use ``:<param>``
      to specify bind parameters; they will be compiled to their
      engine-specific format.

    :param autocommit:
      Deprecated. Use .execution_options(autocommit=<True|False>)
      to set the autocommit option.

    :param bind:
      an optional connection or engine to be used for this text query.

    :param bindparams:
      a list of :func:`bindparam()` instances which can be used to
      define the types and/or initial values for the bind parameters
      within the textual statement; the keynames of the bindparams
      must match those within the text of the statement. The types
      will be used for pre-processing on bind values.

    :param typemap:
      a dictionary mapping the names of columns represented in the
      columns clause of a ``SELECT`` statement to type objects,
      which will be used to perform post-processing on columns within
      the result set. This argument applies to any expression
      that returns result sets.

    """
    return TextClause(text, bind=bind, *args, **kwargs)
def over(func, partition_by=None, order_by=None):
    """Produce an OVER clause against a function.

    Used against aggregate or so-called "window" functions,
    for database backends that support window functions.

    E.g.::

        from sqlalchemy import over
        over(func.row_number(), order_by='x')

    Would produce "ROW_NUMBER() OVER(ORDER BY x)".

    :param func: a :class:`.FunctionElement` construct, typically
     generated by :data:`~.expression.func`.
    :param partition_by: a column element or string, or a list
     of such, that will be used as the PARTITION BY clause
     of the OVER construct.
    :param order_by: a column element or string, or a list
     of such, that will be used as the ORDER BY clause
     of the OVER construct.

    This function is also available from the :data:`~.expression.func`
    construct itself via the :meth:`.FunctionElement.over` method.

    .. versionadded:: 0.7

    """
    return Over(func, partition_by=partition_by, order_by=order_by)
def null():
    """Return a :class:`Null` object, which compiles to ``NULL``."""
    return Null()
def true():
    """Return a :class:`True_` object, which compiles to ``true``,
    or the boolean equivalent for the target dialect.
    """
    return True_()
def false():
    """Return a :class:`False_` object, which compiles to ``false``,
    or the boolean equivalent for the target dialect.
    """
    return False_()
class _FunctionGenerator(object):
    """Generate :class:`.Function` objects based on getattr calls."""

    def __init__(self, **opts):
        # dotted-name accumulator; note name mangling makes this
        # _FunctionGenerator__names in the instance __dict__, so the
        # __getattr__ passthru below never intercepts it.
        self.__names = []
        self.opts = opts

    def __getattr__(self, name):
        # passthru __ attributes; fixes pydoc
        if name.startswith('__'):
            try:
                return self.__dict__[name]
            except KeyError:
                raise AttributeError(name)
        elif name.endswith('_'):
            # trailing underscore allows reserved words, e.g. func.max_
            name = name[0:-1]
        # each attribute access returns a *new* generator so that
        # partial chains (e.g. func.stats) can be reused safely
        f = _FunctionGenerator(**self.opts)
        f.__names = list(self.__names) + [name]
        return f

    def __call__(self, *c, **kwargs):
        # call-time keyword args override the generator-level options
        o = self.opts.copy()
        o.update(kwargs)

        tokens = len(self.__names)

        if tokens == 2:
            package, fname = self.__names
        elif tokens == 1:
            # bare name, e.g. func.count -> look up in "_default" package
            package, fname = "_default", self.__names[0]
        else:
            # deeper dotted chains bypass the registry lookup entirely
            package = None

        if package is not None and \
                package in functions._registry and \
                fname in functions._registry[package]:
            # a registered "generic" function class handles this name
            func = functions._registry[package][fname]
            return func(*c, **o)

        # fall back to a plain Function with any leading names treated
        # as package qualifiers (rendered dot-separated)
        return Function(self.__names[-1],
                        packagenames=self.__names[0:-1], *c, **o)
# "func" global - i.e. func.count()
func = _FunctionGenerator()
"""Generate SQL function expressions.
:data:`.func` is a special object instance which generates SQL
functions based on name-based attributes, e.g.::
>>> print func.count(1)
count(:param_1)
The element is a column-oriented SQL element like any other, and is
used in that way::
>>> print select([func.count(table.c.id)])
SELECT count(sometable.id) FROM sometable
Any name can be given to :data:`.func`. If the function name is unknown to
SQLAlchemy, it will be rendered exactly as is. For common SQL functions
which SQLAlchemy is aware of, the name may be interpreted as a *generic
function* which will be compiled appropriately to the target database::
>>> print func.current_timestamp()
CURRENT_TIMESTAMP
To call functions which are present in dot-separated packages,
specify them in the same manner::
>>> print func.stats.yield_curve(5, 10)
stats.yield_curve(:yield_curve_1, :yield_curve_2)
SQLAlchemy can be made aware of the return type of functions to enable
type-specific lexical and result-based behavior. For example, to ensure
that a string-based function returns a Unicode value and is similarly
treated as a string in expressions, specify
:class:`~sqlalchemy.types.Unicode` as the type:
>>> print func.my_string(u'hi', type_=Unicode) + ' ' + \
... func.my_string(u'there', type_=Unicode)
my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
The object returned by a :data:`.func` call is usually an instance of
:class:`.Function`.
This object meets the "column" interface, including comparison and labeling
functions. The object can also be passed the :meth:`~.Connectable.execute`
method of a :class:`.Connection` or :class:`.Engine`, where it will be
wrapped inside of a SELECT statement first::
print connection.execute(func.current_timestamp()).scalar()
In a few exception cases, the :data:`.func` accessor
will redirect a name to a built-in expression such as :func:`.cast`
or :func:`.extract`, as these names have well-known meaning
but are not exactly the same as "functions" from a SQLAlchemy
perspective.
.. versionadded:: 0.8 :data:`.func` can return non-function expression
constructs for common quasi-functional names like :func:`.cast`
and :func:`.extract`.
Functions which are interpreted as "generic" functions know how to
calculate their return type automatically. For a listing of known generic
functions, see :ref:`generic_functions`.
"""
# "modifier" global - i.e. modifier.distinct
# TODO: use UnaryExpression for this instead ?
modifier = _FunctionGenerator(group=False)
class _truncated_label(unicode):
    """A unicode subclass used to identify symbolic names
    that may require truncation at compile time."""

    def apply_map(self, map_):
        # truncated (non-anonymous) labels are not format strings;
        # the map is ignored and the label returned unchanged.
        return self

# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
    """A unicode subclass used to identify anonymously
    generated names."""

    def __add__(self, other):
        # concatenation on either side yields another anonymous label
        combined = unicode(self) + unicode(other)
        return _anonymous_label(combined)

    def __radd__(self, other):
        combined = unicode(other) + unicode(self)
        return _anonymous_label(combined)

    def apply_map(self, map_):
        # anonymous labels act as '%'-format strings against the map
        return self % map_
def _as_truncated(value):
    """coerce the given value to :class:`._truncated_label`.

    Existing :class:`._truncated_label` and
    :class:`._anonymous_label` objects are passed
    unchanged.
    """
    if not isinstance(value, _truncated_label):
        value = _truncated_label(value)
    return value
def _string_or_unprintable(element):
    """Render the given element as a string, falling back to a
    placeholder when its ``str()`` itself raises.

    Used when building error messages from arbitrary objects.
    """
    if isinstance(element, basestring):
        return element
    else:
        try:
            return str(element)
        except Exception:
            # previously a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit; only genuine str() failures
            # should be trapped here.
            return "unprintable element %r" % element
def _clone(element, **kw):
    # traversal-compatible clone hook; extra keyword arguments are
    # accepted for interface compatibility but ignored here.
    return element._clone()
def _expand_cloned(elements):
    """expand the given set of ClauseElements to be the set of all 'cloned'
    predecessors.
    """
    # lazy flattening; avoids building the intermediate list that
    # chain(*[...]) would require
    return itertools.chain.from_iterable(
        element._cloned_set for element in elements)
def _select_iterables(elements):
    """expand tables into individual columns in the
    given list of column expressions.
    """
    return itertools.chain.from_iterable(
        element._select_iterable for element in elements)
def _cloned_intersection(a, b):
    """return the intersection of sets a and b, counting
    any overlap between 'cloned' predecessors.

    The returned set is in terms of the entities present within 'a'.
    """
    overlap = set(_expand_cloned(a)) & set(_expand_cloned(b))
    # an element of 'a' is kept when any of its clone lineage overlaps
    return set(elem for elem in a
               if overlap & elem._cloned_set)
def _cloned_difference(a, b):
    """return the difference of sets a and b, counting
    any overlap between 'cloned' predecessors.

    The returned set is in terms of the entities present within 'a';
    counterpart to :func:`_cloned_intersection`.
    """
    all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(elem for elem in a
               if not all_overlap.intersection(elem._cloned_set))
def _from_objects(*elements):
    """Flatten the FROM objects of all given clause elements."""
    return itertools.chain.from_iterable(
        element._from_objects for element in elements)
def _labeled(element):
    """Ensure the given element carries a name, applying an anonymous
    label when it has none."""
    if hasattr(element, 'name'):
        return element
    return element.label(None)
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). it's only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
    """Resolve a column-oriented argument to its string key.

    Strings pass through unchanged; ``__clause_element__()`` adapters
    are unwrapped first; anything without a ``.key`` yields None.
    """
    if isinstance(element, basestring):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    # getattr with default mirrors the try/except AttributeError idiom
    return getattr(element, 'key', None)
def _clause_element_as_expr(element):
    """Unwrap the element via ``__clause_element__()`` when available,
    otherwise return it unchanged."""
    if not hasattr(element, '__clause_element__'):
        return element
    return element.__clause_element__()
def _literal_as_text(element):
    """Coerce the given argument to a SQL text construct.

    The order of checks is significant: existing clause elements pass
    through first, then ``__clause_element__()`` adapters are unwrapped,
    then plain strings become :class:`.TextClause`, then None/booleans
    become constant expressions; anything else is rejected.
    """
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif isinstance(element, basestring):
        return TextClause(unicode(element))
    elif isinstance(element, (util.NoneType, bool)):
        return _const_expr(element)
    else:
        raise exc.ArgumentError(
            "SQL expression object or string expected."
        )
def _no_literals(element):
    """Coerce to a clause element, rejecting plain Python literals.

    Used where an ambiguous literal (neither clearly textual SQL nor
    a bound value) would otherwise be silently accepted.
    """
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        raise exc.ArgumentError("Ambiguous literal: %r.  Use the 'text()' "
                                "function to indicate a SQL expression "
                                "literal, or 'literal()' to indicate a "
                                "bound value." % element)
    else:
        return element
def _is_literal(element):
    """True when the element is a plain literal value, i.e. neither a
    clause element nor a ``__clause_element__()`` adapter."""
    return not (isinstance(element, Visitable) or
                hasattr(element, '__clause_element__'))
def _only_column_elements_or_none(element, name):
    """Like :func:`_only_column_elements`, but passes None through."""
    if element is None:
        return None
    return _only_column_elements(element, name)
def _only_column_elements(element, name):
    """Coerce the given argument to a :class:`.ColumnElement`,
    unwrapping ``__clause_element__()`` adapters; raise
    :class:`.ArgumentError` for anything else.
    """
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if isinstance(element, ColumnElement):
        return element
    raise exc.ArgumentError(
        "Column-based expression object expected for argument "
        "'%s'; got: '%s', type %s" % (name, element, type(element)))
def _literal_as_binds(element, name=None, type_=None):
    """Coerce a literal Python value to a bound-parameter expression.

    Objects exposing ``__clause_element__()`` are unwrapped; ``None``
    becomes a SQL NULL; other non-:class:`.Visitable` values become a
    unique bind parameter; existing clause elements pass through.
    """
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        if element is None:
            return null()
        else:
            # use BindParameter directly, consistent with the rest of
            # this module (_BindParamClause is the legacy alias name).
            return BindParameter(name, element, type_=type_, unique=True)
    else:
        return element
def _interpret_as_column_or_from(element):
    """Coerce the given argument to a column expression or FROM clause.

    Clause elements and ``__clause_element__()`` adapters short-circuit
    before :func:`inspect` is consulted, keeping the common path cheap
    (see the module comment above regarding inspect() call overhead).
    """
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()

    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        # None and booleans become constant expressions
        if isinstance(element, (util.NoneType, bool)):
            return _const_expr(element)
    elif hasattr(insp, "selectable"):
        # e.g. ORM entities that map to a selectable
        return insp.selectable

    # fallback: render the stringified value as unquoted literal SQL
    return literal_column(str(element))
def _interpret_as_from(element):
    """Coerce the given argument to a FROM clause.

    Uses :func:`inspect` so that ORM entities with no defined namespace
    can also be accepted (see the module comment above); plain strings
    become :class:`.TextClause`.
    """
    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        if isinstance(element, basestring):
            return TextClause(unicode(element))
    elif hasattr(insp, "selectable"):
        return insp.selectable
    raise exc.ArgumentError("FROM expression expected")
def _const_expr(element):
    """Coerce None/False/True to their SQL constant expressions.

    Already-constructed :class:`.Null`, :class:`.False_` and
    :class:`.True_` instances pass through unchanged; identity checks
    (``is``) are used for the singletons, so e.g. 0/1 are rejected.
    """
    if isinstance(element, (Null, False_, True_)):
        return element
    elif element is None:
        return null()
    elif element is False:
        return false()
    elif element is True:
        return true()
    else:
        raise exc.ArgumentError(
            "Expected None, False, or True"
        )
def _type_from_args(args):
    """Return the type of the first argument carrying a non-NULL type,
    falling back to NullType when every argument is untyped.
    """
    for a in args:
        if not isinstance(a.type, sqltypes.NullType):
            return a.type
    else:
        # for/else: executes only when no typed argument was found.
        # NOTE(review): this returns the NullType *class* rather than an
        # instance (cf. sqltypes.NULLTYPE); downstream consumers appear
        # to accept either form -- confirm before changing.
        return sqltypes.NullType
def _corresponding_column_or_error(fromclause, column,
                                    require_embedded=False):
    """Locate the column in ``fromclause`` corresponding to ``column``,
    raising :class:`.InvalidRequestError` when no correspondence exists.

    :param require_embedded: passed through to
      ``FromClause.corresponding_column()``.
    """
    c = fromclause.corresponding_column(column,
            require_embedded=require_embedded)
    if c is None:
        raise exc.InvalidRequestError(
                "Given column '%s', attached to table '%s', "
                "failed to locate a corresponding column from table '%s'"
                %
                (column,
                    getattr(column, 'table', None),
                    fromclause.description)
                )
    return c
@util.decorator
def _generative(fn, *args, **kw):
    """Mark a method as generative."""
    # operate on a copy so the original statement object stays
    # unmodified; args[0] is the original 'self' as supplied by
    # util.decorator's wrapping.
    self = args[0]._generate()
    fn(self, *args[1:], **kw)
    return self
def is_column(col):
    """True if ``col`` is an instance of :class:`.ColumnElement`."""
    # a plain isinstance check; __clause_element__() adapters are NOT
    # unwrapped here.
    return isinstance(col, ColumnElement)
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
    def _clone(self):
        """Create a shallow copy of this ClauseElement.

        This method may be used by a generative API.  Its also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.
        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # reset memoized values that must be recomputed for the copy
        ClauseElement._cloned_set._reset(c)
        ColumnElement.comparator._reset(c)

        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses.  the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self

        return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
this type. Usually, its just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
if util.jython:
def __hash__(self):
"""Return a distinct hash code.
ClauseElements may have special equality comparisons which
makes us rely on them having unique hash codes for use in
hash-based collections. Stock __hash__ doesn't guarantee
unique values on platforms with moving GCs.
"""
return id(self)
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return sqlutil.Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return sqlutil.Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`.ClauseElement` with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should be normally created using the
:func:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
def compile(self, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`.ClauseElement`'s bound engine, if
any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
# Py3K
#return unicode(self.compile())
# Py2K
return unicode(self.compile()).encode('ascii', 'backslashreplace')
# end Py2K
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __invert__(self):
return self._negate()
def __nonzero__(self):
raise TypeError("Boolean value of this clause is not defined")
def _negate(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __repr__(self):
friendly = getattr(self, 'description', None)
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
inspection._self_inspects(ClauseElement)
class Immutable(object):
    """mark a ClauseElement as 'immutable' when expressions are cloned.

    Mixed into constructs whose copy semantics are "return self";
    bind-parameter replacement is therefore unsupported.
    """
    def unique_params(self, *optionaldict, **kwargs):
        raise NotImplementedError("Immutable objects do not support copying")
    def params(self, *optionaldict, **kwargs):
        raise NotImplementedError("Immutable objects do not support copying")
    def _clone(self):
        # cloning an immutable element returns the element itself
        return self
class _DefaultColumnComparator(operators.ColumnOperators):
    """Defines comparison and math operations.
    See :class:`.ColumnOperators` and :class:`.Operators` for descriptions
    of all operations.
    """
    @util.memoized_property
    def type(self):
        # the comparator's type mirrors that of the wrapped expression
        return self.expr.type
    def operate(self, op, *other, **kwargs):
        """Dispatch ``op`` via the ``operators`` table below; each entry
        is a tuple of (implementation method, *extra positional args)."""
        o = self.operators[op.__name__]
        return o[0](self, self.expr, op, *(other + o[1:]), **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        """Same dispatch as operate(), with operand order reversed."""
        o = self.operators[op.__name__]
        return o[0](self, self.expr, op, other, reverse=True, *o[1:], **kwargs)
    def _adapt_expression(self, op, other_comparator):
        """evaluate the return type of <self> <op> <othertype>,
        and apply any adaptations to the given operator.
        This method determines the type of a resulting binary expression
        given two source types and an operator.   For example, two
        :class:`.Column` objects, both of the type :class:`.Integer`, will
        produce a :class:`.BinaryExpression` that also has the type
        :class:`.Integer` when compared via the addition (``+``) operator.
        However, using the addition operator with an :class:`.Integer`
        and a :class:`.Date` object will produce a :class:`.Date`, assuming
        "days delta" behavior by the database (in reality, most databases
        other than Postgresql don't accept this particular operation).
        The method returns a tuple of the form <operator>, <type>.
        The resulting operator and type will be those applied to the
        resulting :class:`.BinaryExpression` as the final operator and the
        right-hand side of the expression.
        Note that only a subset of operators make usage of
        :meth:`._adapt_expression`,
        including math operators and user-defined operators, but not
        boolean comparison or special SQL keywords like MATCH or BETWEEN.
        """
        return op, other_comparator.type
    def _boolean_compare(self, expr, op, obj, negate=None, reverse=False,
                            _python_is_types=(util.NoneType, bool),
                            **kwargs):
        """Produce a boolean BinaryExpression for ``expr op obj``,
        special-casing comparisons against None/True/False."""
        if isinstance(obj, _python_is_types + (Null, True_, False_)):
            # allow x ==/!= True/False to be treated as a literal.
            # this comes out to "== / != true/false" or "1/0" if those
            # constants aren't supported and works on all platforms
            if op in (operators.eq, operators.ne) and \
                    isinstance(obj, (bool, True_, False_)):
                return BinaryExpression(expr,
                                obj,
                                op,
                                type_=sqltypes.BOOLEANTYPE,
                                negate=negate, modifiers=kwargs)
            else:
                # all other None/True/False uses IS, IS NOT
                if op in (operators.eq, operators.is_):
                    return BinaryExpression(expr, _const_expr(obj),
                            operators.is_,
                            negate=operators.isnot)
                elif op in (operators.ne, operators.isnot):
                    return BinaryExpression(expr, _const_expr(obj),
                            operators.isnot,
                            negate=operators.is_)
                else:
                    raise exc.ArgumentError(
                        "Only '=', '!=', 'is_()', 'isnot()' operators can "
                        "be used with None/True/False")
        else:
            # non-constant right side: coerce literals to bindparams
            obj = self._check_literal(expr, op, obj)
            if reverse:
                return BinaryExpression(obj,
                                expr,
                                op,
                                type_=sqltypes.BOOLEANTYPE,
                                negate=negate, modifiers=kwargs)
            else:
                return BinaryExpression(expr,
                                obj,
                                op,
                                type_=sqltypes.BOOLEANTYPE,
                                negate=negate, modifiers=kwargs)
    def _binary_operate(self, expr, op, obj, reverse=False, result_type=None,
                            **kw):
        """Produce a (typically math/string) BinaryExpression, with the
        result type adapted from the operand types when not given."""
        obj = self._check_literal(expr, op, obj)
        if reverse:
            left, right = obj, expr
        else:
            left, right = expr, obj
        if result_type is None:
            op, result_type = left.comparator._adapt_expression(
                                                op, right.comparator)
        return BinaryExpression(left, right, op, type_=result_type)
    def _scalar(self, expr, op, fn, **kw):
        # implementation for modifiers like desc()/asc(): apply ``fn``
        # (supplied by the dispatch table) directly to the expression
        return fn(expr)
    def _in_impl(self, expr, op, seq_or_selectable, negate_op, **kw):
        """Implementation for IN / NOT IN against either a selectable
        or a plain sequence of values."""
        seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
        if isinstance(seq_or_selectable, ScalarSelect):
            return self._boolean_compare(expr, op, seq_or_selectable,
                                  negate=negate_op)
        elif isinstance(seq_or_selectable, SelectBase):
            # TODO: if we ever want to support (x, y, z) IN (select x,
            # y, z from table), we would need a multi-column version of
            # as_scalar() to produce a multi- column selectable that
            # does not export itself as a FROM clause
            return self._boolean_compare(
                expr, op, seq_or_selectable.as_scalar(),
                negate=negate_op, **kw)
        elif isinstance(seq_or_selectable, (Selectable, TextClause)):
            return self._boolean_compare(expr, op, seq_or_selectable,
                                  negate=negate_op, **kw)
        # Handle non selectable arguments as sequences
        args = []
        for o in seq_or_selectable:
            if not _is_literal(o):
                if not isinstance(o, ColumnOperators):
                    raise exc.InvalidRequestError('in() function accept'
                            's either a list of non-selectable values, '
                            'or a selectable: %r' % o)
            elif o is None:
                o = null()
            else:
                o = expr._bind_param(op, o)
            args.append(o)
        if len(args) == 0:
            # Special case handling for empty IN's, behave like
            # comparison against zero row selectable.  We use != to
            # build the contradiction as it handles NULL values
            # appropriately, i.e. "not (x IN ())" should not return NULL
            # values for x.
            util.warn('The IN-predicate on "%s" was invoked with an '
                      'empty sequence. This results in a '
                      'contradiction, which nonetheless can be '
                      'expensive to evaluate.  Consider alternative '
                      'strategies for improved performance.' % expr)
            return expr != expr
        return self._boolean_compare(expr, op,
                              ClauseList(*args).self_group(against=op),
                              negate=negate_op)
    def _unsupported_impl(self, expr, op, *arg, **kw):
        # dispatch target for operators with no SQL rendering here
        raise NotImplementedError("Operator '%s' is not supported on "
                            "this expression" % op.__name__)
    def _neg_impl(self, expr, op, **kw):
        """See :meth:`.ColumnOperators.__neg__`."""
        return UnaryExpression(expr, operator=operators.neg)
    def _match_impl(self, expr, op, other, **kw):
        """See :meth:`.ColumnOperators.match`."""
        return self._boolean_compare(expr, operators.match_op,
                              self._check_literal(expr, operators.match_op,
                              other))
    def _distinct_impl(self, expr, op, **kw):
        """See :meth:`.ColumnOperators.distinct`."""
        return UnaryExpression(expr, operator=operators.distinct_op,
                                type_=expr.type)
    def _between_impl(self, expr, op, cleft, cright, **kw):
        """See :meth:`.ColumnOperators.between`."""
        return BinaryExpression(
                expr,
                ClauseList(
                    self._check_literal(expr, operators.and_, cleft),
                    self._check_literal(expr, operators.and_, cright),
                    operator=operators.and_,
                    group=False),
                operators.between_op)
    def _collate_impl(self, expr, op, other, **kw):
        # See :meth:`.ColumnOperators.collate`
        return collate(expr, other)
    # a mapping of operators with the method they use, along with
    # their negated operator for comparison operators
    operators = {
        "add": (_binary_operate,),
        "mul": (_binary_operate,),
        "sub": (_binary_operate,),
        "div": (_binary_operate,),
        "mod": (_binary_operate,),
        "truediv": (_binary_operate,),
        "custom_op": (_binary_operate,),
        "concat_op": (_binary_operate,),
        "lt": (_boolean_compare, operators.ge),
        "le": (_boolean_compare, operators.gt),
        "ne": (_boolean_compare, operators.eq),
        "gt": (_boolean_compare, operators.le),
        "ge": (_boolean_compare, operators.lt),
        "eq": (_boolean_compare, operators.ne),
        "like_op": (_boolean_compare, operators.notlike_op),
        "ilike_op": (_boolean_compare, operators.notilike_op),
        "notlike_op": (_boolean_compare, operators.like_op),
        "notilike_op": (_boolean_compare, operators.ilike_op),
        "contains_op": (_boolean_compare, operators.notcontains_op),
        "startswith_op": (_boolean_compare, operators.notstartswith_op),
        "endswith_op": (_boolean_compare, operators.notendswith_op),
        "desc_op": (_scalar, desc),
        "asc_op": (_scalar, asc),
        "nullsfirst_op": (_scalar, nullsfirst),
        "nullslast_op": (_scalar, nullslast),
        "in_op": (_in_impl, operators.notin_op),
        "notin_op": (_in_impl, operators.in_op),
        "is_": (_boolean_compare, operators.is_),
        "isnot": (_boolean_compare, operators.isnot),
        "collate": (_collate_impl,),
        "match_op": (_match_impl,),
        "distinct_op": (_distinct_impl,),
        "between_op": (_between_impl, ),
        "neg": (_neg_impl,),
        "getitem": (_unsupported_impl,),
        "lshift": (_unsupported_impl,),
        "rshift": (_unsupported_impl,),
    }
    def _check_literal(self, expr, operator, other):
        """Coerce ``other`` into a clause element: pass existing clause
        elements through, resolve __clause_element__/comparators, turn
        selectables into scalar selects, and bind plain literals."""
        if isinstance(other, (ColumnElement, TextClause)):
            if isinstance(other, BindParameter) and \
                isinstance(other.type, sqltypes.NullType):
                # TODO: perhaps we should not mutate the incoming
                # bindparam() here and instead make a copy of it.
                # this might be the only place that we're mutating
                # an incoming construct.
                other.type = expr.type
            return other
        elif hasattr(other, '__clause_element__'):
            other = other.__clause_element__()
        elif isinstance(other, sqltypes.TypeEngine.Comparator):
            other = other.expr
        if isinstance(other, (SelectBase, Alias)):
            return other.as_scalar()
        elif not isinstance(other, (ColumnElement, TextClause)):
            return expr._bind_param(operator, other)
        else:
            return other
class ColumnElement(ClauseElement, ColumnOperators):
    """Represent a column-oriented SQL expression suitable for usage in the
    "columns" clause, WHERE clause etc. of a statement.
    While the most familiar kind of :class:`.ColumnElement` is the
    :class:`.Column` object, :class:`.ColumnElement` serves as the basis
    for any unit that may be present in a SQL expression, including
    the expressions themselves, SQL functions, bound parameters,
    literal expressions, keywords such as ``NULL``, etc.
    :class:`.ColumnElement` is the ultimate base class for all such elements.
    A :class:`.ColumnElement` provides the ability to generate new
    :class:`.ColumnElement`
    objects using Python expressions.  This means that Python operators
    such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
    and allow the instantiation of further :class:`.ColumnElement` instances
    which are composed from other, more fundamental :class:`.ColumnElement`
    objects.  For example, two :class:`.ColumnClause` objects can be added
    together with the addition operator ``+`` to produce
    a :class:`.BinaryExpression`.
    Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
    of :class:`.ColumnElement`::
      >>> from sqlalchemy.sql import column
      >>> column('a') + column('b')
      <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
      >>> print column('a') + column('b')
      a + b
    :class:`.ColumnElement` supports the ability to be a *proxy* element,
    which indicates that the :class:`.ColumnElement` may be associated with
    a :class:`.Selectable` which was derived from another :class:`.Selectable`.
    An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a
    :class:`~sqlalchemy.schema.Table`.  For the ambitious, an in-depth
    discussion of this concept can be found at
    `Expression Transformations <http://techspot.zzzeek.org/2008/01/23/expression-transformations/>`_.
    """
    __visit_name__ = 'column'
    # defaults for column-level flags; overridden by e.g. schema.Column
    primary_key = False
    foreign_keys = []
    quote = None
    # compile-time label and label-key, computed by subclasses
    _label = None
    _key_label = None
    # alternate names used during result-row targeting
    _alt_names = ()
    @util.memoized_property
    def type(self):
        # untyped by default; subclasses supply a real type
        return sqltypes.NULLTYPE
    @util.memoized_property
    def comparator(self):
        # operator implementations are provided by the type's
        # comparator_factory, memoized per element
        return self.type.comparator_factory(self)
    def __getattr__(self, key):
        # delegate unknown attributes to the type-specific comparator
        try:
            return getattr(self.comparator, key)
        except AttributeError:
            raise AttributeError(
                    'Neither %r object nor %r object has an attribute %r' % (
                    type(self).__name__,
                    type(self.comparator).__name__,
                    key)
            )
    def operate(self, op, *other, **kwargs):
        # route Python operator overloads through the comparator
        return op(self.comparator, *other, **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        return op(other, self.comparator, **kwargs)
    def _bind_param(self, operator, obj):
        """Wrap a literal value in an anonymous, unique BindParameter
        typed relative to this column."""
        return BindParameter(None, obj,
                                    _compared_to_operator=operator,
                                    _compared_to_type=self.type, unique=True)
    @property
    def expression(self):
        """Return a column expression.
        Part of the inspection interface; returns self.
        """
        return self
    @property
    def _select_iterable(self):
        # a bare column expression contributes just itself to a SELECT
        return (self, )
    @util.memoized_property
    def base_columns(self):
        # the "roots" of the proxy graph: members of proxy_set that
        # are not themselves proxies
        return util.column_set(c for c in self.proxy_set
                                     if not hasattr(c, '_proxies'))
    @util.memoized_property
    def proxy_set(self):
        # transitive closure of this column plus everything it proxies
        s = util.column_set([self])
        if hasattr(self, '_proxies'):
            for c in self._proxies:
                s.update(c.proxy_set)
        return s
    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`.ColumnElement`
        has a common ancestor to this :class:`.ColumnElement`."""
        return bool(self.proxy_set.intersection(othercolumn.proxy_set))
    def _compare_name_for_result(self, other):
        """Return True if the given column element compares to this one
        when targeting within a result row."""
        return hasattr(other, 'name') and hasattr(self, 'name') and \
                other.name == self.name
    def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.
        """
        if name is None:
            # anonymous expression: label it, keyed by its string form
            name = self.anon_label
            key = str(self)
        else:
            key = name
        co = ColumnClause(_as_truncated(name) if name_is_truncatable else name,
                            selectable,
                            type_=getattr(self,
                          'type', None))
        co._proxies = [self]
        if selectable._is_clone_of is not None:
            # carry the clone lineage through to the proxy
            co._is_clone_of = \
                selectable._is_clone_of.columns.get(key)
        selectable._columns[key] = co
        return co
    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.
        Special arguments understood:
        :param use_proxies: when True, consider two columns that
          share a common base column as equivalent (i.e. shares_lineage())
        :param equivalents: a dictionary of columns as keys mapped to sets
          of columns. If the given "other" column is present in this
          dictionary, if any of the columns in the corresponding set() pass the
          comparison test, the result is True. This is used to expand the
          comparison to other columns that may be known to be equivalent to
          this one via foreign key or other criterion.
        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)
        # for/else: False only when no candidate matched
        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif hash(oth) == hash(self):
                return True
        else:
            return False
    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.
        This is a shortcut to the :func:`~.expression.label` function.
        if 'name' is None, an anonymous label name will be generated.
        """
        return Label(name, self, self.type)
    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.
        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.
        the compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.
        """
        return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
            'name', 'anon')))
class ColumnCollection(util.OrderedProperties):
    """An ordered dictionary that stores a list of ColumnElement
    instances.
    Overrides the ``__eq__()`` method to produce SQL clauses between
    sets of correlated columns.
    """
    def __init__(self, *cols):
        super(ColumnCollection, self).__init__()
        # keyed storage plus a parallel set for fast membership tests
        self._data.update((c.key, c) for c in cols)
        self.__dict__['_all_cols'] = util.column_set(self)
    def __str__(self):
        return repr([str(c) for c in self])
    def replace(self, column):
        """add the given column to this collection, removing unaliased
        versions of this column  as well as existing columns with the
        same key.
        e.g.::
            t = Table('sometable', metadata, Column('col1', Integer))
            t.columns.replace(Column('col1', Integer, key='columnone'))
        will remove the original 'col1' from the collection, and add
        the new column under the name 'columnone'.
        Used by schema.Column to override columns during table reflection.
        """
        if column.name in self and column.key != column.name:
            # drop a same-named column stored under its plain name
            other = self[column.name]
            if other.name == other.key:
                del self._data[other.name]
                self._all_cols.remove(other)
        if column.key in self._data:
            self._all_cols.remove(self._data[column.key])
        self._all_cols.add(column)
        self._data[column.key] = column
    def add(self, column):
        """Add a column to this collection.
        The key attribute of the column will be used as the hash key
        for this dictionary.
        """
        self[column.key] = column
    def __delitem__(self, key):
        raise NotImplementedError()
    def __setattr__(self, key, object):
        raise NotImplementedError()
    def __setitem__(self, key, value):
        if key in self:
            # this warning is primarily to catch select() statements
            # which have conflicting column names in their exported
            # columns collection
            existing = self[key]
            if not existing.shares_lineage(value):
                util.warn('Column %r on table %r being replaced by '
                          '%r, which has the same key.  Consider '
                          'use_labels for select() statements.' % (key,
                          getattr(existing, 'table', None), value))
            self._all_cols.remove(existing)
        # pop out memoized proxy_set as this
        # operation may very well be occurring
        # in a _make_proxy operation
        ColumnElement.proxy_set._reset(value)
        self._all_cols.add(value)
        self._data[key] = value
    def clear(self):
        self._data.clear()
        self._all_cols.clear()
    def remove(self, column):
        del self._data[column.key]
        self._all_cols.remove(column)
    def update(self, value):
        self._data.update(value)
        # rebuild the membership set from the updated mapping
        self._all_cols.clear()
        self._all_cols.update(self._data.values())
    def extend(self, iter):
        self.update((c.key, c) for c in iter)
    # unhashable: __eq__ produces SQL, not a boolean
    __hash__ = None
    def __eq__(self, other):
        # produce an AND of equivalences between related columns
        l = []
        for c in other:
            for local in self:
                if c.shares_lineage(local):
                    l.append(c == local)
        return and_(*l)
    def __contains__(self, other):
        if not isinstance(other, basestring):
            raise exc.ArgumentError("__contains__ requires a string argument")
        return util.OrderedProperties.__contains__(self, other)
    def __setstate__(self, state):
        self.__dict__['_data'] = state['_data']
        # _all_cols is derived state; rebuild it on unpickle
        self.__dict__['_all_cols'] = util.column_set(self._data.values())
    def contains_column(self, col):
        # this has to be done via set() membership
        return col in self._all_cols
    def as_immutable(self):
        # share underlying storage with a read-only facade
        return ImmutableColumnCollection(self._data, self._all_cols)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
    """Read-only facade over a :class:`.ColumnCollection`, sharing the
    originating collection's storage."""
    def __init__(self, data, colset):
        util.ImmutableProperties.__init__(self, data)
        self.__dict__['_all_cols'] = colset
    # mutators are disabled on the immutable variant
    extend = remove = util.ImmutableProperties._immutable
class ColumnSet(util.ordered_column_set):
    """An ordered set of columns whose ``==`` operator produces a SQL
    conjunction of column equivalences rather than a boolean."""
    def contains_column(self, col):
        """Membership test, mirroring ColumnCollection's interface."""
        return col in self
    def extend(self, cols):
        """Add each element of ``cols`` to this set, preserving order."""
        for col in cols:
            self.add(col)
    def __add__(self, other):
        # concatenation produces a plain list of the combined columns
        return list(self) + list(other)
    def __eq__(self, other):
        # AND together equivalences between lineage-related columns
        clauses = [c == local
                   for c in other
                   for local in self
                   if c.shares_lineage(local)]
        return and_(*clauses)
    def __hash__(self):
        return hash(tuple(self))
class Selectable(ClauseElement):
    """mark a class as being selectable"""
    __visit_name__ = 'selectable'
    # flag consulted by ClauseElement-level logic
    is_selectable = True
    @property
    def selectable(self):
        # inspection interface: a Selectable is its own selectable
        return self
class FromClause(Selectable):
"""Represent an element that can be used within the ``FROM``
clause of a ``SELECT`` statement.
The most common forms of :class:`.FromClause` are the
:class:`.Table` and the :func:`.select` constructs. Key
features common to all :class:`.FromClause` objects include:
* a :attr:`.c` collection, which provides per-name access to a collection
of :class:`.ColumnElement` objects.
* a :attr:`.primary_key` attribute, which is a collection of all those
:class:`.ColumnElement` objects that indicate the ``primary_key`` flag.
* Methods to generate various derivations of a "from" clause, including
:meth:`.FromClause.alias`, :meth:`.FromClause.join`,
:meth:`.FromClause.select`.
"""
__visit_name__ = 'fromclause'
named_with_column = False
_hide_froms = []
quote = None
schema = None
_memoized_property = util.group_expirable_memoized_property(["_columns"])
def count(self, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`.FromClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select(
[func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
    def select(self, whereclause=None, **params):
        """return a SELECT of this :class:`.FromClause`.

        :param whereclause: optional WHERE criterion.
        :param \**params: additional keyword arguments forwarded to
          :func:`.select`.

        .. seealso::
            :func:`~.sql.expression.select` - general purpose
            method which allows for arbitrary column lists.
        """
        return select([self], whereclause, **params)
    def join(self, right, onclause=None, isouter=False):
        """return a join of this :class:`.FromClause` against another
        :class:`.FromClause`.

        :param right: the right side of the join.
        :param onclause: optional ON criterion; when omitted, the Join
          construct determines it.
        :param isouter: when True, produce a LEFT OUTER JOIN.
        """
        return Join(self, right, onclause, isouter)
    def outerjoin(self, right, onclause=None):
        """return an outer join of this :class:`.FromClause` against another
        :class:`.FromClause`.

        Equivalent to ``join(right, onclause, isouter=True)``.
        """
        return Join(self, right, onclause, True)
    def alias(self, name=None):
        """return an alias of this :class:`.FromClause`.

        This is shorthand for calling::

            from sqlalchemy import alias
            a = alias(self, name=name)

        See :func:`~.expression.alias` for details.
        """
        return Alias(self, name)
    def is_derived_from(self, fromclause):
        """Return True if this FromClause is 'derived' from the given
        FromClause.
        An example would be an Alias of a Table is derived from that Table.
        """
        # this is essentially an "identity" check in the base class.
        # Other constructs override this to traverse through
        # contained elements.
        return fromclause in self._cloned_set
    def _is_lexical_equivalent(self, other):
        """Return True if this FromClause and the other represent
        the same lexical identity.
        This tests if either one is a copy of the other, or
        if they are the same via annotation identity.

        Note: returns the (possibly empty) intersection set, which is
        used by callers in a boolean context.
        """
        return self._cloned_set.intersection(other._cloned_set)
    def replace_selectable(self, old, alias):
        """replace all occurrences of FromClause 'old' with the given Alias
        object, returning a copy of this :class:`.FromClause`.
        """
        return sqlutil.ClauseAdapter(alias).traverse(self)
    def correspond_on_equivalents(self, column, equivalents):
        """Return corresponding_column for the given column, or if None
        search for a match in the given dictionary.

        :param column: target :class:`.ColumnElement`.
        :param equivalents: dictionary mapping columns to sets of
          equivalent columns.
        """
        col = self.corresponding_column(column, require_embedded=True)
        # NOTE(review): when col is None, "col in equivalents" tests for a
        # None key in the equivalents dict; it looks like "column in
        # equivalents" / equivalents[column] may have been intended —
        # confirm against callers before changing.
        if col is None and col in equivalents:
            for equiv in equivalents[col]:
                nc = self.corresponding_column(equiv, require_embedded=True)
                if nc:
                    return nc
        return col
    def corresponding_column(self, column, require_embedded=False):
        """Given a :class:`.ColumnElement`, return the exported
        :class:`.ColumnElement` object from this :class:`.Selectable`
        which corresponds to that original
        :class:`~sqlalchemy.schema.Column` via a common ancestor
        column.
        :param column: the target :class:`.ColumnElement` to be matched
        :param require_embedded: only return corresponding columns for
        the given :class:`.ColumnElement`, if the given
        :class:`.ColumnElement` is actually present within a sub-element
        of this :class:`.FromClause`.  Normally the column will match if
        it merely shares a common ancestor with one of the exported
        columns of this :class:`.FromClause`.

        Returns None when no correspondence is found.
        """
        # True when every member of target_set is reachable within
        # the candidate's expanded proxy set
        def embedded(expanded_proxy_set, target_set):
            for t in target_set.difference(expanded_proxy_set):
                if not set(_expand_cloned([t])
                        ).intersection(expanded_proxy_set):
                    return False
            return True
        # don't dig around if the column is locally present
        if self.c.contains_column(column):
            return column
        col, intersect = None, None
        target_set = column.proxy_set
        cols = self.c
        for c in cols:
            expanded_proxy_set = set(_expand_cloned(c.proxy_set))
            i = target_set.intersection(expanded_proxy_set)
            if i and (not require_embedded
                      or embedded(expanded_proxy_set, target_set)):
                if col is None:
                    # no corresponding column yet, pick this one.
                    col, intersect = c, i
                elif len(i) > len(intersect):
                    # 'c' has a larger field of correspondence than
                    # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
                    # matches a1.c.x->table.c.x better than
                    # selectable.c.x->table.c.x does.
                    col, intersect = c, i
                elif i == intersect:
                    # they have the same field of correspondence. see
                    # which proxy_set has fewer columns in it, which
                    # indicates a closer relationship with the root
                    # column. Also take into account the "weight"
                    # attribute which CompoundSelect() uses to give
                    # higher precedence to columns based on vertical
                    # position in the compound statement, and discard
                    # columns that have no reference to the target
                    # column (also occurs with CompoundSelect)
                    col_distance = util.reduce(operator.add,
                                    [sc._annotations.get('weight', 1) for sc in
                                        col.proxy_set if sc.shares_lineage(column)])
                    c_distance = util.reduce(operator.add,
                                    [sc._annotations.get('weight', 1) for sc in
                                        c.proxy_set if sc.shares_lineage(column)])
                    if c_distance < col_distance:
                        col, intersect = c, i
        return col
@property
def description(self):
"""a brief description of this FromClause.
Used primarily for error message formatting.
"""
return getattr(self, 'name', self.__class__.__name__ + " object")
    def _reset_exported(self):
        """delete memoized collections when a FromClause is cloned."""
        # _memoized_property tracks every memoized attribute on this class;
        # expire_instance() discards their cached values so that .columns,
        # .primary_key and .foreign_keys are rebuilt on next access.
        self._memoized_property.expire_instance(self)
    @_memoized_property
    def columns(self):
        """A named-based collection of :class:`.ColumnElement` objects
        maintained by this :class:`.FromClause`.

        The :attr:`.columns`, or :attr:`.c` collection, is the gateway
        to the construction of SQL expressions using table-bound or
        other selectable-bound columns::

            select([mytable]).where(mytable.c.somecolumn == 5)
        """
        # Lazily build the collection on first access.  _init_collections()
        # asserts the collections are not already present, hence the guard.
        if '_columns' not in self.__dict__:
            self._init_collections()
        self._populate_column_collection()
        return self._columns.as_immutable()
    @_memoized_property
    def primary_key(self):
        """Return the collection of Column objects which comprise the
        primary key of this FromClause."""
        # _init_collections() assigns self.primary_key directly on the
        # instance, shadowing this memoized property; returning the
        # (now-populated) instance attribute memoizes the final value.
        self._init_collections()
        self._populate_column_collection()
        return self.primary_key
    @_memoized_property
    def foreign_keys(self):
        """Return the collection of ForeignKey objects which this
        FromClause references."""
        # Same instance-attribute-shadowing pattern as .primary_key:
        # _init_collections() sets self.foreign_keys on the instance.
        self._init_collections()
        self._populate_column_collection()
        return self.foreign_keys
c = property(attrgetter('columns'),
doc="An alias for the :attr:`.columns` attribute.")
_select_iterable = property(attrgetter('columns'))
def _init_collections(self):
assert '_columns' not in self.__dict__
assert 'primary_key' not in self.__dict__
assert 'foreign_keys' not in self.__dict__
self._columns = ColumnCollection()
self.primary_key = ColumnSet()
self.foreign_keys = set()
@property
def _cols_populated(self):
return '_columns' in self.__dict__
    def _populate_column_collection(self):
        """Called on subclasses to establish the .c collection.

        Each implementation has a different way of establishing
        this collection.
        """
        # Default implementation is a no-op; subclasses such as Join and
        # Alias override this to fill self._columns / self.primary_key /
        # self.foreign_keys.
def _refresh_for_new_column(self, column):
"""Given a column added to the .c collection of an underlying
selectable, produce the local version of that column, assuming this
selectable ultimately should proxy this column.
this is used to "ping" a derived selectable to add a new column
to its .c. collection when a Column has been added to one of the
Table objects it ultimtely derives from.
If the given selectable hasn't populated it's .c. collection yet,
it should at least pass on the message to the contained selectables,
but it will return None.
This method is currently used by Declarative to allow Table
columns to be added to a partially constructed inheritance
mapping that may have already produced joins. The method
isn't public right now, as the full span of implications
and/or caveats aren't yet clear.
It's also possible that this functionality could be invoked by
default via an event, which would require that
selectables maintain a weak referencing collection of all
derivations.
"""
if not self._cols_populated:
return None
elif column.key in self.columns and self.columns[column.key] is column:
return column
else:
return None
class BindParameter(ColumnElement):
    """Represent a bind parameter.

    Public constructor is the :func:`bindparam()` function.
    """
    __visit_name__ = 'bindparam'
    # quoting behavior of the parameter name; None means "use default"
    quote = None
    # set True for bind params generated internally by insert()/update()
    # against column values, to distinguish them from user-defined params
    _is_crud = False
    def __init__(self, key, value, type_=None, unique=False,
                 callable_=None,
                 isoutparam=False, required=False,
                 quote=None,
                 _compared_to_operator=None,
                 _compared_to_type=None):
        """Construct a BindParameter.

        :param key:
          the key for this bind param.  Will be used in the generated
          SQL statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`BindParameter` objects exist with the same
          key, or if its length is too long and truncation is
          required.

        :param value:
          Initial value for this bind param.  This value may be
          overridden by the dictionary of parameters sent to statement
          compilation/execution.

        :param callable\_:
          A callable function that takes the place of "value".  The function
          will be called at statement execution time to determine the
          ultimate value.   Used for scenarios where the actual bind
          value cannot be determined at the point at which the clause
          construct is created, but embedded bind values are still desirable.

        :param type\_:
          A ``TypeEngine`` object that will be used to pre-process the
          value corresponding to this :class:`BindParameter` at
          execution time.

        :param unique:
          if True, the key name of this BindParamClause will be
          modified if another :class:`BindParameter` of the same name
          already has been located within the containing
          :class:`.ClauseElement`.

        :param quote:
          True if this parameter name requires quoting and is not
          currently known as a SQLAlchemy reserved word; this currently
          only applies to the Oracle backend.

        :param required:
          a value is required at execution time.

        :param isoutparam:
          if True, the parameter should be treated like a stored procedure
          "OUT" parameter.
        """
        # generate an anonymous, instance-unique key when uniqueness is
        # requested, or when no key was given at all
        if unique:
            self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
                    or 'param'))
        else:
            self.key = key or _anonymous_label('%%(%d param)s'
                    % id(self))
        # identifying key that won't change across
        # clones, used to identify the bind's logical
        # identity
        self._identifying_key = self.key
        # key that was passed in the first place, used to
        # generate new keys
        self._orig_key = key or 'param'
        self.unique = unique
        self.value = value
        self.callable = callable_
        self.isoutparam = isoutparam
        self.required = required
        self.quote = quote
        # type resolution order: explicit type_, then the type coerced
        # from the expression this bind is compared against, then a
        # lookup based on the Python type of the value.
        if type_ is None:
            if _compared_to_type is not None:
                self.type = \
                    _compared_to_type.coerce_compared_value(
                        _compared_to_operator, value)
            else:
                self.type = sqltypes._type_map.get(type(value),
                        sqltypes.NULLTYPE)
        elif isinstance(type_, type):
            # a type class was passed; instantiate with defaults
            self.type = type_()
        else:
            self.type = type_
    @property
    def effective_value(self):
        """Return the value of this bound parameter,
        taking into account if the ``callable`` parameter
        was set.

        The ``callable`` value will be evaluated
        and returned if present, else ``value``.
        """
        if self.callable:
            return self.callable()
        else:
            return self.value
    def _clone(self):
        c = ClauseElement._clone(self)
        # a "unique" bind gets a fresh anonymous key per clone, derived
        # from the original user-supplied key
        if self.unique:
            c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
                    or 'param'))
        return c
    def _convert_to_unique(self):
        # upgrade an ordinary bind to "unique" in place, regenerating
        # its key anonymously; no-op if already unique
        if not self.unique:
            self.unique = True
            self.key = _anonymous_label('%%(%d %s)s' % (id(self),
                    self._orig_key or 'param'))
    def compare(self, other, **kw):
        """Compare this :class:`BindParameter` to the given
        clause."""
        # keys are intentionally not compared; two binds are equivalent
        # if their type affinity and value match
        return isinstance(other, BindParameter) \
            and self.type._compare_type_affinity(other.type) \
            and self.value == other.value
    def __getstate__(self):
        """execute a deferred value for serialization purposes."""
        d = self.__dict__.copy()
        v = self.value
        if self.callable:
            # resolve the callable now; callables aren't picklable
            v = self.callable()
            d['callable'] = None
        d['value'] = v
        return d
    def __repr__(self):
        return 'BindParameter(%r, %r, type_=%r)' % (self.key,
                self.value, self.type)
class TypeClause(ClauseElement):
    """Handle a type keyword in a SQL statement.

    Used by the ``Case`` statement.
    """
    __visit_name__ = 'typeclause'
    def __init__(self, type):
        # the type object to be rendered; consumed by the compiler
        # via the 'typeclause' visit name
        self.type = type
class Generative(object):
    """Allow a ClauseElement to generate itself via the
    @_generative decorator.
    """

    def _generate(self):
        # Produce a shallow copy: a new instance of the same class whose
        # __dict__ is a fresh container holding the same values.
        cls = self.__class__
        duplicate = cls.__new__(cls)
        duplicate.__dict__ = dict(self.__dict__)
        return duplicate
class Executable(Generative):
    """Mark a ClauseElement as supporting execution.

    :class:`.Executable` is a superclass for all "statement" types
    of objects, including :func:`select`, :func:`delete`, :func:`update`,
    :func:`insert`, :func:`text`.
    """
    supports_execution = True
    # per-statement execution options; an immutable dict so the copies
    # produced by @_generative can share it safely
    _execution_options = util.immutabledict()
    # Engine or Connection explicitly bound to this statement, if any
    _bind = None
    @_generative
    def execution_options(self, **kw):
        """ Set non-SQL options for the statement which take effect during
        execution.

        Execution options can be set on a per-statement or
        per :class:`.Connection` basis.   Additionally, the
        :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide
        access to execution options which they in turn configure upon
        connections.

        The :meth:`execution_options` method is generative.  A new
        instance of this statement is returned that contains the options::

            statement = select([table.c.x, table.c.y])
            statement = statement.execution_options(autocommit=True)

        Note that only a subset of possible execution options can be applied
        to a statement - these include "autocommit" and "stream_results",
        but not "isolation_level" or "compiled_cache".
        See :meth:`.Connection.execution_options` for a full list of
        possible options.

        .. seealso::

            :meth:`.Connection.execution_options()`

            :meth:`.Query.execution_options()`
        """
        if 'isolation_level' in kw:
            raise exc.ArgumentError(
                "'isolation_level' execution option may only be specified "
                "on Connection.execution_options(), or "
                "per-engine using the isolation_level "
                "argument to create_engine()."
            )
        if 'compiled_cache' in kw:
            raise exc.ArgumentError(
                "'compiled_cache' execution option may only be specified "
                "on Connection.execution_options(), not per statement."
            )
        # @_generative has already copied self; mutate the copy in place.
        self._execution_options = self._execution_options.union(kw)
    def execute(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`."""
        e = self.bind
        if e is None:
            label = getattr(self, 'description', self.__class__.__name__)
            msg = ('This %s is not directly bound to a Connection or Engine.'
                   'Use the .execute() method of a Connection or Engine '
                   'to execute this construct.' % label)
            raise exc.UnboundExecutionError(msg)
        return e._execute_clauseelement(self, multiparams, params)
    def scalar(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`, returning the
        result's scalar representation.
        """
        return self.execute(*multiparams, **params).scalar()
    @property
    def bind(self):
        """Returns the :class:`.Engine` or :class:`.Connection` to
        which this :class:`.Executable` is bound, or None if none found.

        This is a traversal which checks locally, then
        checks among the "from" clauses of associated objects
        until a bound engine or connection is found.
        """
        if self._bind is not None:
            return self._bind
        # search contained FROM objects (skipping self) for a bind;
        # the for/else returns None when the loop completes unbound
        for f in _from_objects(self):
            if f is self:
                continue
            engine = f.bind
            if engine is not None:
                return engine
        else:
            return None
# legacy, some outside users may be calling this
_Executable = Executable
class TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.

    Public constructor is the :func:`text()` function.
    """
    __visit_name__ = 'textclause'
    # matches ":name" bind-parameter syntax, excluding "::" casts and
    # backslash-escaped colons
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = \
        Executable._execution_options.union(
            {'autocommit': PARSE_AUTOCOMMIT})
    @property
    def _select_iterable(self):
        # a text fragment acts as a single selectable unit
        return (self,)
    @property
    def selectable(self):
        return self
    _hide_froms = []
    def __init__(
        self,
        text='',
        bind=None,
        bindparams=None,
        typemap=None,
        autocommit=None,
    ):
        """Construct a new :class:`.TextClause`.

        :param text: the literal SQL string, with ``:name`` tokens
         interpreted as bind parameters.
        :param bind: optional Engine or Connection for execution.
        :param bindparams: optional list of :class:`.BindParameter`
         objects overriding the auto-generated ones.
        :param typemap: optional mapping of result-column name to type.
        :param autocommit: deprecated; use
         ``.execution_options(autocommit=True)``.
        """
        self._bind = bind
        self.bindparams = {}
        self.typemap = typemap
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated.  '
                                 'Use .execution_options(autocommit=Tru'
                                 'e)')
            self._execution_options = \
                self._execution_options.union(
                    {'autocommit': autocommit})
        if typemap is not None:
            # normalize type classes/instances to instances
            for key in typemap.keys():
                typemap[key] = sqltypes.to_instance(typemap[key])
        def repl(m):
            self.bindparams[m.group(1)] = bindparam(m.group(1))
            return ':%s' % m.group(1)
        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)
        if bindparams is not None:
            for b in bindparams:
                self.bindparams[b.key] = b
    @property
    def type(self):
        # NOTE(review): list(self.typemap) yields the typemap *keys*
        # (result column names), not the type instances -- confirm
        # whether self.typemap.values() was intended here.
        if self.typemap is not None and len(self.typemap) == 1:
            return list(self.typemap)[0]
        else:
            return sqltypes.NULLTYPE
    @property
    def comparator(self):
        return self.type.comparator_factory(self)
    def self_group(self, against=None):
        # only IN expressions require parenthesization of raw text
        if against is operators.in_op:
            return Grouping(self)
        else:
            return self
    def _copy_internals(self, clone=_clone, **kw):
        self.bindparams = dict((b.key, clone(b, **kw))
                               for b in self.bindparams.values())
    def get_children(self, **kwargs):
        return self.bindparams.values()
class Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement.

    Public constructor is the :func:`null()` function.
    """

    __visit_name__ = 'null'

    def __init__(self):
        # NULL carries no meaningful type; NULLTYPE is the sentinel
        self.type = sqltypes.NULLTYPE

    def compare(self, other):
        """Any two NULL constructs compare as equivalent."""
        return isinstance(other, Null)
class False_(ColumnElement):
    """Represent the ``false`` keyword in a SQL statement.

    Public constructor is the :func:`false()` function.
    """

    __visit_name__ = 'false'

    def __init__(self):
        # renders as a boolean-typed expression
        self.type = sqltypes.BOOLEANTYPE

    def compare(self, other):
        """Any two ``false`` constructs compare as equivalent."""
        return isinstance(other, False_)
class True_(ColumnElement):
    """Represent the ``true`` keyword in a SQL statement.

    Public constructor is the :func:`true()` function.
    """

    __visit_name__ = 'true'

    def __init__(self):
        # renders as a boolean-typed expression
        self.type = sqltypes.BOOLEANTYPE

    def compare(self, other):
        """Any two ``true`` constructs compare as equivalent."""
        return isinstance(other, True_)
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator.

    By default, is comma-separated, such as a column listing.
    """
    __visit_name__ = 'clauselist'
    def __init__(self, *clauses, **kwargs):
        # operator used between elements; defaults to comma
        self.operator = kwargs.pop('operator', operators.comma_op)
        # whether this list as a whole may be parenthesized
        self.group = kwargs.pop('group', True)
        # whether each element is individually grouped against the operator
        self.group_contents = kwargs.pop('group_contents', True)
        if self.group_contents:
            self.clauses = [
                _literal_as_text(clause).self_group(against=self.operator)
                for clause in clauses if clause is not None]
        else:
            self.clauses = [
                _literal_as_text(clause)
                for clause in clauses if clause is not None]
    def __iter__(self):
        return iter(self.clauses)
    def __len__(self):
        return len(self.clauses)
    @property
    def _select_iterable(self):
        return iter(self)
    def append(self, clause):
        # TODO: not sure if i like the 'group_contents' flag.  need to
        # define the difference between a ClauseList of ClauseLists,
        # and a "flattened" ClauseList of ClauseLists.  flatten()
        # method ?
        if self.group_contents:
            self.clauses.append(_literal_as_text(clause).\
                                self_group(against=self.operator))
        else:
            self.clauses.append(_literal_as_text(clause))
    def _copy_internals(self, clone=_clone, **kw):
        self.clauses = [clone(clause, **kw) for clause in self.clauses]
    def get_children(self, **kwargs):
        return self.clauses
    @property
    def _from_objects(self):
        # union of FROM objects across all contained clauses
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))
    def self_group(self, against=None):
        if self.group and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
    def compare(self, other, **kw):
        """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
        including a comparison of all the clause items.
        """
        # a single-element list compares directly against a non-list
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            return self.clauses[0].compare(other, **kw)
        elif isinstance(other, ClauseList) and \
                len(self.clauses) == len(other.clauses):
            # for/else: the else branch runs only when every pairwise
            # element comparison succeeded (no early return)
            for i in range(0, len(self.clauses)):
                if not self.clauses[i].compare(other.clauses[i], **kw):
                    return False
            else:
                return self.operator == other.operator
        else:
            return False
class BooleanClauseList(ClauseList, ColumnElement):
    """A :class:`.ClauseList` of boolean expressions, e.g. AND/OR chains."""

    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        super(BooleanClauseList, self).__init__(*clauses, **kwargs)
        # the list itself is boolean-typed unless overridden via type_
        type_ = kwargs.get('type_', sqltypes.Boolean)
        self.type = sqltypes.to_instance(type_)

    @property
    def _select_iterable(self):
        # acts as a single column expression in a SELECT list
        return (self, )

    def self_group(self, against=None):
        # an empty boolean list renders as nothing; never parenthesize it
        if not self.clauses:
            return self
        return super(BooleanClauseList, self).self_group(against=against)
class Tuple(ClauseList, ColumnElement):
    """Represent a SQL tuple expression, e.g. ``(a, b, c)``."""

    def __init__(self, *clauses, **kw):
        coerced = [_literal_as_binds(c) for c in clauses]
        self.type = kw.pop('type_', None)
        if self.type is None:
            # derive the type from the coerced member expressions
            self.type = _type_from_args(coerced)
        super(Tuple, self).__init__(*coerced, **kw)

    @property
    def _select_iterable(self):
        # acts as a single column expression in a SELECT list
        return (self, )

    def _bind_param(self, operator, obj):
        # expand a composite comparison value into one bind per element
        params = [
            BindParameter(None, o, _compared_to_operator=operator,
                          _compared_to_type=self.type, unique=True)
            for o in obj
        ]
        return Tuple(*params).self_group()
class Case(ColumnElement):
    """Represent a ``CASE`` expression.

    Public constructor is the :func:`.case` function.
    """
    __visit_name__ = 'case'
    def __init__(self, whens, value=None, else_=None):
        # accept a dictionary-like "whens" as well as a sequence of
        # (condition, result) tuples
        try:
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            pass
        if value is not None:
            # "simple" CASE: conditions are compared against `value`,
            # so plain literals are acceptable as conditions
            whenlist = [
                (_literal_as_binds(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # "searched" CASE: each condition must be a SQL expression
            whenlist = [
                (_no_literals(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        # derive the expression's type from the last WHEN's result clause
        if whenlist:
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None
        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)
        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None
    def _copy_internals(self, clone=_clone, **kw):
        # clone every embedded expression for this copied Case
        if self.value is not None:
            self.value = clone(self.value, **kw)
        self.whens = [(clone(x, **kw), clone(y, **kw))
                      for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_, **kw)
    def get_children(self, **kwargs):
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_
    @property
    def _from_objects(self):
        # union of FROM objects among all embedded expressions
        return list(itertools.chain(*[x._from_objects for x in
                                      self.get_children()]))
class FunctionElement(Executable, ColumnElement, FromClause):
    """Base for SQL function-oriented constructs.

    .. seealso::

        :class:`.Function` - named SQL function.

        :data:`.func` - namespace which produces registered or ad-hoc
        :class:`.Function` instances.

        :class:`.GenericFunction` - allows creation of registered function
        types.

    """
    packagenames = ()
    def __init__(self, *clauses, **kwargs):
        """Construct a :class:`.FunctionElement`.
        """
        # self.name is expected to have been established by the subclass
        # (e.g. Function.__init__) before this constructor runs
        args = [_literal_as_binds(c, self.name) for c in clauses]
        self.clause_expr = ClauseList(
            operator=operators.comma_op,
            group_contents=True, *args).\
            self_group()
    @property
    def columns(self):
        """Fulfill the 'columns' contract of :class:`.ColumnElement`.

        Returns a single-element list consisting of this object.
        """
        return [self]
    @util.memoized_property
    def clauses(self):
        """Return the underlying :class:`.ClauseList` which contains
        the arguments for this :class:`.FunctionElement`.
        """
        return self.clause_expr.element
    def over(self, partition_by=None, order_by=None):
        """Produce an OVER clause against this function.

        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.

        The expression::

            func.row_number().over(order_by='x')

        is shorthand for::

            from sqlalchemy import over
            over(func.row_number(), order_by='x')

        See :func:`~.expression.over` for a full description.

        .. versionadded:: 0.7
        """
        return over(self, partition_by=partition_by, order_by=order_by)
    @property
    def _from_objects(self):
        return self.clauses._from_objects
    def get_children(self, **kwargs):
        return self.clause_expr,
    def _copy_internals(self, clone=_clone, **kw):
        self.clause_expr = clone(self.clause_expr, **kw)
        self._reset_exported()
        # invalidate the memoized .clauses so it re-derives from the
        # cloned clause_expr
        FunctionElement.clauses._reset(self)
    def select(self):
        """Produce a :func:`~.expression.select` construct
        against this :class:`.FunctionElement`.

        This is shorthand for::

            s = select([function_element])
        """
        s = select([self])
        # propagate options such as autocommit onto the new select
        if self._execution_options:
            s = s.execution_options(**self._execution_options)
        return s
    def scalar(self):
        """Execute this :class:`.FunctionElement` against an embedded
        'bind' and return a scalar value.

        This first calls :meth:`~.FunctionElement.select` to
        produce a SELECT construct.

        Note that :class:`.FunctionElement` can be passed to
        the :meth:`.Connectable.scalar` method of :class:`.Connection`
        or :class:`.Engine`.
        """
        return self.select().execute().scalar()
    def execute(self):
        """Execute this :class:`.FunctionElement` against an embedded
        'bind'.

        This first calls :meth:`~.FunctionElement.select` to
        produce a SELECT construct.

        Note that :class:`.FunctionElement` can be passed to
        the :meth:`.Connectable.execute` method of :class:`.Connection`
        or :class:`.Engine`.
        """
        return self.select().execute()
    def _bind_param(self, operator, obj):
        return BindParameter(None, obj, _compared_to_operator=operator,
                             _compared_to_type=self.type, unique=True)
class Function(FunctionElement):
    """Describe a named SQL function.

    See the superclass :class:`.FunctionElement` for a description
    of public methods.

    .. seealso::

        :data:`.func` - namespace which produces registered or ad-hoc
        :class:`.Function` instances.

        :class:`.GenericFunction` - allows creation of registered function
        types.

    """
    __visit_name__ = 'function'
    def __init__(self, name, *clauses, **kw):
        """Construct a :class:`.Function`.

        The :data:`.func` construct is normally used to construct
        new :class:`.Function` instances.
        """
        self.packagenames = kw.pop('packagenames', None) or []
        # name must be set before FunctionElement.__init__, which
        # references self.name while coercing the argument clauses
        self.name = name
        # 'bind' and 'type_' are read but deliberately left in kw
        self._bind = kw.get('bind', None)
        self.type = sqltypes.to_instance(kw.get('type_', None))
        FunctionElement.__init__(self, *clauses, **kw)
    def _bind_param(self, operator, obj):
        # binds generated against this function carry its name as key
        return BindParameter(self.name, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             unique=True)
class Cast(ColumnElement):
    """Represent a ``CAST`` expression.

    Public constructor is the :func:`.cast` function.
    """
    __visit_name__ = 'cast'
    def __init__(self, clause, totype, **kwargs):
        # **kwargs is accepted but currently unused
        self.type = sqltypes.to_instance(totype)
        self.clause = _literal_as_binds(clause, None)
        # TypeClause wraps the target type for rendering by the compiler
        self.typeclause = TypeClause(self.type)
    def _copy_internals(self, clone=_clone, **kw):
        self.clause = clone(self.clause, **kw)
        self.typeclause = clone(self.typeclause, **kw)
    def get_children(self, **kwargs):
        return self.clause, self.typeclause
    @property
    def _from_objects(self):
        return self.clause._from_objects
class Extract(ColumnElement):
    """Represent an ``EXTRACT(field FROM expr)`` expression.

    Public constructor is the :func:`.extract` function.
    """
    __visit_name__ = 'extract'
    def __init__(self, field, expr, **kwargs):
        # EXTRACT always produces an integer result
        self.type = sqltypes.Integer()
        self.field = field
        self.expr = _literal_as_binds(expr, None)
    def _copy_internals(self, clone=_clone, **kw):
        self.expr = clone(self.expr, **kw)
    def get_children(self, **kwargs):
        return self.expr,
    @property
    def _from_objects(self):
        return self.expr._from_objects
class UnaryExpression(ColumnElement):
    """Define a 'unary' expression.

    A unary expression has a single column expression
    and an operator.  The operator can be placed on the left
    (where it is called the 'operator') or right (where it is called the
    'modifier') of the column expression.
    """
    __visit_name__ = 'unary'
    def __init__(self, element, operator=None, modifier=None,
                 type_=None, negate=None):
        self.operator = operator
        self.modifier = modifier
        # group the element against whichever side-operator is present
        self.element = _literal_as_text(element).\
            self_group(against=self.operator or self.modifier)
        self.type = sqltypes.to_instance(type_)
        # optional operator used when this expression is negated
        self.negate = negate
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
    def get_children(self, **kwargs):
        return self.element,
    def compare(self, other, **kw):
        """Compare this :class:`UnaryExpression` against the given
        :class:`.ClauseElement`."""
        return (
            isinstance(other, UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )
    def _negate(self):
        # a known negation operator produces a direct inverse; otherwise
        # fall back to the generic NOT wrapping in the superclass
        if self.negate is not None:
            return UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return super(UnaryExpression, self)._negate()
    def self_group(self, against=None):
        if self.operator and operators.is_precedent(self.operator,
                                                    against):
            return Grouping(self)
        else:
            return self
class BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``.

    A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expression::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b
    """
    __visit_name__ = 'binary'
    def __init__(self, left, right, operator, type_=None,
                 negate=None, modifiers=None):
        # allow compatibility with libraries that
        # refer to BinaryExpression directly and pass strings
        if isinstance(operator, basestring):
            operator = operators.custom_op(operator)
        # retain the un-coerced operands for __nonzero__ identity tests
        self._orig = (left, right)
        self.left = _literal_as_text(left).self_group(against=operator)
        self.right = _literal_as_text(right).self_group(against=operator)
        self.operator = operator
        self.type = sqltypes.to_instance(type_)
        self.negate = negate
        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers
    def __nonzero__(self):
        # Py2 truth-value hook: an ==/!= expression between two clause
        # elements evaluates to whether the operands are the same
        # (by hash identity); any other operator is an error
        if self.operator in (operator.eq, operator.ne):
            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
        else:
            raise TypeError("Boolean value of this clause is not defined")
    @property
    def is_comparison(self):
        return operators.is_comparison(self.operator)
    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)
    def get_children(self, **kwargs):
        return self.left, self.right
    def compare(self, other, **kw):
        """Compare this :class:`BinaryExpression` against the
        given :class:`BinaryExpression`."""
        # for commutative operators, also accept the operands swapped
        return (
            isinstance(other, BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )
    def self_group(self, against=None):
        if operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
    def _negate(self):
        # a known negation operator swaps operator/negate in place;
        # otherwise fall back to generic NOT wrapping
        if self.negate is not None:
            return BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=sqltypes.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(BinaryExpression, self)._negate()
class Exists(UnaryExpression):
    """Represent an ``EXISTS`` clause.

    Public constructor is the :func:`.exists` function.
    """
    __visit_name__ = UnaryExpression.__visit_name__
    # an EXISTS contributes no FROM objects to the enclosing statement;
    # class-level empty list shared (and never mutated) by all instances
    _from_objects = []
    def __init__(self, *args, **kwargs):
        # accept either an existing select/scalar-select, or arguments
        # to construct one (defaulting to SELECT *)
        if args and isinstance(args[0], (SelectBase, ScalarSelect)):
            s = args[0]
        else:
            if not args:
                args = ([literal_column('*')],)
            s = select(*args, **kwargs).as_scalar().self_group()
        UnaryExpression.__init__(self, s, operator=operators.exists,
                                 type_=sqltypes.Boolean)
    def select(self, whereclause=None, **params):
        return select([self], whereclause, **params)
    def correlate(self, *fromclause):
        # generative: clone, then apply to the contained select
        e = self._clone()
        e.element = self.element.correlate(*fromclause).self_group()
        return e
    def correlate_except(self, *fromclause):
        e = self._clone()
        e.element = self.element.correlate_except(*fromclause).self_group()
        return e
    def select_from(self, clause):
        """return a new :class:`.Exists` construct, applying the given
        expression to the :meth:`.Select.select_from` method of the select
        statement contained.
        """
        e = self._clone()
        e.element = self.element.select_from(clause).self_group()
        return e
    def where(self, clause):
        """return a new exists() construct with the given expression added to
        its WHERE clause, joined to the existing clause via AND, if any.
        """
        e = self._clone()
        e.element = self.element.where(clause).self_group()
        return e
class Join(FromClause):
    """represent a ``JOIN`` construct between two :class:`.FromClause`
    elements.

    The public constructor function for :class:`.Join` is the module-level
    :func:`join()` function, as well as the :func:`join()` method available
    off all :class:`.FromClause` subclasses.
    """
    __visit_name__ = 'join'
    def __init__(self, left, right, onclause=None, isouter=False):
        """Construct a new :class:`.Join`.

        The usual entrypoint here is the :func:`~.expression.join`
        function or the :meth:`.FromClause.join` method of any
        :class:`.FromClause` object.
        """
        self.left = _interpret_as_from(left)
        self.right = _interpret_as_from(right).self_group()
        # derive the ON clause from foreign keys if not given explicitly
        if onclause is None:
            self.onclause = self._match_primaries(self.left, self.right)
        else:
            self.onclause = onclause
        self.isouter = isouter
    @property
    def description(self):
        return "Join object on %s(%d) and %s(%d)" % (
            self.left.description,
            id(self.left),
            self.right.description,
            id(self.right))
    def is_derived_from(self, fromclause):
        return fromclause is self or \
            self.left.is_derived_from(fromclause) or \
            self.right.is_derived_from(fromclause)
    def self_group(self, against=None):
        return FromGrouping(self)
    def _populate_column_collection(self):
        # export all columns from both sides; the join's primary key is
        # the reduced set of PK columns given the ON clause equivalences
        columns = [c for c in self.left.columns] + \
            [c for c in self.right.columns]
        self.primary_key.extend(sqlutil.reduce_columns(
            (c for c in columns if c.primary_key), self.onclause))
        self._columns.update((col._label, col) for col in columns)
        self.foreign_keys.update(itertools.chain(
            *[col.foreign_keys for col in columns]))
    def _refresh_for_new_column(self, column):
        # delegate to whichever side proxies the new column
        col = self.left._refresh_for_new_column(column)
        if col is None:
            col = self.right._refresh_for_new_column(column)
        if col is not None:
            if self._cols_populated:
                self._columns[col._label] = col
                # NOTE(review): this adds the column object itself to the
                # set of ForeignKey objects; confirm whether
                # foreign_keys.update(col.foreign_keys) was intended.
                self.foreign_keys.add(col)
                if col.primary_key:
                    self.primary_key.add(col)
                return col
        return None
    def _copy_internals(self, clone=_clone, **kw):
        self._reset_exported()
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)
        self.onclause = clone(self.onclause, **kw)
    def get_children(self, **kwargs):
        return self.left, self.right, self.onclause
    def _match_primaries(self, left, right):
        # when joining to an existing join, exclude its right side from
        # the FK search so the new ON clause targets the original tables
        if isinstance(left, Join):
            left_right = left.right
        else:
            left_right = None
        return sqlutil.join_condition(left, right, a_subset=left_right)
    def select(self, whereclause=None, **kwargs):
        """Create a :class:`.Select` from this :class:`.Join`.

        The equivalent long-hand form, given a :class:`.Join` object
        ``j``, is::

            from sqlalchemy import select
            j = select([j.left, j.right], **kw).\\
                where(whereclause).\\
                select_from(j)

        :param whereclause: the WHERE criterion that will be sent to
          the :func:`select()` function

        :param \**kwargs: all other kwargs are sent to the
          underlying :func:`select()` function.
        """
        collist = [self.left, self.right]
        return select(collist, whereclause, from_obj=[self], **kwargs)
    @property
    def bind(self):
        return self.left.bind or self.right.bind
    def alias(self, name=None):
        """return an alias of this :class:`.Join`.

        Used against a :class:`.Join` object,
        :meth:`~.Join.alias` calls the :meth:`~.Join.select`
        method first so that a subquery against a
        :func:`.select` construct is generated.
        the :func:`~expression.select` construct also has the
        ``correlate`` flag set to ``False`` and will not
        auto-correlate inside an enclosing :func:`~expression.select`
        construct.

        The equivalent long-hand form, given a :class:`.Join` object
        ``j``, is::

            from sqlalchemy import select, alias
            j = alias(
                select([j.left, j.right]).\\
                    select_from(j).\\
                    with_labels(True).\\
                    correlate(False),
                name=name
            )

        See :func:`~.expression.alias` for further details on
        aliases.
        """
        return self.select(use_labels=True, correlate=False).alias(name)
    @property
    def _hide_froms(self):
        # the joined elements themselves are hidden from the enclosing
        # statement's FROM list; the Join renders them itself
        return itertools.chain(*[_from_objects(x.left, x.right)
                                 for x in self._cloned_set])
    @property
    def _from_objects(self):
        return [self] + \
            self.onclause._from_objects + \
            self.left._from_objects + \
            self.right._from_objects
class Alias(FromClause):
    """Represents a table or selectable alias (AS).

    Represents an alias, as typically applied to any table or
    sub-select within a SQL statement using the ``AS`` keyword (or
    without the keyword on certain databases such as Oracle).

    This object is constructed from the :func:`~.expression.alias` module level
    function as well as the :meth:`.FromClause.alias` method available on all
    :class:`.FromClause` subclasses.
    """
    __visit_name__ = 'alias'
    named_with_column = True
    def __init__(self, selectable, name=None):
        # unwrap nested aliases to find the ultimate aliased object
        baseselectable = selectable
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
        self.supports_execution = baseselectable.supports_execution
        if self.supports_execution:
            self._execution_options = baseselectable._execution_options
        self.element = selectable
        if name is None:
            # derive an anonymous name, seeded from the original's name
            # when it has one
            if self.original.named_with_column:
                name = getattr(self.original, 'name', None)
            name = _anonymous_label('%%(%d %s)s' % (id(self), name
                    or 'anon'))
        self.name = name
    @property
    def description(self):
        # Py2K/Py3K markers below are directives for SQLAlchemy's
        # source-conversion tooling; do not modify
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    def as_scalar(self):
        try:
            return self.element.as_scalar()
        except AttributeError:
            raise AttributeError("Element %s does not support "
                                 "'as_scalar()'" % self.element)
    def is_derived_from(self, fromclause):
        if fromclause in self._cloned_set:
            return True
        return self.element.is_derived_from(fromclause)
    def _populate_column_collection(self):
        # proxy every column of the aliased element onto this alias
        for col in self.element.columns:
            col._make_proxy(self)
    def _refresh_for_new_column(self, column):
        col = self.element._refresh_for_new_column(column)
        if col is not None:
            if not self._cols_populated:
                return None
            else:
                return col._make_proxy(self)
        else:
            return None
    def _copy_internals(self, clone=_clone, **kw):
        # don't apply anything to an aliased Table
        # for now.   May want to drive this from
        # the given **kw.
        if isinstance(self.element, TableClause):
            return
        self._reset_exported()
        self.element = clone(self.element, **kw)
        # re-derive .original against the cloned element
        baseselectable = self.element
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
    def get_children(self, column_collections=True, **kw):
        if column_collections:
            for c in self.c:
                yield c
        yield self.element
    @property
    def _from_objects(self):
        return [self]
    @property
    def bind(self):
        return self.element.bind
class CTE(Alias):
    """Represent a Common Table Expression.

    The :class:`.CTE` object is obtained using the
    :meth:`.SelectBase.cte` method from any selectable.
    See that method for complete examples.

    .. versionadded:: 0.7.6
    """

    __visit_name__ = 'cte'

    def __init__(self, selectable,
                 name=None,
                 recursive=False,
                 cte_alias=False,
                 _restates=frozenset()):
        # render ``WITH RECURSIVE`` when True
        self.recursive = recursive
        # when this CTE aliases another CTE, the name of the original
        self.cte_alias = cte_alias
        # CTEs restated by this one (built up by union/union_all for the
        # recursive portion) so the compiler doesn't render them twice
        self._restates = _restates
        super(CTE, self).__init__(selectable, name=name)

    def alias(self, name=None):
        """Return an alias of this CTE under a new (or anonymous) name."""
        return CTE(
            self.original,
            name=name,
            recursive=self.recursive,
            cte_alias=self.name
        )

    def union(self, other):
        """Return a new CTE of this CTE UNION ``other``; typically forms
        the recursive portion of a recursive CTE."""
        return CTE(
            self.original.union(other),
            name=self.name,
            recursive=self.recursive,
            _restates=self._restates.union([self])
        )

    def union_all(self, other):
        """Return a new CTE of this CTE UNION ALL ``other``."""
        return CTE(
            self.original.union_all(other),
            name=self.name,
            recursive=self.recursive,
            _restates=self._restates.union([self])
        )
class Grouping(ColumnElement):
    """Represent a parenthesized grouping around a column expression.

    Wraps another :class:`.ColumnElement`, delegating most attribute
    access to it while rendering surrounding parenthesis.
    """

    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        self.type = getattr(element, 'type', sqltypes.NULLTYPE)

    @property
    def _label(self):
        inner_label = getattr(self.element, '_label', None)
        if inner_label:
            return inner_label
        return self.anon_label

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return (self.element,)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # anything not defined on the grouping falls through to the
        # wrapped element
        return getattr(self.element, attr)

    def __getstate__(self):
        return dict(element=self.element, type=self.type)

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']

    def compare(self, other, **kw):
        """Compare this grouping to ``other`` for equivalence."""
        if not isinstance(other, Grouping):
            return False
        return self.element.compare(other.element)
class FromGrouping(FromClause):
    """Represent a parenthesized grouping of a FROM clause element.

    Delegates its column collections and attribute access to the
    wrapped element.
    """

    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element

    def _init_collections(self):
        # collections are proxied from the wrapped element instead
        pass

    @property
    def columns(self):
        return self.element.columns

    @property
    def primary_key(self):
        return self.element.primary_key

    @property
    def foreign_keys(self):
        # this could be
        # self.element.foreign_keys
        # see SelectableTest.test_join_condition
        return set()

    @property
    def _hide_froms(self):
        return self.element._hide_froms

    def get_children(self, **kwargs):
        return (self.element,)

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # delegate unknown attribute access to the wrapped element
        return getattr(self.element, attr)

    def __getstate__(self):
        return dict(element=self.element)

    def __setstate__(self, state):
        self.element = state['element']
class Over(ColumnElement):
    """Represent an OVER clause.

    This is a special operator against a so-called
    "window" function, as well as any aggregate function,
    which produces results relative to the result set
    itself.  It's supported only by certain database
    backends.
    """

    __visit_name__ = 'over'

    order_by = None
    partition_by = None

    def __init__(self, func, partition_by=None, order_by=None):
        self.func = func
        if order_by is not None:
            self.order_by = ClauseList(*util.to_list(order_by))
        if partition_by is not None:
            self.partition_by = ClauseList(*util.to_list(partition_by))

    @util.memoized_property
    def type(self):
        # the OVER expression carries the type of the wrapped function
        return self.func.type

    def get_children(self, **kwargs):
        children = []
        for clause in (self.func, self.partition_by, self.order_by):
            if clause is not None:
                children.append(clause)
        return children

    def _copy_internals(self, clone=_clone, **kw):
        self.func = clone(self.func, **kw)
        if self.partition_by is not None:
            self.partition_by = clone(self.partition_by, **kw)
        if self.order_by is not None:
            self.order_by = clone(self.order_by, **kw)

    @property
    def _from_objects(self):
        froms = []
        for clause in (self.func, self.partition_by, self.order_by):
            if clause is not None:
                froms.extend(clause._from_objects)
        return froms
class Label(ColumnElement):
    """Represents a column label (AS).

    Represent a label, as typically applied to any column-level
    element using the ``AS`` sql keyword.

    This object is constructed from the :func:`label()` module level
    function as well as the :func:`label()` method available on all
    :class:`.ColumnElement` subclasses.
    """

    __visit_name__ = 'label'

    def __init__(self, name, element, type_=None):
        # collapse nested labels down to the innermost element
        while isinstance(element, Label):
            element = element.element
        if name:
            self.name = name
        else:
            # no explicit name: generate an anonymous label incorporating
            # the element's own name when available
            self.name = _anonymous_label('%%(%d %s)s' % (id(self),
                                         getattr(element, 'name', 'anon')))
        self.key = self._label = self._key_label = self.name
        self._element = element
        self._type = type_
        self.quote = element.quote
        self._proxies = [element]

    @util.memoized_property
    def type(self):
        # explicit type wins; otherwise take the element's type
        return sqltypes.to_instance(
            self._type or getattr(self._element, 'type', None)
        )

    @util.memoized_property
    def element(self):
        # the labeled element, grouped as appropriate for an AS clause
        return self._element.self_group(against=operators.as_)

    def self_group(self, against=None):
        # re-label only if grouping actually changed the element
        sub_element = self._element.self_group(against=against)
        if sub_element is not self._element:
            return Label(self.name,
                         sub_element,
                         type_=self._type)
        else:
            return self

    @property
    def primary_key(self):
        return self.element.primary_key

    @property
    def foreign_keys(self):
        return self.element.foreign_keys

    def get_children(self, **kwargs):
        return self.element,

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _make_proxy(self, selectable, name=None, **kw):
        # proxy the underlying element under this label's name, then
        # record this Label in the proxy chain
        e = self.element._make_proxy(selectable,
                                     name=name if name else self.name)
        e._proxies.append(self)
        if self._type is not None:
            e.type = self._type
        return e
class ColumnClause(Immutable, ColumnElement):
    """Represents a generic column expression from any textual string.

    This includes columns associated with tables, aliases and select
    statements, but also any arbitrary text.  May or may not be bound
    to an underlying :class:`.Selectable`.

    :class:`.ColumnClause` is constructed by itself typically via
    the :func:`~.expression.column` function.  It may be placed directly
    into constructs such as :func:`.select` constructs::

        from sqlalchemy.sql import column, select

        c1, c2 = column("c1"), column("c2")
        s = select([c1, c2]).where(c1==5)

    There is also a variant on :func:`~.expression.column` known
    as :func:`~.expression.literal_column` - the difference is that
    in the latter case, the string value is assumed to be an exact
    expression, rather than a column name, so that no quoting rules
    or similar are applied::

        from sqlalchemy.sql import literal_column, select

        s = select([literal_column("5 + 7")])

    :class:`.ColumnClause` can also be used in a table-like
    fashion by combining the :func:`~.expression.column` function
    with the :func:`~.expression.table` function, to produce
    a "lightweight" form of table metadata::

        from sqlalchemy.sql import table, column

        user = table("user",
                column("id"),
                column("name"),
                column("description"),
        )

    The above construct can be created in an ad-hoc fashion and is
    not associated with any :class:`.schema.MetaData`, unlike it's
    more full fledged :class:`.schema.Table` counterpart.

    :param text: the text of the element.

    :param selectable: parent selectable.

    :param type: :class:`.types.TypeEngine` object which can associate
      this :class:`.ColumnClause` with a type.

    :param is_literal: if True, the :class:`.ColumnClause` is assumed to
      be an exact expression that will be delivered to the output with no
      quoting rules applied regardless of case sensitive settings.  the
      :func:`literal_column()` function is usually used to create such a
      :class:`.ColumnClause`.
    """

    __visit_name__ = 'column'

    # schema-level column features not supported by this lightweight form
    onupdate = default = server_default = server_onupdate = None

    # group of memoized properties expired together when .table changes
    _memoized_property = util.group_expirable_memoized_property()

    def __init__(self, text, selectable=None, type_=None, is_literal=False):
        self.key = self.name = text
        self.table = selectable
        self.type = sqltypes.to_instance(type_)
        self.is_literal = is_literal

    def _compare_name_for_result(self, other):
        # literal or table-less columns fall back to plain name
        # comparison; otherwise compare via proxy lineage
        if self.is_literal or \
                self.table is None or \
                not hasattr(other, 'proxy_set') or (
                    isinstance(other, ColumnClause) and other.is_literal
                ):
            return super(ColumnClause, self).\
                _compare_name_for_result(other)
        else:
            return other.proxy_set.intersection(self.proxy_set)

    def _get_table(self):
        return self.__dict__['table']

    def _set_table(self, table):
        # labels/from-objects are derived from the table, so expire
        # the memoized group whenever it is reassigned
        self._memoized_property.expire_instance(self)
        self.__dict__['table'] = table
    table = property(_get_table, _set_table)

    @_memoized_property
    def _from_objects(self):
        t = self.table
        if t is not None:
            return [t]
        else:
            return []

    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K

    @_memoized_property
    def _key_label(self):
        # label used for result-set keys; differs from _label only
        # when .key was assigned separately from .name
        if self.key != self.name:
            return self._gen_label(self.key)
        else:
            return self._label

    @_memoized_property
    def _label(self):
        return self._gen_label(self.name)

    def _gen_label(self, name):
        """Generate a "tablename_columnname" style label for this column,
        de-conflicting against existing labels on the parent table."""
        t = self.table
        if self.is_literal:
            return None
        elif t is not None and t.named_with_column:
            if getattr(t, 'schema', None):
                label = t.schema.replace('.', '_') + "_" + \
                    t.name + "_" + name
            else:
                label = t.name + "_" + name
            # ensure the label name doesn't conflict with that
            # of an existing column
            if label in t.c:
                _label = label
                counter = 1
                while _label in t.c:
                    _label = label + "_" + str(counter)
                    counter += 1
                label = _label
            return _as_truncated(label)
        else:
            return name

    def _bind_param(self, operator, obj):
        # bind parameter for comparisons against this column; coerces
        # the value relative to this column's type
        return BindParameter(self.name, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             unique=True)

    def _make_proxy(self, selectable, name=None, attach=True,
                    name_is_truncatable=False, **kw):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = self._constructor(
            _as_truncated(name or self.name) if \
                name_is_truncatable else \
                (name or self.name),
            selectable=selectable,
            type_=self.type,
            is_literal=is_literal
        )
        if name is None:
            c.key = self.key
        c._proxies = [self]
        if selectable._is_clone_of is not None:
            c._is_clone_of = \
                selectable._is_clone_of.columns.get(c.key)
        if attach:
            selectable._columns[c.key] = c
        return c
class TableClause(Immutable, FromClause):
    """Represents a minimal "table" construct.

    The constructor for :class:`.TableClause` is the
    :func:`~.expression.table` function.   This produces
    a lightweight table object that has only a name and a
    collection of columns, which are typically produced
    by the :func:`~.expression.column` function::

        from sqlalchemy.sql import table, column

        user = table("user",
                column("id"),
                column("name"),
                column("description"),
        )

    The :class:`.TableClause` construct serves as the base for
    the more commonly used :class:`~.schema.Table` object, providing
    the usual set of :class:`~.expression.FromClause` services including
    the ``.c.`` collection and statement generation methods.

    It does **not** provide all the additional schema-level services
    of :class:`~.schema.Table`, including constraints, references to other
    tables, or support for :class:`.MetaData`-level services.  It's useful
    on its own as an ad-hoc construct used to generate quick SQL
    statements when a more fully fledged :class:`~.schema.Table`
    is not on hand.
    """

    __visit_name__ = 'table'

    named_with_column = True

    implicit_returning = False
    """:class:`.TableClause` doesn't support having a primary key or column
    -level defaults, so implicit returning doesn't apply."""

    _autoincrement_column = None
    """No PK or default support so no autoincrement column."""

    def __init__(self, name, *columns):
        super(TableClause, self).__init__()
        self.name = self.fullname = name
        self._columns = ColumnCollection()
        # kept empty; TableClause has no schema-level PK/FK support
        self.primary_key = ColumnSet()
        self.foreign_keys = set()
        for c in columns:
            self.append_column(c)

    def _init_collections(self):
        # collections are built in __init__ instead
        pass

    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K

    def append_column(self, c):
        """Add a :class:`.ColumnClause` to this table, parenting it here."""
        self._columns[c.key] = c
        c.table = self

    def get_children(self, column_collections=True, **kwargs):
        if column_collections:
            return [c for c in self.c]
        else:
            return []

    def count(self, whereclause=None, **params):
        """return a SELECT COUNT generated against this
        :class:`.TableClause`."""

        # count against the first PK column when present, else the
        # first column overall
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return select(
            [func.count(col).label('tbl_row_count')],
            whereclause,
            from_obj=[self],
            **params)

    def insert(self, values=None, inline=False, **kwargs):
        """Generate an :func:`.insert` construct against this
        :class:`.TableClause`.

        E.g.::

            table.insert().values(name='foo')

        See :func:`.insert` for argument and usage information.
        """
        return insert(self, values=values, inline=inline, **kwargs)

    def update(self, whereclause=None, values=None, inline=False, **kwargs):
        """Generate an :func:`.update` construct against this
        :class:`.TableClause`.

        E.g.::

            table.update().where(table.c.id==7).values(name='foo')

        See :func:`.update` for argument and usage information.
        """
        return update(self, whereclause=whereclause,
                      values=values, inline=inline, **kwargs)

    def delete(self, whereclause=None, **kwargs):
        """Generate a :func:`.delete` construct against this
        :class:`.TableClause`.

        E.g.::

            table.delete().where(table.c.id==7)

        See :func:`.delete` for argument and usage information.
        """
        return delete(self, whereclause, **kwargs)

    @property
    def _from_objects(self):
        return [self]
class SelectBase(Executable, FromClause):
    """Base class for :class:`.Select` and ``CompoundSelects``."""

    # class-level empty defaults; instances assign their own when
    # criteria are provided
    _order_by_clause = ClauseList()
    _group_by_clause = ClauseList()
    _limit = None
    _offset = None

    def __init__(self,
                 use_labels=False,
                 for_update=False,
                 limit=None,
                 offset=None,
                 order_by=None,
                 group_by=None,
                 bind=None,
                 autocommit=None):
        self.use_labels = use_labels
        self.for_update = for_update
        if autocommit is not None:
            # legacy flag; routed into execution options instead
            util.warn_deprecated('autocommit on select() is '
                                 'deprecated. Use .execution_options(a'
                                 'utocommit=True)')
            self._execution_options = \
                self._execution_options.union(
                    {'autocommit': autocommit})
        if limit is not None:
            self._limit = util.asint(limit)
        if offset is not None:
            self._offset = util.asint(offset)
        self._bind = bind

        if order_by is not None:
            self._order_by_clause = ClauseList(*util.to_list(order_by))
        if group_by is not None:
            self._group_by_clause = ClauseList(*util.to_list(group_by))

    def as_scalar(self):
        """return a 'scalar' representation of this selectable, which can be
        used as a column expression.

        Typically, a select statement which has only one column in its columns
        clause is eligible to be used as a scalar expression.

        The returned object is an instance of
        :class:`ScalarSelect`.
        """
        return ScalarSelect(self)

    @_generative
    def apply_labels(self):
        """return a new selectable with the 'use_labels' flag set to True.

        This will result in column expressions being generated using labels
        against their table name, such as "SELECT somecolumn AS
        tablename_somecolumn". This allows selectables which contain multiple
        FROM clauses to produce a unique set of column names regardless of
        name conflicts among the individual FROM clauses.
        """
        self.use_labels = True

    def label(self, name):
        """return a 'scalar' representation of this selectable, embedded as a
        subquery with a label.

        .. seealso::

            :meth:`~.SelectBase.as_scalar`.
        """
        return self.as_scalar().label(name)

    def cte(self, name=None, recursive=False):
        """Return a new :class:`.CTE`, or Common Table Expression instance.

        Common table expressions are a SQL standard whereby SELECT
        statements can draw upon secondary statements specified along
        with the primary statement, using a clause called "WITH".
        Special semantics regarding UNION can also be employed to
        allow "recursive" queries, where a SELECT statement can draw
        upon the set of rows that have previously been selected.

        SQLAlchemy detects :class:`.CTE` objects, which are treated
        similarly to :class:`.Alias` objects, as special elements
        to be delivered to the FROM clause of the statement as well
        as to a WITH clause at the top of the statement.

        .. versionadded:: 0.7.6

        :param name: name given to the common table expression.  Like
         :meth:`._FromClause.alias`, the name can be left as ``None``
         in which case an anonymous symbol will be used at query
         compile time.
        :param recursive: if ``True``, will render ``WITH RECURSIVE``.
         A recursive common table expression is intended to be used in
         conjunction with UNION ALL in order to derive rows
         from those already selected.

        The following examples illustrate two examples from
        Postgresql's documentation at
        http://www.postgresql.org/docs/8.4/static/queries-with.html.

        Example 1, non recursive::

            from sqlalchemy import Table, Column, String, Integer, MetaData, \\
                select, func

            metadata = MetaData()

            orders = Table('orders', metadata,
                Column('region', String),
                Column('amount', Integer),
                Column('product', String),
                Column('quantity', Integer)
            )

            regional_sales = select([
                                orders.c.region,
                                func.sum(orders.c.amount).label('total_sales')
                            ]).group_by(orders.c.region).cte("regional_sales")


            top_regions = select([regional_sales.c.region]).\\
                    where(
                        regional_sales.c.total_sales >
                        select([
                            func.sum(regional_sales.c.total_sales)/10
                        ])
                    ).cte("top_regions")

            statement = select([
                        orders.c.region,
                        orders.c.product,
                        func.sum(orders.c.quantity).label("product_units"),
                        func.sum(orders.c.amount).label("product_sales")
                ]).where(orders.c.region.in_(
                    select([top_regions.c.region])
                )).group_by(orders.c.region, orders.c.product)

            result = conn.execute(statement).fetchall()

        Example 2, WITH RECURSIVE::

            from sqlalchemy import Table, Column, String, Integer, MetaData, \\
                select, func

            metadata = MetaData()

            parts = Table('parts', metadata,
                Column('part', String),
                Column('sub_part', String),
                Column('quantity', Integer),
            )

            included_parts = select([
                                parts.c.sub_part,
                                parts.c.part,
                                parts.c.quantity]).\\
                                where(parts.c.part=='our part').\\
                                cte(recursive=True)


            incl_alias = included_parts.alias()
            parts_alias = parts.alias()
            included_parts = included_parts.union_all(
                select([
                    parts_alias.c.part,
                    parts_alias.c.sub_part,
                    parts_alias.c.quantity
                ]).
                    where(parts_alias.c.part==incl_alias.c.sub_part)
            )

            statement = select([
                        included_parts.c.sub_part,
                        func.sum(included_parts.c.quantity).
                          label('total_quantity')
                    ]).\\
                    select_from(included_parts.join(parts,
                                included_parts.c.part==parts.c.part)).\\
                    group_by(included_parts.c.sub_part)

            result = conn.execute(statement).fetchall()

        .. seealso::

            :meth:`.orm.query.Query.cte` - ORM version of :meth:`.SelectBase.cte`.

        """
        return CTE(self, name=name, recursive=recursive)

    @_generative
    @util.deprecated('0.6',
                     message=":func:`.autocommit` is deprecated. Use "
                     ":func:`.Executable.execution_options` with the "
                     "'autocommit' flag.")
    def autocommit(self):
        """return a new selectable with the 'autocommit' flag set to
        True."""

        self._execution_options = \
            self._execution_options.union({'autocommit': True})

    def _generate(self):
        """Override the default _generate() method to also clear out
        exported collections."""

        s = self.__class__.__new__(self.__class__)
        s.__dict__ = self.__dict__.copy()
        s._reset_exported()
        return s

    @_generative
    def limit(self, limit):
        """return a new selectable with the given LIMIT criterion
        applied."""

        self._limit = util.asint(limit)

    @_generative
    def offset(self, offset):
        """return a new selectable with the given OFFSET criterion
        applied."""

        self._offset = util.asint(offset)

    @_generative
    def order_by(self, *clauses):
        """return a new selectable with the given list of ORDER BY
        criterion applied.

        The criterion will be appended to any pre-existing ORDER BY
        criterion.

        """
        self.append_order_by(*clauses)

    @_generative
    def group_by(self, *clauses):
        """return a new selectable with the given list of GROUP BY
        criterion applied.

        The criterion will be appended to any pre-existing GROUP BY
        criterion.

        """
        self.append_group_by(*clauses)

    def append_order_by(self, *clauses):
        """Append the given ORDER BY criterion applied to this selectable.

        The criterion will be appended to any pre-existing ORDER BY criterion.

        This is an **in-place** mutation method; the
        :meth:`~.SelectBase.order_by` method is preferred, as it provides standard
        :term:`method chaining`.

        """
        # a single None argument resets any existing ORDER BY
        if len(clauses) == 1 and clauses[0] is None:
            self._order_by_clause = ClauseList()
        else:
            if getattr(self, '_order_by_clause', None) is not None:
                clauses = list(self._order_by_clause) + list(clauses)
            self._order_by_clause = ClauseList(*clauses)

    def append_group_by(self, *clauses):
        """Append the given GROUP BY criterion applied to this selectable.

        The criterion will be appended to any pre-existing GROUP BY criterion.

        This is an **in-place** mutation method; the
        :meth:`~.SelectBase.group_by` method is preferred, as it provides standard
        :term:`method chaining`.

        """
        # a single None argument resets any existing GROUP BY
        if len(clauses) == 1 and clauses[0] is None:
            self._group_by_clause = ClauseList()
        else:
            if getattr(self, '_group_by_clause', None) is not None:
                clauses = list(self._group_by_clause) + list(clauses)
            self._group_by_clause = ClauseList(*clauses)

    @property
    def _from_objects(self):
        return [self]
class ScalarSelect(Generative, Grouping):
    """Represent a scalar subquery: a SELECT used as a column expression.

    Produced by :meth:`.SelectBase.as_scalar`.
    """

    # a scalar select contributes no FROM elements to the enclosing
    # statement
    _from_objects = []

    def __init__(self, element):
        self.element = element
        self.type = element._scalar_type()

    @property
    def columns(self):
        raise exc.InvalidRequestError('Scalar Select expression has no '
                                      'columns; use this object directly within a '
                                      'column-level expression.')
    c = columns

    @_generative
    def where(self, crit):
        """Apply a WHERE clause to the SELECT statement referred to
        by this :class:`.ScalarSelect`.
        """
        self.element = self.element.where(crit)

    def self_group(self, **kwargs):
        # already renders with grouping; no further wrapping needed
        return self
class CompoundSelect(SelectBase):
    """Forms the basis of ``UNION``, ``UNION ALL``, and other
    SELECT-based set operations."""

    __visit_name__ = 'compound_select'

    # symbols naming the supported set-operation keywords
    UNION = util.symbol('UNION')
    UNION_ALL = util.symbol('UNION ALL')
    EXCEPT = util.symbol('EXCEPT')
    EXCEPT_ALL = util.symbol('EXCEPT ALL')
    INTERSECT = util.symbol('INTERSECT')
    INTERSECT_ALL = util.symbol('INTERSECT ALL')

    def __init__(self, keyword, *selects, **kwargs):
        self._auto_correlate = kwargs.pop('correlate', False)
        self.keyword = keyword
        self.selects = []

        numcols = None

        # some DBs do not like ORDER BY in the inner queries of a UNION, etc.
        for n, s in enumerate(selects):
            s = _clause_element_as_expr(s)

            # all member selects must agree on column count; compare
            # each against the first
            if not numcols:
                numcols = len(s.c)
            elif len(s.c) != numcols:
                raise exc.ArgumentError('All selectables passed to '
                                        'CompoundSelect must have identical numbers of '
                                        'columns; select #%d has %d columns, select '
                                        '#%d has %d' % (1, len(self.selects[0].c), n
                                        + 1, len(s.c)))

            self.selects.append(s.self_group(self))

        SelectBase.__init__(self, **kwargs)

    def _scalar_type(self):
        # type of the compound is taken from the first member select
        return self.selects[0]._scalar_type()

    def self_group(self, against=None):
        return FromGrouping(self)

    def is_derived_from(self, fromclause):
        # derived from ``fromclause`` if any member select is
        for s in self.selects:
            if s.is_derived_from(fromclause):
                return True
        return False

    def _populate_column_collection(self):
        for cols in zip(*[s.c for s in self.selects]):

            # this is a slightly hacky thing - the union exports a
            # column that resembles just that of the *first* selectable.
            # to get at a "composite" column, particularly foreign keys,
            # you have to dig through the proxies collection which we
            # generate below.  We may want to improve upon this, such as
            # perhaps _make_proxy can accept a list of other columns
            # that are "shared" - schema.column can then copy all the
            # ForeignKeys in. this would allow the union() to have all
            # those fks too.

            proxy = cols[0]._make_proxy(self,
                    name=cols[0]._label if self.use_labels else None,
                    key=cols[0]._key_label if self.use_labels else None)

            # hand-construct the "_proxies" collection to include all
            # derived columns place a 'weight' annotation corresponding
            # to how low in the list of select()s the column occurs, so
            # that the corresponding_column() operation can resolve
            # conflicts

            proxy._proxies = [c._annotate({'weight': i + 1}) for (i,
                              c) in enumerate(cols)]

    def _refresh_for_new_column(self, column):
        for s in self.selects:
            s._refresh_for_new_column(column)

        if not self._cols_populated:
            return None

        raise NotImplementedError("CompoundSelect constructs don't support "
                                  "addition of columns to underlying selectables")

    def _copy_internals(self, clone=_clone, **kw):
        self._reset_exported()
        self.selects = [clone(s, **kw) for s in self.selects]
        if hasattr(self, '_col_map'):
            del self._col_map
        for attr in ('_order_by_clause', '_group_by_clause'):
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr), **kw))

    def get_children(self, column_collections=True, **kwargs):
        return (column_collections and list(self.c) or []) \
            + [self._order_by_clause, self._group_by_clause] \
            + list(self.selects)

    def bind(self):
        # prefer an explicit bind; otherwise fall back to the first
        # member select that has one
        if self._bind:
            return self._bind
        for s in self.selects:
            e = s.bind
            if e:
                return e
        else:
            return None

    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
class HasPrefixes(object):
    """Mixin supplying the generative :meth:`.prefix_with` method."""

    _prefixes = ()

    @_generative
    def prefix_with(self, *expr, **kw):
        r"""Add one or more expressions following the statement keyword, i.e.
        SELECT, INSERT, UPDATE, or DELETE. Generative.

        This is used to support backend-specific prefix keywords such as those
        provided by MySQL.

        E.g.::

            stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")

        Multiple prefixes can be specified by multiple calls
        to :meth:`.prefix_with`.

        :param \*expr: textual or :class:`.ClauseElement` construct which
         will be rendered following the INSERT, UPDATE, or DELETE
         keyword.
        :param \**kw: A single keyword 'dialect' is accepted.  This is an
         optional string dialect name which will
         limit rendering of this prefix to only that dialect.

        """
        dialect = kw.pop('dialect', None)
        if kw:
            # anything other than 'dialect' is unsupported
            raise exc.ArgumentError("Unsupported argument(s): %s" %
                                    ",".join(kw))
        self._setup_prefixes(expr, dialect)

    def _setup_prefixes(self, prefixes, dialect=None):
        # coerce each prefix to a text construct, pairing it with the
        # dialect restriction (or None for all dialects)
        additions = tuple(
            (_literal_as_text(p), dialect) for p in prefixes)
        self._prefixes = self._prefixes + additions
class Select(HasPrefixes, SelectBase):
"""Represents a ``SELECT`` statement.
.. seealso::
:func:`~.expression.select` - the function which creates
a :class:`.Select` object.
:ref:`coretutorial_selecting` - Core Tutorial description
of :func:`.select`.
"""
__visit_name__ = 'select'
_prefixes = ()
_hints = util.immutabledict()
_distinct = False
_from_cloned = None
_correlate = ()
_correlate_except = None
_memoized_property = SelectBase._memoized_property
def __init__(self,
             columns,
             whereclause=None,
             from_obj=None,
             distinct=False,
             having=None,
             correlate=True,
             prefixes=None,
             **kwargs):
    """Construct a Select object.

    The public constructor for Select is the
    :func:`select` function; see that function for
    argument descriptions.

    Additional generative and mutator methods are available on the
    :class:`SelectBase` superclass.

    """
    self._auto_correlate = correlate
    # ``distinct`` may be True (plain DISTINCT) or a list of
    # expressions (e.g. DISTINCT ON)
    if distinct is not False:
        if distinct is True:
            self._distinct = True
        else:
            self._distinct = [
                _literal_as_text(e)
                for e in util.to_list(distinct)
            ]

    if from_obj is not None:
        self._from_obj = util.OrderedSet(
            _interpret_as_from(f)
            for f in util.to_list(from_obj))
    else:
        self._from_obj = util.OrderedSet()

    # ``columns`` must be iterable; bool() on a non-iterable raising
    # TypeError is used as the detection mechanism
    try:
        cols_present = bool(columns)
    except TypeError:
        raise exc.ArgumentError("columns argument to select() must "
                                "be a Python list or other iterable")

    if cols_present:
        self._raw_columns = []
        for c in columns:
            c = _interpret_as_column_or_from(c)
            if isinstance(c, ScalarSelect):
                # scalar subqueries group at comma level in the
                # columns clause
                c = c.self_group(against=operators.comma_op)
            self._raw_columns.append(c)
    else:
        self._raw_columns = []

    if whereclause is not None:
        self._whereclause = _literal_as_text(whereclause)
    else:
        self._whereclause = None

    if having is not None:
        self._having = _literal_as_text(having)
    else:
        self._having = None

    if prefixes:
        self._setup_prefixes(prefixes)

    SelectBase.__init__(self, **kwargs)
@property
def _froms(self):
    """All FROM elements referenced by the columns clause, WHERE clause
    and explicit from_obj collection, de-duplicated by clone lineage."""
    # would love to cache this,
    # but there's just enough edge cases, particularly now that
    # declarative encourages construction of SQL expressions
    # without tables present, to just regen this each time.
    froms = []
    seen = set()
    translate = self._from_cloned

    def add(items):
        # append each item (translated through the clone map when one
        # exists), skipping items whose clone lineage was already seen
        for item in items:
            if translate and item in translate:
                item = translate[item]
            if not seen.intersection(item._cloned_set):
                froms.append(item)
            seen.update(item._cloned_set)

    add(_from_objects(*self._raw_columns))
    if self._whereclause is not None:
        add(_from_objects(self._whereclause))
    add(self._from_obj)

    return froms
def _get_display_froms(self, explicit_correlate_froms=None,
                       implicit_correlate_froms=None):
    """Return the full list of 'from' clauses to be displayed.

    Takes into account a set of existing froms which may be
    rendered in the FROM clause of enclosing selects; this Select
    may want to leave those absent if it is automatically
    correlating.

    """
    froms = self._froms

    # elements hidden by joins (their left/right sides) are removed
    # from the display list
    toremove = set(itertools.chain(*[
        _expand_cloned(f._hide_froms)
        for f in froms]))
    if toremove:
        # if we're maintaining clones of froms,
        # add the copies out to the toremove list.  only include
        # clones that are lexical equivalents.
        if self._from_cloned:
            toremove.update(
                self._from_cloned[f] for f in
                toremove.intersection(self._from_cloned)
                if self._from_cloned[f]._is_lexical_equivalent(f)
            )

        # filter out to FROM clauses not in the list,
        # using a list to maintain ordering
        froms = [f for f in froms if f not in toremove]

    # explicit correlate(): remove the named froms when they also
    # appear in the enclosing statement
    if self._correlate:
        to_correlate = self._correlate
        if to_correlate:
            froms = [
                f for f in froms if f not in
                _cloned_intersection(
                    _cloned_intersection(froms, explicit_correlate_froms or ()),
                    to_correlate
                )
            ]

    # correlate_except(): correlate everything present in the
    # enclosing statement except the named froms
    if self._correlate_except is not None:
        froms = [
            f for f in froms if f not in
            _cloned_difference(
                _cloned_intersection(froms, explicit_correlate_froms or ()),
                self._correlate_except
            )
        ]

    # implicit (automatic) correlation against the enclosing statement,
    # only while more than one FROM would remain
    if self._auto_correlate and \
            implicit_correlate_froms and \
            len(froms) > 1:
        froms = [
            f for f in froms if f not in
            _cloned_intersection(froms, implicit_correlate_froms)
        ]

        if not len(froms):
            raise exc.InvalidRequestError("Select statement '%s"
                                          "' returned no FROM clauses due to "
                                          "auto-correlation; specify "
                                          "correlate(<tables>) to control "
                                          "correlation manually." % self)

    return froms
def _scalar_type(self):
    """Return the type of this SELECT's first column expression, used
    when the statement is coerced to a scalar expression."""
    leading = self._raw_columns[0]
    inner_cols = list(leading._select_iterable)
    return inner_cols[0].type
@property
def froms(self):
    """Return the displayed list of FromClause elements."""

    return self._get_display_froms()
@_generative
def with_hint(self, selectable, text, dialect_name='*'):
    """Add an indexing hint for the given selectable to this
    :class:`.Select`.

    The text of the hint is rendered in the appropriate
    location for the database backend in use, relative
    to the given :class:`.Table` or :class:`.Alias` passed as the
    ``selectable`` argument. The dialect implementation
    typically uses Python string substitution syntax
    with the token ``%(name)s`` to render the name of
    the table or alias. E.g. when using Oracle, the
    following::

        select([mytable]).\\
            with_hint(mytable, "+ index(%(name)s ix_mytable)")

    Would render SQL as::

        select /*+ index(mytable ix_mytable) */ ... from mytable

    The ``dialect_name`` option will limit the rendering of a particular
    hint to a particular backend. Such as, to add hints for both Oracle
    and Sybase simultaneously::

        select([mytable]).\\
            with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\
            with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')

    """
    # hints are keyed by (selectable, dialect) so multiple backends
    # may be targeted independently
    self._hints = self._hints.union(
        {(selectable, dialect_name): text})
@property
def type(self):
    # a full SELECT has no single type; callers wanting a typed column
    # expression must convert via as_scalar() first
    raise exc.InvalidRequestError("Select objects don't have a type.  "
                                  "Call as_scalar() on this Select object "
                                  "to return a 'scalar' version of this Select.")
    @_memoized_property.method
    def locate_all_froms(self):
        """return a Set of all FromClause elements referenced by this Select.

        This set is a superset of that returned by the ``froms`` property,
        which is specifically for those FromClause elements that would
        actually be rendered.
        """
        froms = self._froms
        # NOTE(review): despite the docstring saying "Set", this returns a
        # list -- the direct froms plus everything they transitively
        # reference via _from_objects(); duplicates are possible.
        return froms + list(_from_objects(*froms))
    @property
    def inner_columns(self):
        """an iterator of all ColumnElement expressions which would
        be rendered into the columns clause of the resulting SELECT statement.
        """
        # flattens each raw columns-clause entry (e.g. a whole Table) into
        # its individual column expressions
        return _select_iterables(self._raw_columns)
def is_derived_from(self, fromclause):
if self in fromclause._cloned_set:
return True
for f in self.locate_all_froms():
if f.is_derived_from(fromclause):
return True
return False
    def _copy_internals(self, clone=_clone, **kw):
        """Apply ``clone`` to the internal state of this (already copied)
        Select, re-pointing internal collections at the cloned elements.
        """
        # 1. keep a dictionary of the froms we've cloned, and what
        # they've become.  This is consulted later when we derive
        # additional froms from "whereclause" and the columns clause,
        # which may still reference the uncloned parent table.
        # as of 0.7.4 we also put the current version of _froms, which
        # gets cleared on each generation.  previously we were "baking"
        # _froms into self._from_obj.
        self._from_cloned = from_cloned = dict((f, clone(f, **kw))
            for f in self._from_obj.union(self._froms))
        # 3. update persistent _from_obj with the cloned versions.
        self._from_obj = util.OrderedSet(from_cloned[f] for f in
                                         self._from_obj)
        # the _correlate collection is done separately, what can happen
        # here is the same item is _correlate as in _from_obj but the
        # _correlate version has an annotation on it - (specifically
        # RelationshipProperty.Comparator._criterion_exists() does
        # this).  Also keep _correlate liberally open with its previous
        # contents, as this set is used for matching, not rendering.
        # NOTE(review): _correlate entries are cloned WITHOUT **kw, unlike
        # every other clone() call in this method -- presumably intentional,
        # but worth confirming.
        self._correlate = set(clone(f) for f in
                              self._correlate).union(self._correlate)
        # 4. clone other things.  The difficulty here is that Column
        # objects are not actually cloned, and refer to their original
        # .table, resulting in the wrong "from" parent after a clone
        # operation.  Hence _from_cloned and _from_obj supersede what is
        # present here.
        self._raw_columns = [clone(c, **kw) for c in self._raw_columns]
        for attr in '_whereclause', '_having', '_order_by_clause', \
                '_group_by_clause':
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr), **kw))
        # erase exported column list, _froms collection, etc. so they are
        # regenerated against the cloned elements on next access
        self._reset_exported()
def get_children(self, column_collections=True, **kwargs):
"""return child elements as per the ClauseElement specification."""
return (column_collections and list(self.columns) or []) + \
self._raw_columns + list(self._froms) + \
[x for x in
(self._whereclause, self._having,
self._order_by_clause, self._group_by_clause)
if x is not None]
    @_generative
    def column(self, column):
        """return a new select() construct with the given column expression
        added to its columns clause.
        """
        # @_generative has already produced a copy; mutate it in place
        self.append_column(column)
    def reduce_columns(self, only_synonyms=True):
        """Return a new :func:`.select` construct with redundantly
        named, equivalently-valued columns removed from the columns clause.

        "Redundant" here means two columns where one refers to the
        other either based on foreign key, or via a simple equality
        comparison in the WHERE clause of the statement.   The primary purpose
        of this method is to automatically construct a select statement
        with all uniquely-named columns, without the need to use
        table-qualified labels as :meth:`.apply_labels` does.

        When columns are omitted based on foreign key, the referred-to
        column is the one that's kept.  When columns are omitted based on
        WHERE equivalence, the first column in the columns clause is the
        one that's kept.

        :param only_synonyms: when True, limit the removal of columns
         to those which have the same name as the equivalent.   Otherwise,
         all columns that are equivalent to another are removed.

        .. versionadded:: 0.8
        """
        # the WHERE clause and any explicit FROM objects are passed along so
        # equality-based redundancy can be detected
        return self.with_only_columns(
            sqlutil.reduce_columns(
                self.inner_columns,
                only_synonyms=only_synonyms,
                *(self._whereclause, ) + tuple(self._from_obj)
            )
        )
@_generative
def with_only_columns(self, columns):
"""Return a new :func:`.select` construct with its columns
clause replaced with the given columns.
.. versionchanged:: 0.7.3
Due to a bug fix, this method has a slight
behavioral change as of version 0.7.3.
Prior to version 0.7.3, the FROM clause of
a :func:`.select` was calculated upfront and as new columns
were added; in 0.7.3 and later it's calculated
at compile time, fixing an issue regarding late binding
of columns to parent tables. This changes the behavior of
:meth:`.Select.with_only_columns` in that FROM clauses no
longer represented in the new list are dropped,
but this behavior is more consistent in
that the FROM clauses are consistently derived from the
current columns clause. The original intent of this method
is to allow trimming of the existing columns list to be fewer
columns than originally present; the use case of replacing
the columns list with an entirely different one hadn't
been anticipated until 0.7.3 was released; the usage
guidelines below illustrate how this should be done.
This method is exactly equivalent to as if the original
:func:`.select` had been called with the given columns
clause. I.e. a statement::
s = select([table1.c.a, table1.c.b])
s = s.with_only_columns([table1.c.b])
should be exactly equivalent to::
s = select([table1.c.b])
This means that FROM clauses which are only derived
from the column list will be discarded if the new column
list no longer contains that FROM::
>>> table1 = table('t1', column('a'), column('b'))
>>> table2 = table('t2', column('a'), column('b'))
>>> s1 = select([table1.c.a, table2.c.b])
>>> print s1
SELECT t1.a, t2.b FROM t1, t2
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1
The preferred way to maintain a specific FROM clause
in the construct, assuming it won't be represented anywhere
else (i.e. not in the WHERE clause, etc.) is to set it using
:meth:`.Select.select_from`::
>>> s1 = select([table1.c.a, table2.c.b]).\\
... select_from(table1.join(table2,
... table1.c.a==table2.c.a))
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a
Care should also be taken to use the correct
set of column objects passed to :meth:`.Select.with_only_columns`.
Since the method is essentially equivalent to calling the
:func:`.select` construct in the first place with the given
columns, the columns passed to :meth:`.Select.with_only_columns`
should usually be a subset of those which were passed
to the :func:`.select` construct, not those which are available
from the ``.c`` collection of that :func:`.select`. That
is::
s = select([table1.c.a, table1.c.b]).select_from(table1)
s = s.with_only_columns([table1.c.b])
and **not**::
# usually incorrect
s = s.with_only_columns([s.c.b])
The latter would produce the SQL::
SELECT b
FROM (SELECT t1.a AS a, t1.b AS b
FROM t1), t1
Since the :func:`.select` construct is essentially being
asked to select both from ``table1`` as well as itself.
"""
self._reset_exported()
rc = []
for c in columns:
c = _interpret_as_column_or_from(c)
if isinstance(c, ScalarSelect):
c = c.self_group(against=operators.comma_op)
rc.append(c)
self._raw_columns = rc
    @_generative
    def where(self, whereclause):
        """return a new select() construct with the given expression added to
        its WHERE clause, joined to the existing clause via AND, if any.
        """
        # @_generative has already produced a copy; mutate it in place
        self.append_whereclause(whereclause)
    @_generative
    def having(self, having):
        """return a new select() construct with the given expression added to
        its HAVING clause, joined to the existing clause via AND, if any.
        """
        # @_generative has already produced a copy; mutate it in place
        self.append_having(having)
@_generative
def distinct(self, *expr):
"""Return a new select() construct which will apply DISTINCT to its
columns clause.
:param \*expr: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>>)``
construct.
"""
if expr:
expr = [_literal_as_text(e) for e in expr]
if isinstance(self._distinct, list):
self._distinct = self._distinct + expr
else:
self._distinct = expr
else:
self._distinct = True
    @_generative
    def select_from(self, fromclause):
        """return a new :func:`.select` construct with the
        given FROM expression
        merged into its list of FROM objects.

        E.g.::

            table1 = table('t1', column('a'))
            table2 = table('t2', column('b'))
            s = select([table1.c.a]).\\
                select_from(
                    table1.join(table2, table1.c.a==table2.c.b)
                )

        The "from" list is a unique set on the identity of each element,
        so adding an already present :class:`.Table` or other selectable
        will have no effect.   Passing a :class:`.Join` that refers
        to an already present :class:`.Table` or other selectable will have
        the effect of concealing the presence of that selectable as
        an individual element in the rendered FROM list, instead
        rendering it into a JOIN clause.

        While the typical purpose of :meth:`.Select.select_from` is to
        replace the default, derived FROM clause with a join, it can
        also be called with individual table elements, multiple times
        if desired, in the case that the FROM clause cannot be fully
        derived from the columns clause::

            select([func.count('*')]).select_from(table1)
        """
        # @_generative has already produced a copy; mutate it in place
        self.append_from(fromclause)
    @_generative
    def correlate(self, *fromclauses):
        """return a new :class:`.Select` which will correlate the given FROM
        clauses to that of an enclosing :class:`.Select`.

        Calling this method turns off the :class:`.Select` object's
        default behavior of "auto-correlation".  Normally, FROM elements
        which appear in a :class:`.Select` that encloses this one via
        its :term:`WHERE clause`, ORDER BY, HAVING or
        :term:`columns clause` will be omitted from this :class:`.Select`
        object's :term:`FROM clause`.
        Setting an explicit correlation collection using the
        :meth:`.Select.correlate` method provides a fixed list of FROM objects
        that can potentially take place in this process.

        When :meth:`.Select.correlate` is used to apply specific FROM clauses
        for correlation, the FROM elements become candidates for
        correlation regardless of how deeply nested this :class:`.Select`
        object is, relative to an enclosing :class:`.Select` which refers to
        the same FROM object.  This is in contrast to the behavior of
        "auto-correlation" which only correlates to an immediate enclosing
        :class:`.Select`.   Multi-level correlation ensures that the link
        between enclosed and enclosing :class:`.Select` is always via
        at least one WHERE/ORDER BY/HAVING/columns clause in order for
        correlation to take place.

        If ``None`` is passed, the :class:`.Select` object will correlate
        none of its FROM entries, and all will render unconditionally
        in the local FROM clause.

        :param \*fromclauses: a list of one or more :class:`.FromClause`
         constructs, or other compatible constructs (i.e. ORM-mapped
         classes) to become part of the correlate collection.

        .. versionchanged:: 0.8.0 ORM-mapped classes are accepted by
           :meth:`.Select.correlate`.

        .. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no
           longer unconditionally removes entries from the FROM clause;
           instead, the candidate FROM entries must also be matched by a FROM
           entry located in an enclosing :class:`.Select`, which ultimately
           encloses this one as present in the WHERE clause, ORDER BY clause,
           HAVING clause, or columns clause of an enclosing :meth:`.Select`.

        .. versionchanged:: 0.8.2 explicit correlation takes place
           via any level of nesting of :class:`.Select` objects; in previous
           0.8 versions, correlation would only occur relative to the
           immediate enclosing :class:`.Select` construct.

        .. seealso::

            :meth:`.Select.correlate_except`

            :ref:`correlated_subqueries`
        """
        self._auto_correlate = False
        if fromclauses and fromclauses[0] is None:
            # correlate(None): explicitly correlate nothing
            self._correlate = ()
        else:
            self._correlate = set(self._correlate).union(
                _interpret_as_from(f) for f in fromclauses)
    @_generative
    def correlate_except(self, *fromclauses):
        """return a new :class:`.Select` which will omit the given FROM
        clauses from the auto-correlation process.

        Calling :meth:`.Select.correlate_except` turns off the
        :class:`.Select` object's default behavior of
        "auto-correlation" for the given FROM elements.  An element
        specified here will unconditionally appear in the FROM list, while
        all other FROM elements remain subject to normal auto-correlation
        behaviors.

        .. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except`
           method was improved to fully prevent FROM clauses specified here
           from being omitted from the immediate FROM clause of this
           :class:`.Select`.

        If ``None`` is passed, the :class:`.Select` object will correlate
        all of its FROM entries.

        .. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will
           correctly auto-correlate all FROM clauses.

        :param \*fromclauses: a list of one or more :class:`.FromClause`
         constructs, or other compatible constructs (i.e. ORM-mapped
         classes) to become part of the correlate-exception collection.

        .. seealso::

            :meth:`.Select.correlate`

            :ref:`correlated_subqueries`
        """
        self._auto_correlate = False
        if fromclauses and fromclauses[0] is None:
            # correlate_except(None): empty exception set; everything is
            # again eligible for auto-correlation
            self._correlate_except = ()
        else:
            # _correlate_except may still be None at this point, hence or ()
            self._correlate_except = set(self._correlate_except or ()).union(
                _interpret_as_from(f) for f in fromclauses)
def append_correlation(self, fromclause):
"""append the given correlation expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`~.Select.correlate` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._auto_correlate = False
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclause)
def append_column(self, column):
"""append the given column expression to the columns clause of this
select() construct.
This is an **in-place** mutation method; the
:meth:`~.Select.column` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
column = _interpret_as_column_or_from(column)
if isinstance(column, ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
def append_prefix(self, clause):
"""append the given columns clause prefix expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`~.Select.prefix_with` method is preferred, as it provides standard
:term:`method chaining`.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
def append_whereclause(self, whereclause):
"""append the given expression to this select() construct's WHERE
criterion.
The expression will be joined to existing WHERE criterion via AND.
This is an **in-place** mutation method; the
:meth:`~.Select.where` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
whereclause = _literal_as_text(whereclause)
if self._whereclause is not None:
self._whereclause = and_(self._whereclause, whereclause)
else:
self._whereclause = whereclause
def append_having(self, having):
"""append the given expression to this select() construct's HAVING
criterion.
The expression will be joined to existing HAVING criterion via AND.
This is an **in-place** mutation method; the
:meth:`~.Select.having` method is preferred, as it provides standard
:term:`method chaining`.
"""
if self._having is not None:
self._having = and_(self._having, _literal_as_text(having))
else:
self._having = _literal_as_text(having)
def append_from(self, fromclause):
"""append the given FromClause expression to this select() construct's
FROM clause.
This is an **in-place** mutation method; the
:meth:`~.Select.select_from` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
fromclause = _interpret_as_from(fromclause)
self._from_obj = self._from_obj.union([fromclause])
    @_memoized_property
    def _columns_plus_names(self):
        """Return ``(label_name, column)`` pairs for the columns clause.

        With ``use_labels`` in effect, each column renders under its
        ``_label``; a duplicate label falls back to the column's
        ``anon_label`` so no two columns share a rendered name.  Without
        labels, every pair carries ``None`` for the name.
        """
        if self.use_labels:
            names = set()

            def name_for_col(c):
                if c._label is None:
                    return (None, c)
                name = c._label
                if name in names:
                    # duplicate label within this statement; disambiguate
                    name = c.anon_label
                else:
                    names.add(name)
                return name, c

            return [
                name_for_col(c)
                for c in util.unique_list(_select_iterables(self._raw_columns))
            ]
        else:
            return [
                (None, c)
                for c in util.unique_list(_select_iterables(self._raw_columns))
            ]
    def _populate_column_collection(self):
        """Build the exported ``.c`` collection by proxying each column
        of the columns clause onto this Select.
        """
        for name, c in self._columns_plus_names:
            if not hasattr(c, '_make_proxy'):
                # non-column expressions (e.g. raw text) produce no proxy
                continue
            if name is None:
                key = None
            elif self.use_labels:
                key = c._key_label
                if key is not None and key in self.c:
                    # collection key collision; fall back to anonymous label
                    key = c.anon_label
            else:
                key = None
            c._make_proxy(self, key=key,
                          name=name,
                          name_is_truncatable=True)
    def _refresh_for_new_column(self, column):
        """React to ``column`` having been added to one of this statement's
        FROM objects after construction.

        Returns the new proxy column if one was exported onto ``.c``,
        else ``None``.
        """
        for fromclause in self._froms:
            col = fromclause._refresh_for_new_column(column)
            if col is not None:
                if col in self.inner_columns and self._cols_populated:
                    our_label = col._key_label if self.use_labels else col.key
                    if our_label not in self.c:
                        return col._make_proxy(self,
                            name=col._label if self.use_labels else None,
                            key=col._key_label if self.use_labels else None,
                            name_is_truncatable=True)
                # column belongs to one of our froms but needs no new proxy;
                # stop searching
                return None
        return None
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
This produces an element that can be embedded in an expression. Note
that this method is called automatically as needed when constructing
expressions and should not require explicit use.
"""
if isinstance(against, CompoundSelect):
return self
return FromGrouping(self)
    def union(self, other, **kwargs):
        """return a SQL UNION of this select() construct against the given
        selectable."""
        # ``union`` resolves to the module-level constructor, not this method
        return union(self, other, **kwargs)
    def union_all(self, other, **kwargs):
        """return a SQL UNION ALL of this select() construct against the given
        selectable.
        """
        # delegates to the module-level union_all() constructor
        return union_all(self, other, **kwargs)
    def except_(self, other, **kwargs):
        """return a SQL EXCEPT of this select() construct against the given
        selectable."""
        # delegates to the module-level except_() constructor
        return except_(self, other, **kwargs)
    def except_all(self, other, **kwargs):
        """return a SQL EXCEPT ALL of this select() construct against the
        given selectable.
        """
        # delegates to the module-level except_all() constructor
        return except_all(self, other, **kwargs)
    def intersect(self, other, **kwargs):
        """return a SQL INTERSECT of this select() construct against the given
        selectable.
        """
        # delegates to the module-level intersect() constructor
        return intersect(self, other, **kwargs)
    def intersect_all(self, other, **kwargs):
        """return a SQL INTERSECT ALL of this select() construct against the
        given selectable.
        """
        # delegates to the module-level intersect_all() constructor
        return intersect_all(self, other, **kwargs)
def bind(self):
if self._bind:
return self._bind
froms = self._froms
if not froms:
for c in self._raw_columns:
e = c.bind
if e:
self._bind = e
return e
else:
e = list(froms)[0].bind
if e:
self._bind = e
return e
return None
    def _set_bind(self, bind):
        # setter counterpart used by the ``bind`` property below
        self._bind = bind
    # expose bind()/_set_bind() as a read-write property; note this rebinds
    # the class attribute ``bind`` from the method to the property object
    bind = property(bind, _set_bind)
class UpdateBase(HasPrefixes, Executable, ClauseElement):
    """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.

    """

    __visit_name__ = 'update_base'

    # DML statements autocommit by default when executed outside an
    # explicit transaction
    _execution_options = \
        Executable._execution_options.union({'autocommit': True})
    kwargs = util.immutabledict()
    _hints = util.immutabledict()
    _prefixes = ()

    def _process_colparams(self, parameters):
        """Normalize constructor/values() parameters.

        Returns a ``(parameters, is_multi)`` tuple.  A tuple/list of
        positional values is zipped against ``self.table.c`` into a dict;
        a list of such collections becomes a "multi-params" list.

        NOTE(review): an empty list/tuple argument reaches
        ``parameters[0]`` below and raises IndexError -- presumably
        callers never pass one; confirm before relying on it.
        """
        def process_single(p):
            if isinstance(p, (list, tuple)):
                return dict(
                    (c.key, pval)
                    for c, pval in zip(self.table.c, p)
                )
            else:
                return p

        if isinstance(parameters, (list, tuple)) and \
                isinstance(parameters[0], (list, tuple, dict)):

            if not self._supports_multi_parameters:
                raise exc.InvalidRequestError(
                    "This construct does not support "
                    "multiple parameter sets.")

            return [process_single(p) for p in parameters], True
        else:
            return process_single(parameters), False

    def params(self, *arg, **kw):
        """Set the parameters for the statement.

        This method raises ``NotImplementedError`` on the base class,
        and is overridden by :class:`.ValuesBase` to provide the
        SET/VALUES clause of UPDATE and INSERT.

        """
        raise NotImplementedError(
            "params() is not supported for INSERT/UPDATE/DELETE statements."
            " To set the values for an INSERT or UPDATE statement, use"
            " stmt.values(**parameters).")

    def bind(self):
        """Return a 'bind' linked to this :class:`.UpdateBase`
        or a :class:`.Table` associated with it.

        """
        return self._bind or self.table.bind

    def _set_bind(self, bind):
        self._bind = bind
    # read-write property built from the two methods above
    bind = property(bind, _set_bind)

    @_generative
    def returning(self, *cols):
        """Add a RETURNING or equivalent clause to this statement.

        The given list of columns represent columns within the table that is
        the target of the INSERT, UPDATE, or DELETE. Each element can be any
        column expression. :class:`~sqlalchemy.schema.Table` objects will be
        expanded into their individual columns.

        Upon compilation, a RETURNING clause, or database equivalent,
        will be rendered within the statement.   For INSERT and UPDATE,
        the values are the newly inserted/updated values.  For DELETE,
        the values are those of the rows which were deleted.

        Upon execution, the values of the columns to be returned
        are made available via the result set and can be iterated
        using ``fetchone()`` and similar.   For DBAPIs which do not
        natively support returning values (i.e. cx_oracle),
        SQLAlchemy will approximate this behavior at the result level
        so that a reasonable amount of behavioral neutrality is
        provided.

        Note that not all databases/DBAPIs
        support RETURNING.   For those backends with no support,
        an exception is raised upon compilation and/or execution.
        For those who do support it, the functionality across backends
        varies greatly, including restrictions on executemany()
        and other statements which return multiple rows. Please
        read the documentation notes for the database in use in
        order to determine the availability of RETURNING.

        """
        self._returning = cols

    @_generative
    def with_hint(self, text, selectable=None, dialect_name="*"):
        """Add a table hint for a single table to this
        INSERT/UPDATE/DELETE statement.

        .. note::

         :meth:`.UpdateBase.with_hint` currently applies only to
         Microsoft SQL Server.  For MySQL INSERT/UPDATE/DELETE hints, use
         :meth:`.UpdateBase.prefix_with`.

        The text of the hint is rendered in the appropriate
        location for the database backend in use, relative
        to the :class:`.Table` that is the subject of this
        statement, or optionally to that of the given
        :class:`.Table` passed as the ``selectable`` argument.

        The ``dialect_name`` option will limit the rendering of a particular
        hint to a particular backend. Such as, to add a hint
        that only takes effect for SQL Server::

            mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

        .. versionadded:: 0.7.6

        :param text: Text of the hint.
        :param selectable: optional :class:`.Table` that specifies
         an element of the FROM clause within an UPDATE or DELETE
         to be the subject of the hint - applies only to certain backends.
        :param dialect_name: defaults to ``*``, if specified as the name
         of a particular dialect, will apply these hints only when
         that dialect is in use.
        """
        if selectable is None:
            # default the hint subject to this statement's target table
            selectable = self.table
        self._hints = self._hints.union(
            {(selectable, dialect_name): text})
class ValuesBase(UpdateBase):
    """Supplies support for :meth:`.ValuesBase.values` to
    INSERT and UPDATE constructs."""

    __visit_name__ = 'values_base'

    _supports_multi_parameters = False
    _has_multi_parameters = False

    def __init__(self, table, values, prefixes):
        """:param table: target :class:`.Table` of the statement.
        :param values: initial VALUES/SET parameters; a dict, a tuple of
         positional values, or a list of either (multi-params).
        :param prefixes: optional statement prefix expressions.
        """
        self.table = table
        self.parameters, self._has_multi_parameters = \
            self._process_colparams(values)
        if prefixes:
            self._setup_prefixes(prefixes)

    @_generative
    def values(self, *args, **kwargs):
        """specify a fixed VALUES clause for an INSERT statement, or the SET
        clause for an UPDATE.

        Note that the :class:`.Insert` and :class:`.Update` constructs support
        per-execution time formatting of the VALUES and/or SET clauses,
        based on the arguments passed to :meth:`.Connection.execute`.
        However, the :meth:`.ValuesBase.values` method can be used to "fix" a
        particular set of parameters into the statement.

        Multiple calls to :meth:`.ValuesBase.values` will produce a new
        construct, each one with the parameter list modified to include
        the new parameters sent.  In the typical case of a single
        dictionary of parameters, the newly passed keys will replace
        the same keys in the previous construct.  In the case of a list-based
        "multiple values" construct, each new list of values is extended
        onto the existing list of values.

        :param \**kwargs: key value pairs representing the string key
          of a :class:`.Column` mapped to the value to be rendered into the
          VALUES or SET clause::

                users.insert().values(name="some name")

                users.update().where(users.c.id==5).values(name="some name")

        :param \*args: Alternatively, a dictionary, tuple or list
         of dictionaries or tuples can be passed as a single positional
         argument in order to form the VALUES or
         SET clause of the statement.  The single dictionary form
         works the same as the kwargs form::

            users.insert().values({"name": "some name"})

         If a tuple is passed, the tuple should contain the same number
         of columns as the target :class:`.Table`::

            users.insert().values((5, "some name"))

         The :class:`.Insert` construct also supports multiply-rendered VALUES
         construct, for those backends which support this SQL syntax
         (SQLite, Postgresql, MySQL).  This mode is indicated by passing a
         list of one or more dictionaries/tuples::

            users.insert().values([
                                {"name": "some name"},
                                {"name": "some other name"},
                                {"name": "yet another name"},
                            ])

         In the case of an :class:`.Update`
         construct, only the single dictionary/tuple form is accepted,
         else an exception is raised.  It is also an exception case to
         attempt to mix the single-/multiple- value styles together,
         either through multiple :meth:`.ValuesBase.values` calls
         or by sending a list + kwargs at the same time.

         .. note::

             Passing a multiple values list is *not* the same
             as passing a multiple values list to the
             :meth:`.Connection.execute` method.  Passing a list of parameter
             sets to :meth:`.ValuesBase.values` produces a construct of this
             form::

                INSERT INTO table (col1, col2, col3) VALUES
                                (col1_0, col2_0, col3_0),
                                (col1_1, col2_1, col3_1),
                                ...

             whereas a multiple list passed to :meth:`.Connection.execute`
             has the effect of using the DBAPI
             `executemany() <http://www.python.org/dev/peps/pep-0249/#id18>`_
             method, which provides a high-performance system of invoking
             a single-row INSERT statement many times against a series
             of parameter sets.   The "executemany" style is supported by
             all database backends, as it does not depend on a special SQL
             syntax.

         .. versionadded:: 0.8
             Support for multiple-VALUES INSERT statements.

        .. seealso::

            :ref:`inserts_and_updates` - SQL Expression
            Language Tutorial

            :func:`~.expression.insert` - produce an ``INSERT`` statement

            :func:`~.expression.update` - produce an ``UPDATE`` statement

        """
        if self._has_multi_parameters and kwargs:
            raise exc.InvalidRequestError(
                "This construct already has multiple parameter sets.")

        if args:
            if len(args) > 1:
                raise exc.ArgumentError(
                    "Only a single dictionary/tuple or list of "
                    "dictionaries/tuples is accepted positionally.")
            v = args[0]
        else:
            v = {}

        if self.parameters is None:
            # first values() call (or constructor had no values)
            self.parameters, self._has_multi_parameters = \
                self._process_colparams(v)
        else:
            if self._has_multi_parameters:
                # copy so the parent (pre-generative) statement is untouched
                self.parameters = list(self.parameters)
                p, self._has_multi_parameters = self._process_colparams(v)
                if not self._has_multi_parameters:
                    raise exc.ArgumentError(
                        "Can't mix single-values and multiple values "
                        "formats in one statement")

                self.parameters.extend(p)
            else:
                # copy so the parent (pre-generative) statement is untouched
                self.parameters = self.parameters.copy()
                p, self._has_multi_parameters = self._process_colparams(v)
                if self._has_multi_parameters:
                    raise exc.ArgumentError(
                        "Can't mix single-values and multiple values "
                        "formats in one statement")
                self.parameters.update(p)

        if kwargs:
            if self._has_multi_parameters:
                # BUGFIX: error message previously misspelled
                # "simultaenously"
                raise exc.ArgumentError(
                    "Can't pass kwargs and multiple parameter sets "
                    "simultaneously")
            else:
                self.parameters.update(kwargs)
class Insert(ValuesBase):
    """Represent an INSERT construct.

    The :class:`.Insert` object is created using the
    :func:`~.expression.insert()` function.

    .. seealso::

        :ref:`coretutorial_insert_expressions`

    """
    __visit_name__ = 'insert'

    # INSERT is the one DML form that accepts a list of parameter sets
    _supports_multi_parameters = True

    def __init__(self,
                 table,
                 values=None,
                 inline=False,
                 bind=None,
                 prefixes=None,
                 returning=None,
                 **kwargs):
        """Construct an INSERT against ``table``; see
        :func:`~.expression.insert` for argument descriptions."""
        ValuesBase.__init__(self, table, values, prefixes)
        self._bind = bind
        self.select = None
        self.inline = inline
        self._returning = returning
        self.kwargs = kwargs

    def get_children(self, **kwargs):
        """Return traversable children: the INSERT..SELECT source, if set."""
        return (self.select,) if self.select is not None else ()

    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        # detach the parameter dict from the statement we were cloned from
        self.parameters = self.parameters.copy()
class Update(ValuesBase):
    """Represent an Update construct.

    The :class:`.Update` object is created using the :func:`update()`
    function.

    """
    __visit_name__ = 'update'

    def __init__(self,
                 table,
                 whereclause,
                 values=None,
                 inline=False,
                 bind=None,
                 prefixes=None,
                 returning=None,
                 **kwargs):
        # ValuesBase normalizes table/values/prefixes
        ValuesBase.__init__(self, table, values, prefixes)
        self._bind = bind
        self._returning = returning
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None
        self.inline = inline
        self.kwargs = kwargs

    def get_children(self, **kwargs):
        # only the WHERE criterion is a traversable child here
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()

    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self._whereclause = clone(self._whereclause, **kw)
        self.parameters = self.parameters.copy()

    @_generative
    def where(self, whereclause):
        """return a new update() construct with the given expression added to
        its WHERE clause, joined to the existing clause via AND, if any.

        """
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause,
                                     _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)

    @property
    def _extra_froms(self):
        """FROM objects implied by the WHERE clause beyond the target
        table, used for UPDATE..FROM rendering."""
        # TODO: this could be made memoized
        # if the memoization is reset on each generative call.
        froms = []
        seen = set([self.table])

        if self._whereclause is not None:
            for item in _from_objects(self._whereclause):
                if not seen.intersection(item._cloned_set):
                    froms.append(item)
                seen.update(item._cloned_set)

        return froms
class Delete(UpdateBase):
    """Represent a DELETE construct.

    The :class:`.Delete` object is created using the :func:`delete()`
    function.

    """

    __visit_name__ = 'delete'

    def __init__(self,
                 table,
                 whereclause,
                 bind=None,
                 returning=None,
                 prefixes=None,
                 **kwargs):
        self._bind = bind
        self.table = table
        self._returning = returning

        if prefixes:
            self._setup_prefixes(prefixes)

        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None

        self.kwargs = kwargs

    def get_children(self, **kwargs):
        # only the WHERE criterion is a traversable child here
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()

    @_generative
    def where(self, whereclause):
        """Add the given WHERE clause to a newly returned delete construct,
        joined to any existing clause via AND."""

        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause,
                                     _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)

    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self._whereclause = clone(self._whereclause, **kw)
class _IdentifiedClause(Executable, ClauseElement):
    """Base for transaction-demarcation clauses identified by a name
    (savepoints and their rollback/release counterparts).
    """

    __visit_name__ = 'identified'
    # unlike DML, these clauses never autocommit
    _execution_options = \
        Executable._execution_options.union({'autocommit': False})
    quote = None

    def __init__(self, ident):
        # ident: the savepoint name used in the emitted SQL
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT command."""
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT command."""
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT command."""
    __visit_name__ = 'release_savepoint'
# old names for compatibility
# NOTE: underscored private aliases retained so external code that imported
# the old names keeps working; new code should use the public names.
_BindParamClause = BindParameter
_Label = Label
_SelectBase = SelectBase
_BinaryExpression = BinaryExpression
_Cast = Cast
_Null = Null
_False = False_
_True = True_
_TextClause = TextClause
_UnaryExpression = UnaryExpression
_Case = Case
_Tuple = Tuple
_Over = Over
_Generative = Generative
_TypeClause = TypeClause
_Extract = Extract
_Exists = Exists
_Grouping = Grouping
_FromGrouping = FromGrouping
_ScalarSelect = ScalarSelect
| mit |
rdo-management/neutron | neutron/plugins/vmware/extensions/vnicindex.py | 22 | 1594 | # Copyright 2015 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.v2 import attributes
# Attribute Map
# Name of the extended attribute added to the 'ports' resource.
VNIC_INDEX = 'vnic_index'
# Attribute map merged into the v2.0 API: exposes a readable and writable
# integer (or None) vnic_index attribute on ports.
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {
        VNIC_INDEX:
        {'allow_post': True,
         'allow_put': True,
         'is_visible': True,
         'default': None,
         'convert_to': attributes.convert_to_int_if_not_none}}}
class Vnicindex(object):
    """API extension definition exposing a VNIC index attribute on ports."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "VNIC Index"

    @classmethod
    def get_alias(cls):
        """Machine-readable extension alias."""
        return "vnic-index"

    @classmethod
    def get_description(cls):
        """One-line description of what the extension provides."""
        return "Enable a port to be associated with a VNIC index"

    @classmethod
    def get_namespace(cls):
        """Namespace URL documenting the extension."""
        return "http://docs.openstack.org/ext/neutron/vnic_index/api/v1.0"

    @classmethod
    def get_updated(cls):
        """Timestamp of the extension's last update."""
        return "2014-09-15T12:00:00-00:00"

    def get_extended_resources(self, version):
        """Return the extra port attributes for API v2.0, else nothing."""
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| apache-2.0 |
BT-fgarbely/odoo | addons/purchase/company.py | 383 | 1576 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class company(osv.osv):
    """Extend res.company with purchasing configuration.

    Adds 'po_lead', a per-company safety margin (in days) applied when
    scheduling generated Purchase Orders.
    """
    _inherit = 'res.company'
    _columns = {
        'po_lead': fields.float(
            'Purchase Lead Time', required=True,
            # Fixed: the original adjacent string literals concatenated
            # without separating spaces ("systemgenerates", "products,they").
            help="Margin of error for supplier lead times. When the system "
                 "generates Purchase Orders for procuring products, "
                 "they will be scheduled that many days earlier "
                 "to cope with unexpected supplier delays."),
    }
    _defaults = {
        # One-day safety margin by default.
        'po_lead': lambda *a: 1.0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rismalrv/edx-platform | common/djangoapps/monitoring/signals.py | 172 | 4584 | """
Add receivers for django signals, and feed data into the monitoring system.
If a model has a class attribute 'METRIC_TAGS' that is a list of strings,
those fields will be retrieved from the model instance, and added as tags to
the recorded metrics.
"""
from django.db.models.signals import post_save, post_delete, m2m_changed, post_init
from django.dispatch import receiver
import dogstats_wrapper as dog_stats_api
def _database_tags(action, sender, kwargs):
    """
    Return a list of tags describing a django.db.models signal.

    Arguments:
        action (str): What action is being performed on the db model.
        sender (Model): What model class is the action being performed on.
        kwargs (dict): The kwargs passed by the model signal.
    """
    tags = _model_tags(kwargs, 'instance') + [u'action:{}'.format(action)]
    if 'using' in kwargs:
        tags.append(u'database:{}'.format(kwargs['using']))
    return tags
def _model_tags(kwargs, key):
"""
Return a list of all tags for all attributes in kwargs[key].MODEL_TAGS,
plus a tag for the model class.
"""
if key not in kwargs:
return []
instance = kwargs[key]
tags = [
u'{}.{}:{}'.format(key, attr, getattr(instance, attr))
for attr in getattr(instance, 'MODEL_TAGS', [])
]
tags.append(u'model_class:{}'.format(instance.__class__.__name__))
return tags
@receiver(post_init, dispatch_uid='edxapp.monitoring.post_init_metrics')
def post_init_metrics(sender, **kwargs):
    """
    Record the number of times that django models are instantiated.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this initialization (optional).
        instance (Model instance): The instance being initialized (optional).
    """
    dog_stats_api.increment(
        'edxapp.db.model',
        tags=_database_tags('initialized', sender, kwargs),
    )
@receiver(post_save, dispatch_uid='edxapp.monitoring.post_save_metrics')
def post_save_metrics(sender, **kwargs):
    """
    Record the number of times that django models are saved (created or updated).

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this update (optional).
        instance (Model instance): The instance being updated (optional).
    """
    # 'created' is consumed here so it does not leak into the tag helper.
    if kwargs.pop('created', False):
        action = 'created'
    else:
        action = 'updated'
    dog_stats_api.increment(
        'edxapp.db.model',
        tags=_database_tags(action, sender, kwargs),
    )
@receiver(post_delete, dispatch_uid='edxapp.monitoring.post_delete_metrics')
def post_delete_metrics(sender, **kwargs):
    """
    Record the number of times that django models are deleted.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance being deleted (optional).
    """
    dog_stats_api.increment(
        'edxapp.db.model',
        tags=_database_tags('deleted', sender, kwargs),
    )
@receiver(m2m_changed, dispatch_uid='edxapp.monitoring.m2m_changed_metrics')
def m2m_changed_metrics(sender, **kwargs):
    """
    Record the number of times that Many2Many fields are updated. This is
    separated from post_save and post_delete because the signal is sent by
    the through-model in the middle of the Many2Many relationship, not by
    either of the two participating models.

    Args:
        sender (Model): The model class in the middle of the Many2Many relationship.
        action (str): The action being taken on this Many2Many relationship.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance whose many-to-many relation is being modified.
        model (Model class): The model of the class being added/removed/cleared from the relation.
    """
    if 'action' not in kwargs:
        return
    # Only the post_* phases of the m2m signal are counted.
    metric_action = {
        'post_add': 'm2m.added',
        'post_remove': 'm2m.removed',
        'post_clear': 'm2m.cleared',
    }.get(kwargs['action'])
    if metric_action is None:
        return
    tags = _database_tags(metric_action, sender, kwargs)
    if 'model' in kwargs:
        tags.append('target_class:{}'.format(kwargs['model'].__name__))
    # pk_set may be absent or None (e.g. for 'clear' actions).
    affected = kwargs.get('pk_set') or []
    dog_stats_api.increment(
        'edxapp.db.model',
        value=len(affected),
        tags=tags,
    )
| agpl-3.0 |
jinmm1992/moose | framework/contrib/nsiqcppstyle/rules/RULE_3_2_H_do_not_use_underbars_for_cpp_filename.py | 43 | 1450 | """
Do not use unberbars for cpp filename.
Only alphabets, numbers can be used for a cpp filename.
== Vilolation ==
/testdir/test_1.cpp <== Violation. - is used.
/testdir1/_test1.cpp <== Violation. _ is used
== Good ==
testdir/test.cpp
testdir1/test_1.c <== Don't care. it's c file.
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, filename, dirname):
    """Report a violation when a .cpp/.cxx file name contains an underscore."""
    extension = filename[filename.rfind("."):]
    if Search("[_]", filename) and extension in (".cpp", ".cxx"):
        nsiqcppstyle_reporter.Error(
            DummyToken(lexer.filename, "", 0, 0), __name__,
            "Do not use underbar for cpp file name (%s)." % filename)

ruleManager.AddFileStartRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
    """Unit tests for the underscore-in-cpp-filename rule."""
    def setUpRule(self):
        # Register the rule under test with the rule manager.
        ruleManager.AddFileStartRule(RunRule)
    def test1(self):
        # A .cpp file containing an underscore must be reported.
        self.Analyze("test/this_file.cpp", "")
        assert CheckErrorContent(__name__)
    def test2(self):
        # A .cpp file without underscores is clean.
        self.Analyze("test/thisfile.cpp", "")
        assert not CheckErrorContent(__name__)
    def test3(self):
        # Plain .c files are outside this rule's scope.
        self.Analyze("test/this_file.c", "")
        assert not CheckErrorContent(__name__)
| lgpl-2.1 |
hasteur/UAABOT | wikipedia.py | 4 | 432713 | # -*- coding: utf-8 -*-
"""
Library to get and put pages on a MediaWiki wiki.
Contents of the library (objects and functions to be used outside)
Classes:
Page(site, title): A page on a MediaWiki site
DataPage(site, title): A Page object for the data repository.
ImagePage(site, title): An image descriptor Page
Site(lang, fam): A MediaWiki site
Factory functions:
Family(name): Import the named family
getSite(lang, fam): Return a Site instance
Exceptions:
Error: Base class for all exceptions in this module
NoUsername: Username is not in user-config.py
NoPage: Page does not exist on the wiki
NoSuchSite: Site does not exist
IsRedirectPage: Page is a redirect page
IsNotRedirectPage: Page is not a redirect page
LockedPage: Page is locked
SectionError: The section specified in the Page title does not exist
PageNotSaved: Saving the page has failed
EditConflict: PageNotSaved due to edit conflict while uploading
SpamfilterError: PageNotSaved due to MediaWiki spam filter
LongPageError: PageNotSaved due to length limit
ServerError: Got unexpected response from wiki server
BadTitle: Server responded with BadTitle
UserBlocked: Client's username or IP has been blocked
PageNotFound: Page not found in list
Objects:
get_throttle: Call to limit rate of read-access to wiki
put_throttle: Call to limit rate of write-access to wiki
Other functions:
getall(): Load a group of pages
handleArgs(): Process all standard command line arguments (such as
-family, -lang, -log and others)
translate(xx, dict): dict is a dictionary, giving text depending on
language, xx is a language. Returns the text in the most applicable
language for the xx: wiki
setAction(text): Use 'text' instead of "Wikipedia python library" in
edit summaries
setUserAgent(text): Sets the string being passed to the HTTP server as
the User-agent: header. The default is
'<script>/<revision> Pywikipediabot/1.0', where '<script>' is the tail
path component and file name of the currently executing script and
revision is the SVN revision of Pywikipediabot.
output(text): Prints the text 'text' in the encoding of the user's
console. **Use this instead of "print" statements**
stdout(text): Prints to stdout **Use this for script results only!**
warning(text): Prints warnings.
error(text): Prints errors.
log(text): Prints general log messages.
critical(text): Prints critical errors.
debug(text): Prints debug information.
debugDump(): Prints huge debug information.
exception(msg): Prints excpetions and tracebacks.
input(text): Asks input from the user, printing the text 'text' first.
inputChoice: Shows user a list of choices and returns user's selection.
showDiff(oldtext, newtext): Prints the differences between oldtext and
newtext on the screen
Wikitext manipulation functions: each of these takes a unicode string
containing wiki text as its first argument, and returns a modified version
of the text unless otherwise noted --
replaceExcept: replace all instances of 'old' by 'new', skipping any
instances of 'old' within comments and other special text blocks
removeDisabledParts: remove text portions exempt from wiki markup
isDisabled(text,index): return boolean indicating whether text[index] is
within a non-wiki-markup section of text
decodeEsperantoX: decode Esperanto text using the x convention.
encodeEsperantoX: convert wikitext to the Esperanto x-encoding.
findmarker(text, startwith, append): return a string which is not part
of text
expandmarker(text, marker, separator): return marker string expanded
backwards to include separator occurrences plus whitespace
Wikitext manipulation functions for interlanguage links:
getLanguageLinks(text,xx): extract interlanguage links from text and
return in a dict
removeLanguageLinks(text): remove all interlanguage links from text
removeLanguageLinksAndSeparator(text, site, marker, separator = ''):
remove language links, whitespace, preceeding separators from text
replaceLanguageLinks(oldtext, new): remove the language links and
replace them with links from a dict like the one returned by
getLanguageLinks
interwikiFormat(links): convert a dict of interlanguage links to text
(using same dict format as getLanguageLinks)
interwikiSort(sites, inSite): sorts a list of sites according to interwiki
sort preference of inSite.
url2link: Convert urlname of a wiki page into interwiki link format.
Wikitext manipulation functions for category links:
getCategoryLinks(text): return list of Category objects corresponding
to links in text
removeCategoryLinks(text): remove all category links from text
replaceCategoryLinksAndSeparator(text, site, marker, separator = ''):
remove language links, whitespace, preceeding separators from text
replaceCategoryLinks(oldtext,new): replace the category links in oldtext by
those in a list of Category objects
replaceCategoryInPlace(text,oldcat,newtitle): replace a single link to
oldcat with a link to category given by newtitle
categoryFormat(links): return a string containing links to all
Categories in a list.
Unicode utility functions:
UnicodeToAsciiHtml: Convert unicode to a bytestring using HTML entities.
url2unicode: Convert url-encoded text to unicode using a site's encoding.
unicode2html: Ensure unicode string is encodable; if not, convert it to
ASCII for HTML.
html2unicode: Replace HTML entities in text with unicode characters.
stopme(): Put this on a bot when it is not or not communicating with the Wiki
any longer. It will remove the bot from the list of running processes,
and thus not slow down other bot threads anymore.
"""
#
# (C) Pywikipedia bot team, 2003-2013
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: 13ee2ab95c63643cc78f2dd6a540f488d6175aa0 $'
import os
import sys
import cookielib
import httplib
import urllib
import urllib2
import pprint
import socket
import traceback
import time
import datetime
import threading
import Queue
import re
import codecs
import difflib
try:
from hashlib import md5
except ImportError: # Python 2.4 compatibility
from md5 import new as md5
import xml.sax
import xml.sax.handler
import htmlentitydefs
import warnings
import unicodedata
import xmlreader
import externals # allow imports from externals
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer
import weakref
import logging
import logging.handlers
try:
import json
except ImportError:
import simplejson as json
# Splitting the bot into library parts
from pywikibot.support import *
import config
import login
import query
from pywikibot import version
# Check Unicode support (is this a wide or narrow python build?)
# See http://www.python.org/doc/peps/pep-0261/
try:
    unichr(66365) # a character in th: alphabet, uses 32 bit encoding
    WIDEBUILD = True
except ValueError:
    WIDEBUILD = False
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
# Custom logging levels used by this library, chosen to interleave with
# the standard ones (DEBUG=10 .. CRITICAL=50).
STDOUT = 16
VERBOSE = 18
INPUT = 25
# Format string for the default user agent.
USER_AGENT_FORMAT = '%(script)s/r%(versionrev)s Pywikipediabot/1.0'
# Shorthand for the SAX parse error raised by xml.sax.
SaxError = xml.sax._exceptions.SAXParseException
# Pre-compile re expressions
# Splits "Namespace: rest" title prefixes.
reNamespace = re.compile("^(.+?) *: *(.*)$")
# Scrapers for the edit token and watchlist checkbox in the HTML edit form.
Rwatch = re.compile(
    r"<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"")
Rwatchlist = re.compile(r"<input tabindex='[\d]+' type='checkbox' "
                        r"name='wpWatchthis' checked='checked'")
# Matches internal wiki links: [[title]] or [[title|label]].
Rlink = re.compile(r'\[\[(?P<title>[^\]\|\[]*)(\|[^\]]*)?\]\]')
# Page objects (defined here) represent the page itself, including its contents.
class Page(object):
"""Page: A MediaWiki page
Constructor has two required parameters:
1) The wiki Site on which the page resides [note that, if the
title is in the form of an interwiki link, the Page object may
have a different Site than this]
2) The title of the page as a unicode string
Optional parameters:
insite - the wiki Site where this link was found (to help decode
interwiki links)
defaultNamespace - A namespace to use if the link does not contain one
Methods available:
title : The name of the page, including namespace and
section if any
urlname : Title, in a form suitable for a URL
namespace : The namespace in which the page is found
section : The section of the page (the part of the title
after '#', if any)
sectionFreeTitle : Title, without the section part
site : The wiki this page is in
encoding : The encoding of the page
isAutoTitle : Title can be translated using the autoFormat method
autoFormat : Auto-format certain dates and other standard
format page titles
isCategory : True if the page is a category
isDisambig (*) : True if the page is a disambiguation page
isImage : True if the page is an image
isRedirectPage (*) : True if the page is a redirect, false otherwise
getRedirectTarget (*) : The page the page redirects to
isTalkPage : True if the page is in any "talk" namespace
toggleTalkPage : Return the talk page (if this is one, return the
non-talk page)
get (*) : The text of the page
getSections (*) : Retrieve page section heading and assign them to
the byte offset
latestRevision (*) : The page's current revision id
userName : Last user to edit page
userNameHuman : Last human (non-bot) user to edit page
isIpEdit : True if last editor was unregistered
editTime : Timestamp of the last revision to the page
previousRevision (*) : The revision id of the previous version
permalink (*) : The url of the permalink of the current version
getOldVersion(id) (*) : The text of a previous version of the page
getRestrictions : Returns a protection dictionary
getVersionHistory : Load the version history information from wiki
getVersionHistoryTable: Create a wiki table from the history data
fullVersionHistory : Return all past versions including wikitext
contributingUsers : Return set of users who have edited page
getCreator : Function to get the first editor of a page
getLatestEditors : Function to get the last editors of a page
exists (*) : True if the page actually exists, false otherwise
isEmpty (*) : True if the page has 4 characters or less content,
not counting interwiki and category links
interwiki (*) : The interwiki links from the page (list of Pages)
categories (*) : The categories the page is in (list of Pages)
linkedPages (*) : The normal pages linked from the page (list of
Pages)
imagelinks (*) : The pictures on the page (list of ImagePages)
templates (*) : All templates referenced on the page (list of
Pages)
templatesWithParams(*): All templates on the page, with list of parameters
getReferences : List of pages linking to the page
canBeEdited (*) : True if page is unprotected or user has edit
privileges
protection(*) : This page protection level
botMayEdit (*) : True if bot is allowed to edit page
put(newtext) : Saves the page
put_async(newtext) : Queues the page to be saved asynchronously
append(newtext) : Append to page section
watch : Add the page to the watchlist
unwatch : Remove the page from the watchlist
move : Move the page to another title
delete : Deletes the page (requires being logged in)
protect : Protect or unprotect a page (requires sysop status)
removeImage : Remove all instances of an image from this page
replaceImage : Replace all instances of an image with another
loadDeletedRevisions : Load all deleted versions of this page
getDeletedRevision : Return a particular deleted revision
markDeletedRevision : Mark a version to be undeleted, or not
undelete : Undelete past version(s) of the page
purgeCache : Purge page from server cache
(*) : This loads the page if it has not been loaded before; permalink might
even reload it if it has been loaded before
"""
    def __init__(self, site, title, insite=None, defaultNamespace=0):
        """Instantiate a Page object.

        site -- Site (or site code string) the page lives on; an interwiki
            prefix in the title may redirect this to another site.
        title -- page title, possibly carrying a namespace or interwiki
            prefix and a '#section' suffix.
        insite -- Site the link was found on (helps decode URL-encoded
            titles).
        defaultNamespace -- namespace index used when the title carries no
            recognized namespace prefix.
        """
        try:
            # if _editrestriction is True, it means that the page has been found
            # to have an edit restriction, but we do not know yet whether the
            # restriction affects us or not
            self._editrestriction = False
            if site is None or isinstance(site, basestring):
                site = getSite(site)
            self._site = site
            if not insite:
                insite = site
            # Clean up the name, it can come from anywhere.
            # Convert HTML entities to unicode
            t = html2unicode(title)
            # Convert URL-encoded characters to unicode
            # Sometimes users copy the link to a site from one to another.
            # Try both the source site and the destination site to decode.
            try:
                t = url2unicode(t, site=insite, site2=site)
            except UnicodeDecodeError:
                raise InvalidTitle(u'Bad page title : %s' % t)
            # Normalize unicode string to a NFC (composed) format to allow
            # proper string comparisons. According to
            # http://svn.wikimedia.org/viewvc/mediawiki/branches/REL1_6/phase3/includes/normal/UtfNormal.php?view=markup
            # the mediawiki code normalizes everything to NFC, not NFKC
            # (which might result in information loss).
            t = unicodedata.normalize('NFC', t)
            if u'\ufffd' in t:
                raise InvalidTitle("Title contains illegal char (\\uFFFD)")
            # Replace underscores by spaces
            t = t.replace(u"_", u" ")
            # replace multiple spaces a single space
            # NOTE(review): this loop only terminates if the first literal is
            # a double space and the second a single space; the whitespace
            # inside these literals looks collapsed in transit -- confirm
            # against the upstream file.
            while u" " in t:
                t = t.replace(u" ", u" ")
            # Strip spaces at both ends
            t = t.strip()
            # Remove left-to-right and right-to-left markers.
            t = t.replace(u'\u200e', '').replace(u'\u200f', '')
            if t.startswith(':'):
                t = t[1:]
                prefix = True
            else:
                prefix = False
            self._namespace = defaultNamespace
            #
            # This code was adapted from Title.php : secureAndSplit()
            #
            # Namespace or interwiki prefix
            # Each iteration strips one "prefix:" from the front of the title
            # until no recognized namespace/interwiki prefix remains.
            while True:
                m = reNamespace.match(t)
                if not m:
                    # leading colon implies main namespace instead of default
                    if t.startswith(':'):
                        t = t[1:]
                        self._namespace = 0
                    elif prefix:
                        self._namespace = 0
                    else:
                        self._namespace = defaultNamespace
                    break
                prefix = False
                p = m.group(1)
                lowerNs = p.lower()
                ns = self._site.getNamespaceIndex(lowerNs)
                if ns:
                    t = m.group(2)
                    self._namespace = ns
                    break
                if lowerNs in self._site.family.langs.keys():
                    # Interwiki link
                    t = m.group(2)
                    # Redundant interwiki prefix to the local wiki
                    if lowerNs == self._site.lang:
                        if t == '':
                            raise Error("Can't have an empty self-link")
                    else:
                        self._site = getSite(lowerNs, self._site.family.name)
                    if t == '':
                        t = self._site.mediawiki_message('Mainpage')
                elif lowerNs in self._site.family.get_known_families(
                        site=self._site):
                    if self._site.family.get_known_families(
                            site=self._site)[lowerNs] == self._site.family.name:
                        t = m.group(2)
                    else:
                        # This page is from a different family
                        if verbose:
                            output(u"Target link '%s' has different family '%s'"
                                   % (title, lowerNs))
                        if self._site.family.name in ['commons', 'meta']:
                            #When the source wiki is commons or meta,
                            #w:page redirects you to w:en:page
                            otherlang = 'en'
                        else:
                            otherlang = self._site.lang
                        familyName = self._site.family.get_known_families(
                            site=self._site)[lowerNs]
                        if familyName in ['commons', 'meta']:
                            otherlang = familyName
                        try:
                            self._site = getSite(otherlang, familyName)
                        except ValueError:
                            raise NoPage(u"%s is not a local page on %s, and "
                                         u"the %s family is not supported by "
                                         u"PyWikipediaBot!"
                                         % (title, self._site, familyName))
                        t = m.group(2)
                else:
                    # If there's no recognized interwiki or namespace,
                    # then let the colon expression be part of the title.
                    break
            if not t:
                raise InvalidTitle(u"Invalid title '%s'" % title)
            sectionStart = t.find(u'#')
            # But maybe there are magic words like {{#time|}}
            # TODO: recognize magic word and templates inside links
            # see http://la.wikipedia.org/w/index.php?title=997_Priska&diff=prev&oldid=1038880
            if sectionStart > 0:
                # Categories does not have sections.
                if self._namespace == 14:
                    raise InvalidTitle(u"Invalid section in category '%s'" % t)
                else:
                    t, sec = t.split(u'#', 1)
                    self._section = sec.lstrip() or None
                    t = t.rstrip()
            elif sectionStart == 0:
                raise InvalidTitle(u"Invalid title starting with a #: '%s'" % t)
            else:
                self._section = None
            if t:
                if not self._site.nocapitalize:
                    t = t[:1].upper() + t[1:]
            # reassemble the title from its parts
            if self._namespace != 0:
                t = u'%s:%s' % (self._site.namespace(self._namespace), t)
            if self._section:
                t += u'#' + self._section
            self._title = t
            # Lazily-populated page state; filled in by get() and friends.
            self.editRestriction = None
            self.moveRestriction = None
            self._permalink = None
            self._userName = None
            self._comment = None
            self._ipedit = None
            self._editTime = None
            self._startTime = '0'
            self._page_id = None
            # For the Flagged Revisions MediaWiki extension
            self._revisionId = None
            self._deletedRevs = None
        except NoSuchSite:
            raise
        except:
            if verbose:
                output(u"Exception in Page constructor")
                output(
                    u"site=%s, title=%s, insite=%s, defaultNamespace=%i"
                    % (site, title, insite, defaultNamespace)
                )
            raise
    @property
    def site(self):
        """Return the Site object for the wiki on which this Page resides.

        Note: an interwiki prefix in the title may have moved this Page to
        a different site than the one passed to the constructor.
        """
        return self._site
    @property
    def image_repository(self):
        """Return the Site object for the image repository.

        Delegates to this page's site, which knows its shared image repo.
        """
        return self.site.image_repository()
    @property
    def data_repository(self):
        """Return the Site object for the data repository.

        Delegates to this page's site, which knows its data repo (Wikibase).
        """
        return self.site.data_repository()
    def namespace(self):
        """Return the number of the namespace of the page.

        Only recognizes those namespaces defined in family.py.
        If not defined, it will return 0 (the main namespace).
        """
        # _namespace is resolved in __init__ from the title prefix.
        return self._namespace
def ID(self):
if self._page_id:
return self._page_id
else:
self.get()
return self._page_id
    def encoding(self):
        """Return the character encoding used on this Page's wiki Site."""
        # Delegates to the Site, which knows the wiki's output encoding.
        return self._site.encoding()
    @deprecate_arg("decode", None)
    def title(self, underscore=False, savetitle=False, withNamespace=True,
              withSection=True, asUrl=False, asLink=False,
              allowInterwiki=True, forceInterwiki=False, textlink=False,
              as_filename=False):
        """Return the title of this Page, as a Unicode string.

        @param underscore: if true, replace all ' ' characters with '_'
        @param withNamespace: if false, omit the namespace prefix
        @param withSection: if false, omit the section
        @param asUrl: - not implemented yet -
        @param asLink: if true, return the title in the form of a wikilink
        @param allowInterwiki: (only used if asLink is true) if true, format
            the link as an interwiki link if necessary
        @param forceInterwiki: (only used if asLink is true) if true, always
            format the link as an interwiki link
        @param textlink: (only used if asLink is true) if true, place a ':'
            before Category: and Image: links
        @param as_filename: - not implemented yet -
        @param savetitle: if True, encode any wiki syntax in the title.
        """
        title = self._title
        if not withNamespace and self.namespace() != 0:
            # Drop everything up to and including the first ':'.
            title = title.split(':', 1)[1]
        if asLink:
            # The "target" site/family is the default site; links pointing
            # elsewhere are rendered in interwiki form.
            iw_target_site = getSite()
            iw_target_family = getSite().family
            if iw_target_family.interwiki_forward:
                # NOTE(review): 'pywikibot' is only imported here as
                # 'from pywikibot import version'; confirm the bare
                # 'pywikibot.Family' name is actually in scope at runtime.
                iw_target_family = pywikibot.Family(
                    iw_target_family.interwiki_forward)
            if allowInterwiki and (forceInterwiki or
                                   self._site != iw_target_site):
                colon = ""
                if textlink:
                    colon = ":"
                if self._site.family != iw_target_family \
                   and self._site.family.name != self._site.lang:
                    # Cross-family link: include the family name as well.
                    title = u'[[%s%s:%s:%s]]' % (colon,
                                                 self._site.family.name,
                                                 self._site.lang, title)
                else:
                    title = u'[[%s%s:%s]]' % (colon, self._site.lang, title)
            elif textlink and (self.isImage() or self.isCategory()):
                # Leading ':' turns a category/image link into a text link.
                title = u'[[:%s]]' % title
            else:
                title = u'[[%s]]' % title
        if savetitle or asLink:
            # Ensure there's no wiki syntax in the title
            title = title.replace(u"''", u'%27%27')
        if underscore:
            title = title.replace(' ', '_')
        if not withSection:
            sectionName = self.section(underscore=underscore)
            if sectionName:
                # Strip the trailing "#section" part.
                title = title[:-len(sectionName) - 1]
        return title
    #@deprecated("Page.title(withNamespace=False)")
    def titleWithoutNamespace(self, underscore=False):
        """Return title of Page without namespace and without section."""
        # Thin backward-compatibility wrapper; see the (commented-out)
        # deprecation marker above.
        return self.title(underscore=underscore, withNamespace=False,
                          withSection=False)
def titleForFilename(self):
"""
Return the title of the page in a form suitable for a filename on
the user's file system.
"""
result = self.title()
# Replace characters that are not possible in file names on some
# systems.
# Spaces are possible on most systems, but are bad for URLs.
for forbiddenChar in ':*?/\\ ':
result = result.replace(forbiddenChar, '_')
return result
@deprecate_arg("decode", None)
def section(self, underscore=False):
"""Return the name of the section this Page refers to.
The section is the part of the title following a '#' character, if
any. If no section is present, return None.
"""
section = self._section
if section and underscore:
section = section.replace(' ', '_')
return section
def sectionFreeTitle(self, underscore=False):
"""Return the title of this Page, without the section (if any)."""
sectionName = self.section(underscore=underscore)
title = self.title(underscore=underscore)
if sectionName:
return title[:-len(sectionName) - 1]
else:
return title
    def urlname(self, withNamespace=True):
        """Return the Page title encoded for use in an URL."""
        title = self.title(withNamespace=withNamespace, underscore=True)
        # NOTE(review): 'site' is defined as a @property above, yet it is
        # called here as self.site(); this only works if the Site object
        # is itself callable -- confirm against the Site class.
        encodedTitle = title.encode(self.site().encoding())
        return urllib.quote(encodedTitle)
    def __str__(self):
        """Return a console representation of the pagelink."""
        # Force interwiki form so the output is unambiguous regardless of
        # the default site; characters the console encoding cannot show
        # are replaced by XML character references.
        return self.title(asLink=True, forceInterwiki=True
                          ).encode(config.console_encoding,
                                   "xmlcharrefreplace")
    def __unicode__(self):
        """Return the interwiki wikilink form of the title as unicode."""
        return self.title(asLink=True, forceInterwiki=True)
    def __repr__(self):
        """Return a more complete string representation."""
        # e.g. "Page{[[en:Title]]}", encoded for the user's console.
        return "%s{%s}" % (self.__class__.__name__,
                           self.title(asLink=True
                                      ).encode(config.console_encoding))
def __cmp__(self, other):
"""Test for equality and inequality of Page objects.
Page objects are "equal" if and only if they are on the same site
and have the same normalized title, including section if any.
Page objects are sortable by namespace first, then by title.
"""
if not isinstance(other, Page):
# especially, return -1 if other is None
return -1
if self._site == other._site:
return cmp(self._title, other._title)
else:
return cmp(self._site, other._site)
    def __hash__(self):
        """Hash on the unicode (interwiki-link) representation."""
        # Pseudo method that makes it possible to store Page objects as keys
        # in hash-tables. This relies on the fact that the string
        # representation of an instance can not change after the construction.
        return hash(unicode(self))
    @deprecated("Page.title(asLink=True)")
    def aslink(self, forceInterwiki=False, textlink=False, noInterwiki=False):
        """Return a string representation in the form of a wikilink.

        If forceInterwiki is True, return an interwiki link even if it
        points to the home wiki. If False, return an interwiki link only if
        needed.

        If textlink is True, always return a link in text form (that is,
        interwiki links and internal links to the Category: and Image:
        namespaces will be preceded by a : character).

        DEPRECATED to merge to rewrite branch:
        use self.title(asLink=True) instead.
        """
        # Pure forwarding wrapper; note noInterwiki inverts allowInterwiki.
        return self.title(asLink=True, forceInterwiki=forceInterwiki,
                          allowInterwiki=not noInterwiki, textlink=textlink)
    def autoFormat(self):
        """Return (dictName, value) if title is in date.autoFormat dictionary.

        Value can be a year, date, etc., and dictName is 'YearBC',
        'Year_December', or another dictionary name. Please note that two
        entries may have exactly the same autoFormat, but be in two
        different namespaces, as some sites have categories with the
        same names. Regular titles return (None, None).
        """
        # Result is computed once and cached on the instance.
        if not hasattr(self, '_autoFormat'):
            # Imported lazily; presumably to avoid an import cycle at
            # module load time -- confirm.
            import date
            self._autoFormat = date.getAutoFormat(self.site().language(),
                                                  self.title(
                                                      withNamespace=False))
        return self._autoFormat
    def isAutoTitle(self):
        """Return True if title of this Page is in the autoFormat dictionary."""
        # autoFormat() returns (None, None) for regular titles.
        return self.autoFormat()[0] is not None
def get(self, force=False, get_redirect=False, throttle=True,
sysop=False, change_edit_time=True, expandtemplates=False):
"""Return the wiki-text of the page.
This will retrieve the page from the server if it has not been
retrieved yet, or if force is True. This can raise the following
exceptions that should be caught by the calling code:
@exception NoPage The page does not exist
@exception IsRedirectPage The page is a redirect. The argument of the
exception is the title of the page it
redirects to.
@exception SectionError The section does not exist on a page with
a # link
@param force reload all page attributes, including errors.
@param get_redirect return the redirect text, do not follow the
redirect, do not raise an exception.
@param sysop if the user has a sysop account, use it to
retrieve this page
@param change_edit_time if False, do not check this version for
changes before saving. This should be used only
if the page has been loaded previously.
@param expandtemplates all templates in the page content are fully
resolved too (if API is used).
"""
# NOTE: The following few NoPage exceptions could already be thrown at
# the Page() constructor. They are raised here instead for convenience,
# because all scripts are prepared for NoPage exceptions raised by
# get(), but not for such raised by the constructor.
# \ufffd represents a badly encoded character, the other characters are
# disallowed by MediaWiki.
for illegalChar in u'#<>[]|{}\n\ufffd':
if illegalChar in self.sectionFreeTitle():
if verbose:
output(u'Illegal character in %s!'
% self.title(asLink=True))
raise NoPage('Illegal character in %s!'
% self.title(asLink=True))
if self.namespace() == -1:
raise NoPage('%s is in the Special namespace!'
% self.title(asLink=True))
if self.site().isInterwikiLink(self.title()):
raise NoPage('%s is not a local page on %s!'
% (self.title(asLink=True), self.site()))
if force:
# When forcing, we retry the page no matter what:
# * Old exceptions and contents do not apply any more
# * Deleting _contents and _expandcontents to force reload
for attr in ['_redirarg', '_getexception',
'_contents', '_expandcontents',
'_sections']:
if hasattr(self, attr):
delattr(self, attr)
else:
# Make sure we re-raise an exception we got on an earlier attempt
if hasattr(self, '_redirarg') and not get_redirect:
raise IsRedirectPage, self._redirarg # do not change unless you see https://bugzilla.wikimedia.org/show_bug.cgi?id=57879
elif hasattr(self, '_getexception'):
if self._getexception == IsRedirectPage and get_redirect:
pass
else:
raise self._getexception
# Make sure we did try to get the contents once
if expandtemplates:
attr = '_expandcontents'
else:
attr = '_contents'
if not hasattr(self, attr):
try:
contents = self._getEditPage(get_redirect=get_redirect,
throttle=throttle, sysop=sysop,
expandtemplates=expandtemplates)
if expandtemplates:
self._expandcontents = contents
else:
self._contents = contents
hn = self.section()
if hn:
m = re.search("=+[ ']*%s[ ']*=+" % re.escape(hn),
self._contents)
if verbose and not m:
warning(u"Section does not exist: %s" % self)
# Store any exceptions for later reference
except NoPage:
self._getexception = NoPage
raise
except IsRedirectPage, arg:
self._getexception = IsRedirectPage
self._redirarg = arg
if not get_redirect:
raise
except SectionError:
self._getexception = SectionError
raise
except UserBlocked:
if self.site().loggedInAs(sysop=sysop):
raise UserBlocked(self.site(), unicode(self))
else:
if verbose:
output("The IP address is blocked, retry by login.")
self.site().forceLogin(sysop=sysop)
return self.get(force, get_redirect, throttle, sysop,
change_edit_time)
if expandtemplates:
return self._expandcontents
return self._contents
    def _getEditPage(self, get_redirect=False, throttle=True, sysop=False,
                     oldid=None, change_edit_time=True, expandtemplates=False):
        """Get the contents of the Page via API query

        Do not use this directly, use get() instead.

        Arguments:
            oldid - Retrieve an old revision (by id), not the current one
            get_redirect - Get the contents, even if it is a redirect page
            expandtemplates - Fully resolve templates within page content
                              (if API is used)

        This method returns the raw wiki text as a unicode string.

        Side effects: caches revision metadata on the Page object
        (_page_id, _userName, _ipedit, _comment, _revisionId, _editTime,
        _startTime, editRestriction, moveRestriction, _isWatched).
        """
        # Pre-1.12 or API-less wikis: fall back to screen-scraping the edit
        # form (expandtemplates is not supported on that path).
        if not self.site().has_api() or self.site().versionnumber() < 12:
            return self._getEditPageOld(get_redirect, throttle, sysop, oldid,
                                        change_edit_time)
        params = {
            'action': 'query',
            'titles': self.title(),
            'prop': ['revisions', 'info'],
            'rvprop': ['content', 'ids', 'flags', 'timestamp', 'user',
                       'comment', 'size'],
            'rvlimit': 1,
            # 'talkid' valid for release > 1.12
            # 'url', 'readable' valid for release > 1.14
            'inprop': ['protection', 'subjectid'],
            # 'intoken': 'edit',
        }
        if oldid:
            params['rvstartid'] = oldid
        if expandtemplates:
            params[u'rvexpandtemplates'] = u''
        if throttle:
            get_throttle()
        textareaFound = False
        # retrying loop is done by query.GetData
        data = query.GetData(params, self.site(), sysop=sysop)
        if 'error' in data:
            raise RuntimeError("API query error: %s" % data)
        if not 'pages' in data['query']:
            raise RuntimeError("API query error, no pages found: %s" % data)
        pageInfo = data['query']['pages'].values()[0]
        # A page key of "-1" marks a missing or invalid title.
        if data['query']['pages'].keys()[0] == "-1":
            if 'missing' in pageInfo:
                raise NoPage(self.site(), unicode(self), "Page does not exist.")
            elif 'invalid' in pageInfo:
                raise BadTitle('BadTitle: %s' % self)
        elif 'revisions' in pageInfo: # valid Title
            lastRev = pageInfo['revisions'][0]
            if 'textmissing' in lastRev:
                # Maybe we could use a new error exception.
                # Now we just pass and got a server error
                pass
            elif isinstance(lastRev['*'], basestring):
                # '*' carries the revision text; only a real string counts.
                textareaFound = True
        # I got page date with 'revisions' in pageInfo but
        # lastRev['*'] = False instead of the content. The Page itself was
        # deleted but there was not 'missing' in pageInfo as expected
        # I raise a ServerError() yet, but maybe it should be NoPage().
        if not textareaFound:
            if verbose:
                output(unicode(pageInfo))
            raise ServerError('ServerError: No textarea found in %s' % self)
        # From here on lastRev is guaranteed to exist (textareaFound is only
        # set in the 'revisions' branch above).
        self._page_id = pageInfo['pageid']
        self.editRestriction = ''
        self.moveRestriction = ''
        # Note: user may be hidden and mw returns 'userhidden' flag
        if 'userhidden' in lastRev:
            self._userName = None
        else:
            self._userName = lastRev['user']
        self._ipedit = 'anon' in lastRev
        try:
            self._comment = lastRev['comment']
        except KeyError:
            self._comment = None
        for restr in pageInfo['protection']:
            if restr['type'] == 'edit':
                self.editRestriction = restr['level']
            elif restr['type'] == 'move':
                self.moveRestriction = restr['level']
        self._revisionId = lastRev['revid']
        if change_edit_time:
            self._editTime = parsetime2stamp(lastRev['timestamp'])
            if "starttimestamp" in pageInfo:
                self._startTime = parsetime2stamp(pageInfo["starttimestamp"])
        self._isWatched = False # cannot handle in API in my research for now.
        pagetext = lastRev['*']
        pagetext = pagetext.rstrip()
        # pagetext must not decodeEsperantoX() if loaded via API
        m = self.site().redirectRegex().match(pagetext)
        if m:
            # page text matches the redirect pattern
            if self.section() and not "#" in m.group(1):
                redirtarget = "%s#%s" % (m.group(1), self.section())
            else:
                redirtarget = m.group(1)
            if get_redirect:
                self._redirarg = redirtarget
            else:
                raise IsRedirectPage(redirtarget)
        if self.section() and \
           not does_text_contain_section(pagetext, self.section()):
            try:
                self._getexception
            except AttributeError:
                raise SectionError # Page has no section by this name
        return pagetext
    def _getEditPageOld(self, get_redirect=False, throttle=True, sysop=False,
                        oldid=None, change_edit_time=True):
        """Get the contents of the Page via the edit page.

        Legacy screen-scraping fallback used when the wiki has no usable
        API (see _getEditPage). Retries with exponential back-off while no
        textarea is found, as long as config.retry_on_fail is set.

        Side effects: caches editRestriction, moveRestriction, _revisionId,
        _editTime, _startTime and _isWatched on the Page object.
        """
        if verbose:
            output(u'Getting page %s' % self.title(asLink=True))
        path = self.site().edit_address(self.urlname())
        if oldid:
            path += "&oldid=" + oldid
        # Make sure Brion doesn't get angry by waiting if the last time a page
        # was retrieved was not long enough ago.
        if throttle:
            get_throttle()
        textareaFound = False
        retry_idle_time = 1
        while not textareaFound:
            text = self.site().getUrl(path, sysop=sysop)
            if "<title>Wiki does not exist</title>" in text:
                raise NoSuchSite(u'Wiki %s does not exist yet' % self.site())
            # Extract the actual text from the textarea
            m1 = re.search('<textarea([^>]*)>', text)
            m2 = re.search('</textarea>', text)
            if m1 and m2:
                # i1/i2 delimit the wikitext payload; consumed after the loop.
                i1 = m1.end()
                i2 = m2.start()
                textareaFound = True
            else:
                # No textarea: inspect the HTML for known error messages
                # before assuming a transient server failure.
                # search for messages with no "view source"
                # (aren't used in new versions)
                if self.site().mediawiki_message('whitelistedittitle') in text:
                    raise NoPage(u'Page editing is forbidden for anonymous '
                                 u'users.')
                elif (self.site().has_mediawiki_message('nocreatetitle') and
                      self.site().mediawiki_message('nocreatetitle') in text):
                    raise NoPage(self.site(), unicode(self))
                # Bad title
                elif 'var wgPageName = "Special:Badtitle";' in text or \
                     self.site().mediawiki_message('badtitle') in text:
                    raise BadTitle('BadTitle: %s' % self)
                # find out if the username or IP has been blocked
                elif self.site().isBlocked():
                    raise UserBlocked(self.site(), unicode(self))
                # If there is no text area and the heading is 'View Source'
                # but user is not blocked, the page does not exist, and is
                # locked
                elif self.site().mediawiki_message('viewsource') in text:
                    raise NoPage(self.site(), unicode(self))
                # Some of the newest versions don't have a "view source" tag for
                # non-existant pages
                # Check also the div class because if the language is not
                # english
                # the bot can not seeing that the page is blocked.
                elif self.site().mediawiki_message(
                    'badaccess') in text or \
                     "<div class=\"permissions-errors\">" in text:
                    raise NoPage(self.site(), unicode(self))
                elif config.retry_on_fail:
                    if "<title>Wikimedia Error</title>" in text:
                        output(u"Wikimedia has technical problems; will retry "
                               u"in %i minutes." % retry_idle_time)
                    else:
                        output(unicode(text))
                        # We assume that the server is down. Wait some time,
                        # then try again.
                        warning(u"No text area found on %s%s. Maybe the server "
                                u"is down. Retrying in %i minutes..."
                                % (self.site().hostname(), path,
                                   retry_idle_time))
                    time.sleep(retry_idle_time * 60)
                    # Next time wait longer, but not longer than half an hour
                    retry_idle_time *= 2
                    if retry_idle_time > 30:
                        retry_idle_time = 30
                else:
                    output(u"Failed to access wiki")
                    sys.exit(1)
        # Check for restrictions
        m = re.search('var wgRestrictionEdit = \\["(\w+)"\\]', text)
        if m:
            if verbose:
                debug(u"page is locked for group %s" % m.group(1))
            self.editRestriction = m.group(1)
        else:
            self.editRestriction = ''
        m = re.search('var wgRestrictionMove = \\["(\w+)"\\]', text)
        if m:
            self.moveRestriction = m.group(1)
        else:
            self.moveRestriction = ''
        m = re.search('name=["\']baseRevId["\'] type=["\']hidden["\'] '
                      'value="(\d+)"', text)
        if m:
            self._revisionId = m.group(1)
        if change_edit_time:
            # Get timestamps
            m = re.search('value="(\d+)" name=["\']wpEdittime["\']', text)
            if m:
                self._editTime = m.group(1)
            else:
                self._editTime = "0"
            m = re.search('value="(\d+)" name=["\']wpStarttime["\']', text)
            if m:
                self._startTime = m.group(1)
            else:
                self._startTime = "0"
        # Find out if page actually exists. Only existing pages have a
        # version history tab.
        if self.site().family.RversionTab(self.site().language()):
            # In case a family does not have version history tabs, or in
            # another form
            RversionTab = re.compile(
                self.site().family.RversionTab(self.site().language()))
        else:
            RversionTab = re.compile(r'<li id="ca-history"><a href=".*?'
                                     r'title=.*?&action=history".*?>'
                                     r'.*?</a></li>',
                                     re.DOTALL)
        matchVersionTab = RversionTab.search(text)
        if not matchVersionTab and not self.site().family.name == 'wikitravel':
            raise NoPage(self.site(), unicode(self),
                         "Page does not exist. In rare cases, if you are "
                         "certain the page does exist, look into overriding "
                         "family.RversionTab")
        # Look if the page is on our watchlist
        matchWatching = Rwatchlist.search(text)
        if matchWatching:
            self._isWatched = True
        else:
            self._isWatched = False
        # Now process the contents of the textarea
        # Unescape HTML characters, strip whitespace
        pagetext = text[i1:i2]
        pagetext = unescape(pagetext)
        pagetext = pagetext.rstrip()
        if self.site().lang == 'eo':
            pagetext = decodeEsperantoX(pagetext)
        m = self.site().redirectRegex().match(pagetext)
        if m:
            # page text matches the redirect pattern
            if self.section() and not "#" in m.group(1):
                redirtarget = "%s#%s" % (m.group(1), self.section())
            else:
                redirtarget = m.group(1)
            if get_redirect:
                self._redirarg = redirtarget
            else:
                raise IsRedirectPage(redirtarget)
        if self.section() and \
           not does_text_contain_section(text, self.section()):
            try:
                self._getexception
            except AttributeError:
                raise SectionError # Page has no section by this name
        return pagetext
def getOldVersion(self, oldid, force=False, get_redirect=False,
throttle=True, sysop=False, change_edit_time=True):
"""Return text of an old revision of this page; same options as get().
@param oldid: The revid of the revision desired.
"""
# TODO: should probably check for bad pagename, NoPage, and other
# exceptions that would prevent retrieving text, as get() does
# TODO: should this default to change_edit_time = False? If we're not
# getting the current version, why change the timestamps?
return self._getEditPage(
get_redirect=get_redirect, throttle=throttle,
sysop=sysop, oldid=oldid,
change_edit_time=change_edit_time
)
## @since r10309
# @remarks needed by various bots
    def getSections(self, minLevel=2, sectionsonly=False, force=False):
        """Parses the page with API and return section information.

        @param minLevel: The minimal level of heading for section to be
                         reported.
        @type minLevel: int
        @param sectionsonly: Report only the result from API call, do not assign
                             the headings to wiki text (for compression e.g.).
        @type sectionsonly: bool
        @param force: Use API for full section list resolution, works always but
                      is extremely slow, since each single section has to be
                      retrieved.
        @type force: bool

        Returns a list with entries: (byteoffset, level, wikiline, line, anchor)
        This list may be empty and if sections are embedded by template, the
        according byteoffset and wikiline entries are None. The wikiline is the
        wiki text, line is the parsed text and anchor ist the (unique) link
        label. The result is cached in self._sections.
        """
        # replace 'byteoffset' ALWAYS by self calculated, since parsed does not
        # match wiki text
        # bug fix; JIRA: DRTRIGON-82
        # was there already a call? already some info available?
        if hasattr(self, '_sections'):
            return self._sections
        # Old exceptions and contents do not apply any more.
        for attr in ['_sections']:
            if hasattr(self, attr):
                delattr(self, attr)
        # call the wiki to get info
        params = {
            u'action': u'parse',
            u'page': self.title(),
            u'prop': u'sections',
        }
        get_throttle()
        output(u"Reading section info from %s via API..."
               % self.title(asLink=True))
        result = query.GetData(params, self.site())
        # JIRA: DRTRIGON-90; catch and convert error (convert it such that the
        # whole page gets processed later)
        try:
            r = result[u'parse'][u'sections']
        except KeyError: # sequence of sometimes occuring "KeyError: u'parse'"
            warning(u'Query result (gS): %r' % result)
            raise Error('Problem occured during data retrieval for sections in '
                        '%s!' % self.title(asLink=True))
        #debug_data = str(r) + '\n'
        debug_data = str(result) + '\n'
        if not sectionsonly:
            # assign sections with wiki text and section byteoffset
            ## pywikibot.output(u"  Reading wiki page text (if not already done).")
            debug_data += str(len(self.__dict__.get('_contents', u''))) + '\n'
            self.get()
            debug_data += str(len(self._contents)) + '\n'
            debug_data += self._contents + '\n'
            # code debugging
            if verbose:
                debugDump('Page.getSections', self.site, 'Page.getSections',
                          debug_data.encode(config.textfile_encoding))
            # Try successively looser difflib cutoffs until every section
            # resolves; a failed pass raises Error and the next pair is tried.
            # 0.6 is default upper border
            for setting in [(0.05, 0.95), (0.4, 0.8), (0.05, 0.8), (0.0, 0.8)]:
                try:
                    pos = 0
                    for i, item in enumerate(r):
                        item[u'level'] = int(item[u'level'])
                        # byteoffset may be 0; 'None' means template
                        #if (item[u'byteoffset'] != None) and item[u'line']:
                        # (empty index means also template - workaround for bug:
                        # https://bugzilla.wikimedia.org/show_bug.cgi?id=32753)
                        if (item[u'byteoffset'] is not None) and \
                           item[u'line'] and item[u'index']:
                            # section on this page and index in format u"%i"
                            self._getSectionByteOffset(
                                item, pos, force,
                                cutoff=setting) # raises 'Error' unsucessfull!
                            pos = item[u'wikiline_bo'] + len(item[u'wikiline'])
                            item[u'byteoffset'] = item[u'wikiline_bo']
                        else:
                            # section embedded from template (index in format
                            # u"T-%i") or the parser was not able to recongnize
                            # section correct (e.g. html) at all
                            # (the byteoffset, index, ... may be correct or not)
                            item[u'wikiline'] = None
                        r[i] = item
                    break
                except Error:
                    pos = None
            if pos is None:
                raise # re-raise
        # check min. level
        data = []
        for item in r:
            if item[u'level'] < minLevel:
                continue
            data.append(item)
        r = data
        # prepare resulting data
        self._sections = [(item[u'byteoffset'], item[u'level'],
                           item[u'wikiline'], item[u'line'], item[u'anchor'])
                          for item in r]
        return self._sections
## @since r10309
# @remarks needed by Page.getSections()
    def _getSectionByteOffset(self, section, pos, force=False,
                              cutoff=(0.05, 0.95)):
        """determine the byteoffset of the given section (can be slow due
        another API call).

        Mutates *section* in place, adding:
          'wikiline'    - matched wiki text heading (or None)
          'wikiline_mq' - difflib match quality ratio
          'wikiline_bo' - byte offset into self._contents (-1 on failure)
        Raises Error when no acceptable match is found.
        @param cutoff: (lower, upper) similarity bounds; upper is passed to
            difflib.get_close_matches, lower accepts the first regex match.
        """
        # Only search forward of *pos* so repeated headings resolve in order.
        wikitextlines = self._contents[pos:].splitlines()
        possible_headers = []
        #print section[u'line']
        if not force:
            # how the heading should look like (re)
            l = section[u'level']
            headers = [u'^(\s*)%(spacer)s(.*?)%(spacer)s(\s*)((<!--(.*?)-->)?)'
                       u'(\s*)$'
                       % {'line': section[u'line'], 'spacer': u'=' * l},
                       u'^(\s*)<h%(level)i>(.*?)</h%(level)i>(.*?)$'
                       % {'line': section[u'line'], 'level': l}, ]
            # NOTE(review): the first pattern never interpolates %(line)s, so
            # it matches ANY heading of this level, not this specific one —
            # confirm whether that is intended before tightening it.
            # try to give exact match for heading (remove HTML comments)
            for h in headers:
                ## ph = re.search(h,
                ##                pywikibot.removeDisabledParts(
                ##                    self._contents[pos:]), re.M)
                ph = re.search(h, self._contents[pos:], re.M)
                if ph:
                    ph = ph.group(0).strip()
                    possible_headers += [(ph, section[u'line'])]
            # how the heading could look like (difflib)
            headers = [u'%(spacer)s %(line)s %(spacer)s'
                       % {'line': section[u'line'], 'spacer': u'=' * l},
                       u'<h%(level)i>%(line)s</h%(level)i>'
                       % {'line': section[u'line'], 'level': l}, ]
            # give possible match for heading
            # http://stackoverflow.com/questions/2923420/fuzzy-string-matching-algorithm-in-python
            # http://docs.python.org/library/difflib.html
            # (http://mwh.geek.nz/2009/04/26/python-damerau-levenshtein-distance/)
            for h in headers:
                # cutoff=0.6 (default)
                ph = difflib.get_close_matches(h, wikitextlines,
                                               cutoff=cutoff[1])
                possible_headers += [(p, section[u'line']) for p in ph]
            #print h, possible_headers
        if not possible_headers and section[u'index']:
            # nothing found, try 'prop=revisions (rv)'
            # call the wiki to get info
            params = {
                u'action': u'query',
                u'titles': self.title(),
                u'prop': u'revisions',
                u'rvprop': u'content',
                u'rvsection': section[u'index'],
            }
            get_throttle()
            output(u"  Reading section %s from %s via API..."
                   % (section[u'index'], self.title(asLink=True)))
            result = query.GetData(params, self.site())
            # JIRA: DRTRIGON-90; catch and convert error (convert it such that
            # the whole page gets processed later)
            try:
                r = result[u'query'][u'pages'].values()[0]
                pl = r[u'revisions'][0][u'*'].splitlines()
            # sequence of sometimes occuring "KeyError: u'parse'"
            except KeyError:
                warning(u'Query result (gSBO): %r' % result)
                raise Error('Problem occured during data retrieval for '
                            'sections in %s!' % self.title(asLink=True))
            if pl:
                # first line of the section text is its heading
                possible_headers = [(pl[0], pl[0])]
        # find the most probable match for heading
        #print possible_headers
        best_match = (0.0, None)
        for i, (ph, header) in enumerate(possible_headers):
            ## print u'  ', i, difflib.SequenceMatcher(None, header,
            ##                                         ph).ratio(), header, ph
            mr = difflib.SequenceMatcher(None, header, ph).ratio()
            if mr >= best_match[0]:
                best_match = (mr, ph)
            if (i in [0, 1]) and (mr >= cutoff[0]):
                break # use first (exact; re) match directly (if good enough)
        ## print u'  ', best_match
        # prepare resulting data
        section[u'wikiline'] = best_match[1]
        section[u'wikiline_mq'] = best_match[0] # match quality
        section[u'wikiline_bo'] = -1 # byteoffset
        if section[u'wikiline']:
            section[u'wikiline_bo'] = self._contents.find(section[u'wikiline'],
                                                          pos)
        if section[u'wikiline_bo'] < 0: # nothing found, report/raise error !
            #page._getexception = ...
            raise Error('Problem occured during attempt to retrieve and '
                        'resolve sections in %s!' % self.title(asLink=True))
            #output(...)
            # (or create a own error, e.g. look into interwiki.py)
def permalink(self, oldid=None):
"""Return the permalink URL of an old revision of this page.
@param oldid: The revid of the revision desired.
"""
return "%s://%s%s&oldid=%i" % (self.site().protocol(),
self.site().hostname(),
self.site().get_address(self.title()),
(oldid if oldid is not None
else self.latestRevision()))
    def latestRevision(self):
        """Return the current revision id for this page.

        Triggers a forced getall() fetch when _permalink is unset/empty.
        @raise: re-raises any exception stored in _getexception by the fetch.
        """
        # _permalink is filled as a side effect of a getall() bulk fetch.
        if not self._permalink:
            # When we get the page with getall, the permalink is received
            # automatically
            getall(self.site(), [self], force=True)
            # Check for exceptions
            if hasattr(self, '_getexception'):
                raise self._getexception
        return int(self._permalink)
    def userName(self):
        """Return name or IP address of last user to edit page.

        Returns None unless page was retrieved with getAll() or
        _getEditPage() (which set _userName), or when the last editor
        is revision-hidden ('userhidden').
        """
        return self._userName
## @since r10310
# @remarks needed by various bots
def userNameHuman(self):
"""Return name or IP address of last human/non-bot user to edit page.
Returns the most recent human editor out of the last revisions
(optimal used with getAll()). If it was not able to retrieve a
human user returns None.
"""
# was there already a call? already some info available?
if hasattr(self, '_userNameHuman'):
return self._userNameHuman
# get history (use preloaded if available)
(revid, timestmp, username, comment
) = self.getVersionHistory(revCount=1)[0][:4]
# is the last/actual editor already a human?
import botlist # like watchlist
if not botlist.isBot(username):
self._userNameHuman = username
return username
# search the last human
self._userNameHuman = None
for vh in self.getVersionHistory()[1:]:
(revid, timestmp, username, comment) = vh[:4]
if username and (not botlist.isBot(username)):
# user is a human (not a bot)
self._userNameHuman = username
break
# store and return info
return self._userNameHuman
    def isIpEdit(self):
        """Return True if last editor was unregistered.

        Returns None unless page was retrieved with getAll() or _getEditPage()
        (which set _ipedit from the revision's 'anon' flag).
        """
        return self._ipedit
def editTime(self, datetime=False):
"""Return timestamp (in MediaWiki format) of last revision to page.
Returns None unless page was retrieved with getAll() or _getEditPage().
"""
if self._editTime and datetime:
return datetime.datetime.strptime(str(self._editTime),
'%Y%m%d%H%M%S')
return self._editTime
def previousRevision(self):
"""Return the revision id for the previous revision of this Page."""
vh = self.getVersionHistory(revCount=2)
return vh[1][0]
def exists(self):
"""Return True if page exists on the wiki, even if it's a redirect.
If the title includes a section, return False if this section isn't
found.
"""
try:
self.get()
except NoPage:
return False
except IsRedirectPage:
return True
except SectionError:
return False
return True
def pageAPInfo(self):
"""Return the last revid if page exists on the wiki,
Raise IsRedirectPage if it's a redirect
Raise NoPage if the page doesn't exist
Using the API should be a lot faster.
Function done in order to improve the scripts performance.
"""
params = {
'action': 'query',
'prop': 'info',
'titles': self.title(),
}
data = query.GetData(params, self.site)['query']['pages'].values()[0]
if 'redirect' in data:
raise IsRedirectPage
elif 'missing' in data:
raise NoPage
elif 'lastrevid' in data:
return data['lastrevid'] # if ok, return the last revid
else:
# should not exists, OR we have problems.
# better double check in this situations
x = self.get()
return True # if we reach this point, we had no problems.
    def getTemplates(self, tllimit=5000):
        #action=query&prop=templates&titles=Main Page
        """
        Returns the templates that are used in the page given by API.

        Returns a list of Page objects; the list is empty when no templates
        are found (note: it never returns None, despite older docs).

        @param tllimit: maximum number of templates to return; capped at
            config.special_page_limit, and raised back to 5000 when the
            account has the 'apihighlimits' right.
        """
        params = {
            'action': 'query',
            'prop': 'templates',
            'titles': self.title(),
            'tllimit': tllimit,
        }
        if tllimit > config.special_page_limit:
            params['tllimit'] = config.special_page_limit
        # NOTE(review): self.site is used uncalled in the two API calls below
        # but called (self.site()) when constructing Pages — confirm which
        # form query.GetData expects.
        if tllimit > 5000 and self.site.isAllowed('apihighlimits'):
            params['tllimit'] = 5000
        tmpsFound = []
        count = 0
        # Follow API continuation until tllimit templates are collected.
        while True:
            data = query.GetData(params,
                                 self.site)['query']['pages'].values()[0]
            if "templates" not in data:
                return []
            for tmp in data['templates']:
                count += 1
                tmpsFound.append(Page(self.site(), tmp['title'],
                                      defaultNamespace=tmp['ns']))
                if count >= tllimit:
                    break
            if 'query-continue' in data and count < tllimit:
                params.update(data["query-continue"]["templates"])
            else:
                break
        return tmpsFound
def isRedirectPage(self):
"""Return True if this is a redirect, False if not or not existing."""
try:
self.get()
except NoPage:
return False
except IsRedirectPage:
return True
except SectionError:
return False
return False
def isStaticRedirect(self, force=False):
"""Return True if this is a redirect containing the magic word
__STATICREDIRECT__, False if not or not existing.
"""
found = False
if self.isRedirectPage() and self.site().versionnumber() > 13:
staticKeys = self.site().getmagicwords('staticredirect')
text = self.get(get_redirect=True, force=force)
if staticKeys:
for key in staticKeys:
if key in text:
found = True
break
return found
def isCategoryRedirect(self, text=None):
"""Return True if this is a category redirect page, False otherwise."""
if not self.isCategory():
return False
if not hasattr(self, "_catredirect"):
if not text:
try:
text = self.get(get_redirect=True)
except NoPage:
return False
catredirs = self.site().category_redirects()
for (t, args) in self.templatesWithParams(thistxt=text):
template = Page(self.site(), t, defaultNamespace=10
).title(withNamespace=False) # normalize title
if template in catredirs:
# Get target (first template argument)
if not args:
warning(u'redirect target for %s is missing'
% self.title(asLink=True))
self._catredirect = False
else:
self._catredirect = "%s:%s" % (self.site.namespace(14),
args[0])
break
else:
self._catredirect = False
return bool(self._catredirect)
    def getCategoryRedirectTarget(self):
        """If this is a category redirect, return the target category title.

        @return: a catlib.Category object for the cached redirect target.
        @raise IsNotRedirectPage: if this page is not a category redirect.
        """
        if self.isCategoryRedirect():
            # isCategoryRedirect() has filled self._catredirect as a side
            # effect.
            import catlib
            return catlib.Category(self.site(), self._catredirect)
        raise IsNotRedirectPage
def isEmpty(self):
"""Return True if the page text has less than 4 characters.
Character count ignores language links and category links.
Can raise the same exceptions as get().
"""
txt = self.get()
txt = removeLanguageLinks(txt, site=self.site())
txt = removeCategoryLinks(txt, site=self.site())
if len(txt) < 4:
return True
else:
return False
def isTalkPage(self):
"""Return True if this page is in any talk namespace."""
ns = self.namespace()
return ns >= 0 and ns % 2 == 1
def toggleTalkPage(self):
"""Return other member of the article-talk page pair for this Page.
If self is a talk page, returns the associated content page;
otherwise, returns the associated talk page.
Returns None if self is a special page.
"""
ns = self.namespace()
if ns < 0: # Special page
return
if self.isTalkPage():
ns -= 1
else:
ns += 1
if ns == 6:
return ImagePage(self.site(), self.title(withNamespace=False))
return Page(self.site(), self.title(withNamespace=False),
defaultNamespace=ns)
    def isCategory(self):
        """Return True if the page is a Category, False otherwise."""
        # 14 is the Category: namespace number.
        return self.namespace() == 14
    def isImage(self):
        """Return True if this is an image description page, False otherwise."""
        # 6 is the Image:/File: namespace number.
        return self.namespace() == 6
def isDisambig(self, get_Index=True):
"""Return True if this is a disambiguation page, False otherwise."""
if not hasattr(self, "_isDisambig"):
extensions = self._site.siteinfo('extensions')
namesofextensions = []
for extension in extensions:
namesofextensions.append(extension['name'])
if not u'Disambiguator' in namesofextensions:
return self._isDisambig_disambiguationspage(get_Index)
else:
return self._isDisambig_disambiguator(get_Index)
else:
return self._isDisambig
    def _isDisambig_disambiguator(self, get_Index=True):
        """Check disambiguation status via the Disambiguator extension's
        'disambiguation' page property (API prop=pageprops).

        @param get_Index: unused here; kept for signature parity with
            _isDisambig_disambiguationspage.

        Caches the result in self._isDisambig.
        @raise NoPage: if the API reports the page as missing.
        """
        params = {
            'action': 'query',
            'titles': self.title(),
            'prop': 'pageprops',
            'ppprop': 'disambiguation'
        }
        data = query.GetData(params, self._site)['query']['pages'].values()[0]
        self._isDisambig = False
        if u'missing' in data:
            raise NoPage('Page %s does not exist' % self.title(asLink=True))
        if u'pageprops' in data:
            if u'disambiguation' in data[u'pageprops']:
                self._isDisambig = True
        return self._isDisambig
    def _isDisambig_disambiguationspage(self, get_Index=True):
        """Relies on the presence of specific templates, identified in
        the Family file or on a wiki page, to identify disambiguation
        pages.

        By default, loads a list of template names from the Family file;
        if the value in the Family file is None no entry was made, looks for
        the list on [[MediaWiki:Disambiguationspage]]. If this page does not
        exist, take the mediawiki message.

        If get_Index is True then also load the templates for index articles
        which are given on en-wiki

        Template:Disambig is always assumed to be default, and will be
        appended regardless of its existence.

        Caches the per-site template set in site._disambigtemplates and the
        per-page verdict in self._isDisambig.
        """
        if not hasattr(self, "_isDisambig"):
            if not hasattr(self._site, "_disambigtemplates"):
                try:
                    default = set(self._site.family.disambig('_default'))
                except KeyError:
                    default = set([u'Disambig'])
                try:
                    distl = self._site.family.disambig(self._site.lang,
                                                       fallback=False)
                except KeyError:
                    distl = None
                if distl is None:
                    # No family entry: read the on-wiki configuration page.
                    try:
                        disambigpages = Page(self._site,
                                             "MediaWiki:Disambiguationspage")
                        disambigs = set(link.title(withNamespace=False)
                                        for link in disambigpages.linkedPages()
                                        if link.namespace() == 10)
                        # add index article templates
                        if get_Index and \
                           self._site.sitename() == 'wikipedia:en':
                            regex = re.compile('\(\((.+?)\)\)')
                            content = disambigpages.get()
                            for index in regex.findall(content):
                                disambigs.add(index[:1].upper() + index[1:])
                    except NoPage:
                        message = self._site.mediawiki_message(
                            'Disambiguationspage').split(':', 1)[1]
                        # add the default template(s) for default mw message
                        # only
                        disambigs = set([message[:1].upper() +
                                         message[1:]]) | default
                    self._site._disambigtemplates = disambigs
                else:
                    # Normalize template capitalization
                    self._site._disambigtemplates = set(
                        t[:1].upper() + t[1:] for t in distl
                    )
            disambigInPage = self._site._disambigtemplates.intersection(
                self.templates())
            self._isDisambig = (self.namespace() != 10 and
                                len(disambigInPage) > 0)
        return self._isDisambig
def canBeEdited(self):
"""Return bool indicating whether this page can be edited.
This returns True if and only if:
- page is unprotected, and bot has an account for this site, or
- page is protected, and bot has a sysop account for this site.
"""
try:
self.get()
except:
pass
if self.editRestriction == 'sysop':
userdict = config.sysopnames
else:
userdict = config.usernames
try:
userdict[self.site().family.name][self.site().lang]
return True
except:
# We don't have a user account for that wiki, or the
# page is locked and we don't have a sysop account.
return False
    def botMayEdit(self, username):
        """Return True if this page allows bots to edit it.

        This will be True if the page doesn't contain {{bots}} or
        {{nobots}}, or it contains them and the active bot is allowed to
        edit this page. (This method is only useful on those sites that
        recognize the bot-exclusion protocol; on other sites, it will always
        return True.)

        @param username: bot account name checked against the templates'
            allow/deny lists (alongside the calling module's name).

        The framework enforces this restriction by default. It is possible
        to override this by setting ignore_bot_templates=True in
        user-config.py, or using page.put(force=True).
        """
        if self.site().family.name == 'wikitravel': # Wikitravel's bot control.
            self.site().family.bot_control(self.site())
        if config.ignore_bot_templates: # Check the "master ignore switch"
            return True
        try:
            templates = self.templatesWithParams(get_redirect=True)
        except (NoPage, IsRedirectPage, SectionError):
            # An unreadable page cannot restrict us.
            return True
        # go through all templates and look for any restriction
        # multiple bots/nobots templates are allowed
        for template in templates:
            title = template[0].title()
            if title == 'Nobots':
                if len(template[1]) == 0:
                    # bare {{nobots}} denies every bot
                    return False
                else:
                    bots = template[1][0].split(',')
                    if 'all' in bots or calledModuleName() in bots \
                       or username in bots:
                        return False
            elif title == 'Bots':
                if len(template[1]) == 0:
                    # bare {{bots}} allows every bot
                    return True
                else:
                    # first parameter looks like 'allow=a,b' / 'deny=a,b' /
                    # 'allowscript=...' / 'denyscript=...'
                    (ttype, bots) = template[1][0].split('=', 1)
                    bots = bots.split(',')
                    if ttype == 'allow':
                        return 'all' in bots or username in bots
                    if ttype == 'deny':
                        return not ('all' in bots or username in bots)
                    if ttype == 'allowscript':
                        return 'all' in bots or calledModuleName() in bots
                    if ttype == 'denyscript':
                        return not ('all' in bots or calledModuleName() in bots)
        # no restricting template found
        return True
    def comment(self):
        """Return comment (edit summary) of last edit.

        Returns None unless page was retrieved with getAll() or
        _getEditPage() (which cache it in self._comment).
        """
        return self._comment
def getReferences(self, follow_redirects=True, withTemplateInclusion=True,
                  onlyTemplateInclusion=False, redirectsOnly=False,
                  internal=False):
    """Yield all pages that link to the page by API

    If you need a full list of referring pages, use this:
        pages = list(s.getReferences())

    Parameters:
    * follow_redirects      - if True, also returns pages that link to a
                              redirect pointing to the page.
    * withTemplateInclusion - if True, also returns pages where self is
                              used as a template.
    * onlyTemplateInclusion - if True, only returns pages where self is
                              used as a template.
    * redirectsOnly         - if True, only returns redirects to self.
    * internal              - internal use only: suppresses the progress
                              message on recursive invocations.
    """
    # Fall back to screen scraping when the site has no API support.
    if not self.site().has_api():
        for s in self.getReferencesOld(follow_redirects,
                                       withTemplateInclusion,
                                       onlyTemplateInclusion,
                                       redirectsOnly):
            yield s
        return
    params = {
        'action': 'query',
        'list': [],
    }
    if not onlyTemplateInclusion:
        # list=backlinks returns ordinary wikilinks to this page.
        params['list'].append('backlinks')
        params['bltitle'] = self.title()
        params['bllimit'] = config.special_page_limit
        params['blfilterredir'] = 'all'
        if follow_redirects:
            params['blredirect'] = 1
        if redirectsOnly:
            params['blfilterredir'] = 'redirects'
        # Without the apihighlimits right the server caps limits at 500.
        if not self.site().isAllowed('apihighlimits') and \
           config.special_page_limit > 500:
            params['bllimit'] = 500
    if withTemplateInclusion or onlyTemplateInclusion:
        # list=embeddedin returns pages that transclude this page.
        params['list'].append('embeddedin')
        params['eititle'] = self.title()
        params['eilimit'] = config.special_page_limit
        params['eifilterredir'] = 'all'
        if follow_redirects:
            params['eiredirect'] = 1
        if redirectsOnly:
            params['eifilterredir'] = 'redirects'
        if not self.site().isAllowed('apihighlimits') and \
           config.special_page_limit > 500:
            params['eilimit'] = 500
    allDone = False
    while not allDone:
        if not internal:
            output(u'Getting references to %s via API...'
                   % self.title(asLink=True))
        datas = query.GetData(params, self.site())
        data = datas['query'].values()
        if len(data) == 2:
            # Both backlinks and embeddedin were requested; merge them.
            data = data[0] + data[1]
        else:
            data = data[0]
        # Used to avoid yielding the same page twice within this batch.
        refPages = set()
        for blp in data:
            pg = Page(self.site(), blp['title'], defaultNamespace=blp['ns'])
            if pg in refPages:
                continue
            yield pg
            refPages.add(pg)
            if follow_redirects and 'redirect' in blp and \
               'redirlinks' in blp:
                # Pages linking through a redirect are nested under the
                # redirect entry in the API result.
                for p in blp['redirlinks']:
                    plk = Page(self.site(), p['title'],
                               defaultNamespace=p['ns'])
                    if plk in refPages:
                        continue
                    yield plk
                    refPages.add(plk)
                    if follow_redirects and 'redirect' in p and plk != self:
                        # Recurse through double redirects.
                        for zms in plk.getReferences(follow_redirects,
                                                     withTemplateInclusion,
                                                     onlyTemplateInclusion,
                                                     redirectsOnly,
                                                     internal=True):
                            yield zms
                    else:
                        continue
            else:
                continue
        if 'query-continue' in datas:
            # More results available; update the paging parameters.
            if 'backlinks' in datas['query-continue']:
                params.update(datas['query-continue']['backlinks'])
            if 'embeddedin' in datas['query-continue']:
                params.update(datas['query-continue']['embeddedin'])
        else:
            allDone = True
def getReferencesOld(self, follow_redirects=True,
                     withTemplateInclusion=True,
                     onlyTemplateInclusion=False, redirectsOnly=False):
    """Yield all pages that link to the page.

    Screen-scraping fallback for sites without API support: fetches and
    parses [[Special:Whatlinkshere]]. Parameters are the same as for
    getReferences().
    """
    # Temporary bug-fix while researching more robust solution:
    if config.special_page_limit > 999:
        config.special_page_limit = 999
    site = self.site()
    path = self.site().references_address(self.urlname())
    # Adjust the Whatlinkshere URL filters to match the requested scope.
    if withTemplateInclusion:
        path += u'&hidetrans=0'
    if onlyTemplateInclusion:
        path += u'&hidetrans=0&hidelinks=1&hideredirs=1&hideimages=1'
    if redirectsOnly:
        path += u'&hideredirs=0&hidetrans=1&hidelinks=1&hideimages=1'
    # Only parse the content <div>, not the whole HTML page.
    content = SoupStrainer("div", id=self.site().family.content_id)
    try:
        next_msg = self.site().mediawiki_message('whatlinkshere-next')
    except KeyError:
        next_msg = "next %i" % config.special_page_limit
    # Resolve the {{PLURAL:...}} construct in the "next" paging message
    # so it can be turned into a regex.
    plural = (config.special_page_limit == 1) and "\\1" or "\\2"
    next_msg = re.sub(r"{{PLURAL:\$1\|(.*?)\|(.*?)}}", plural, next_msg)
    nextpattern = re.compile("^%s$" % next_msg.replace("$1", "[0-9]+"))
    delay = 1  # NOTE(review): unused; likely a leftover
    if self.site().has_mediawiki_message("Isredirect"):
        self._isredirectmessage = self.site().mediawiki_message(
            "Isredirect")
    if self.site().has_mediawiki_message("Istemplate"):
        self._istemplatemessage = self.site().mediawiki_message(
            "Istemplate")
    # to avoid duplicates:
    refPages = set()
    while path:
        output(u'Getting references to %s' % self.title(asLink=True))
        get_throttle()
        txt = self.site().getUrl(path)
        body = BeautifulSoup(txt,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             parseOnlyThese=content)
        # Find the link to the next batch of results, if there is one.
        next_text = body.find(text=nextpattern)
        if next_text is not None and 'href' in next_text.parent:
            # NOTE(review): this replace is a no-op as written; it was
            # presumably meant to un-escape "&amp;" to "&" and may have
            # been garbled in transit — confirm against upstream.
            path = next_text.parent['href'].replace("&", "&")
        else:
            path = ""
        reflist = body.find("ul")
        if reflist is None:
            return
        for page in self._parse_reflist(reflist,
                                        follow_redirects,
                                        withTemplateInclusion,
                                        onlyTemplateInclusion,
                                        redirectsOnly):
            if page not in refPages:
                yield page
                refPages.add(page)
def _parse_reflist(self, reflist, follow_redirects=True,
                   withTemplateInclusion=True, onlyTemplateInclusion=False,
                   redirectsOnly=False):
    """For internal use only

    Parse a "Special:Whatlinkshere" list of references and yield Page
    objects that meet the criteria (used by getReferences).

    ``reflist`` is the BeautifulSoup <ul> element holding the reference
    list items; the remaining parameters mirror getReferences().
    """
    for link in reflist("li", recursive=False):
        title = link.a.string
        if title is None:
            debug(u"invalid <li> item in Whatlinkshere: %s" % link)
            # BUGFIX: skip the malformed item. Previously execution fell
            # through to Page(self.site(), None), which raises an
            # unrelated exception instead of being ignored.
            continue
        try:
            p = Page(self.site(), title)
        except InvalidTitle:
            debug(u"Whatlinkshere:%s contains invalid link to %s"
                  % (self.title(), title))
            continue
        isredirect, istemplate = False, False
        textafter = link.a.findNextSibling(text=True)
        if textafter is not None:
            if self.site().has_mediawiki_message("Isredirect") \
               and self._isredirectmessage in textafter:
                # make sure this is really a redirect to this page
                # (MediaWiki will mark as a redirect any link that follows
                # a #REDIRECT marker, not just the first one).
                if (p.getRedirectTarget().sectionFreeTitle()
                        == self.sectionFreeTitle()):
                    isredirect = True
            if self.site().has_mediawiki_message("Istemplate") \
               and self._istemplatemessage in textafter:
                istemplate = True
        # Yield the page if it matches the requested filters.
        if (withTemplateInclusion or onlyTemplateInclusion or
                not istemplate) and (not redirectsOnly or isredirect) and \
                (not onlyTemplateInclusion or istemplate):
            yield p
            continue
        if isredirect and follow_redirects:
            # Pages that link via this redirect are nested in a sub-list.
            sublist = link.find("ul")
            if sublist is not None:
                for p in self._parse_reflist(sublist, follow_redirects,
                                             withTemplateInclusion,
                                             onlyTemplateInclusion,
                                             redirectsOnly):
                    yield p
def _getActionUser(self, action, restriction='', sysop=False):
    """
    Get the user to do an action: sysop or not sysop, or raise an exception
    if the user cannot do that.

    Parameters:
    * action - the action to be done, which is the name of the right
    * restriction - the restriction level or an empty string for no
      restriction
    * sysop - initially use sysop user?

    Returns the sysop flag (bool) that must be used for the action.
    Raises LockedPage when neither the normal nor the sysop account may
    perform the action.
    """
    # Login
    self.site().forceLogin(sysop=sysop)
    # Check permissions
    if not self.site().isAllowed(action, sysop):
        if sysop:
            raise LockedPage(u'The sysop user is not allowed to %s in site '
                             u'%s' % (action, self.site()))
        else:
            try:
                # Escalate: retry the whole check with the sysop account.
                user = self._getActionUser(action, restriction, sysop=True)
                output(u'The user is not allowed to %s on site %s. Using '
                       u'sysop account.' % (action, self.site()))
                return user
            except NoUsername:
                raise LockedPage(u'The user is not allowed to %s on site '
                                 u'%s, and no sysop account is defined.'
                                 % (action, self.site()))
            except LockedPage:
                raise
    # Check restrictions (e.g. a page protection level)
    if not self.site().isAllowed(restriction, sysop):
        if sysop:
            raise LockedPage(u'Page on %s is locked in a way that sysop '
                             u'user cannot %s it' % (self.site(), action))
        else:
            try:
                # Escalate to the sysop account for the locked page.
                user = self._getActionUser(action, restriction, sysop=True)
                output(u'Page is locked on %s - cannot %s, using sysop '
                       u'account.' % (self.site(), action))
                return user
            except NoUsername:
                raise LockedPage(u'Page is locked on %s - cannot %s, and '
                                 u'no sysop account is defined.'
                                 % (self.site(), action))
            except LockedPage:
                raise
    return sysop
def getRestrictions(self):
    """
    Get the protections on the page.

    Returns a restrictions dictionary. Keys are 'edit' and 'move';
    values are None (no restriction for that action) or [level, expiry]:
      * level is the level of auth needed to perform that action
        ('autoconfirmed' or 'sysop')
      * expiry is the expiration time of the restriction
    """
    restrictions = {'edit': None, 'move': None}
    if not self.site().has_api():
        # Without API access there is no way to query protection levels.
        return restrictions
    request = {
        'action': 'query',
        'prop': 'info',
        'inprop': 'protection',
        'titles': self.title(),
    }
    pages = query.GetData(request, self.site())['query']['pages']
    for pageid in pages:
        entry = pages[pageid]
        if 'missing' in entry:
            self._getexception = NoPage
            raise NoPage('Page %s does not exist' % self.title(asLink=True))
        elif 'pageid' not in entry:
            # Don't know what may happen here.
            # We may want to have better error handling
            raise Error("BUG> API problem.")
        if entry['protection'] != []:
            restrictions = dict(
                [detail['type'], [detail['level'], detail['expiry']]]
                for detail in entry['protection'])
    return restrictions
def put_async(self, newtext,
              comment=None, watchArticle=None, minorEdit=True, force=False,
              callback=None):
    """Put page on queue to be saved to wiki asynchronously.

    Asynchronous version of put (takes the same arguments), which places
    pages on a queue to be saved by a daemon thread. All arguments are
    the same as for .put(), except --

    callback: a callable object that will be called after the page put
              operation; this object must take two arguments:
              (1) a Page object, and (2) an exception instance, which
              will be None if the page was saved successfully.

    The callback is intended to be used by bots that need to keep track
    of which saves were successful.
    """
    try:
        # Serialize thread startup under the queue's own lock so only
        # one caller attempts to start the daemon thread.
        page_put_queue.mutex.acquire()
        try:
            _putthread.start()
        except (AssertionError, RuntimeError):
            # Thread was already started; that's fine.
            pass
    finally:
        page_put_queue.mutex.release()
    page_put_queue.put((self, newtext, comment, watchArticle, minorEdit,
                        force, callback))
def put(self, newtext, comment=None, watchArticle=None, minorEdit=True,
        force=False, sysop=False, botflag=True, maxTries=-1):
    """Save the page with the contents of the first argument as the text.

    Optional parameters:
      comment:      a unicode string that is to be used as the summary for
                    the modification.
      watchArticle: a bool, add or remove this Page to/from bot user's
                    watchlist (if None, leave watchlist status unchanged)
      minorEdit:    mark this edit as minor if True
      force:        ignore botMayEdit() setting.
      sysop:        use the sysop account if True
      botflag:      mark the edit with the bot flag if True
      maxTries:     the maximum amount of save attempts. -1 for infinite.
    """
    # Fetching the page first primes the edit timestamps and tokens.
    try:
        self.get()
    except Exception:
        # Best-effort only (e.g. NoPage/IsRedirectPage must not stop the
        # save). Narrowed from a bare except: so that KeyboardInterrupt
        # and SystemExit still propagate.
        pass
    # Login and determine whether the sysop account is needed.
    sysop = self._getActionUser(action='edit',
                                restriction=self.editRestriction,
                                sysop=sysop)
    username = self.site().loggedInAs()
    # Check blocks
    self.site().checkBlocks(sysop=sysop)
    # Determine if we are allowed to edit
    if not force:
        if not self.botMayEdit(username):
            raise LockedPage(
                u'Not allowed to edit %s because of a restricting template'
                % self.title(asLink=True))
        elif self.site().has_api() and self.namespace() == 2 and (
                self.title().endswith('.css') or
                self.title().endswith('.js')):
            titleparts = self.title().split("/")
            userpageowner = titleparts[0].split(":")[1]
            if userpageowner != username:
                # API enable: if title ends with .css or .js in ns2
                # it needs permission to edit user pages
                if self.title().endswith('css'):
                    permission = 'editusercss'
                else:
                    permission = 'edituserjs'
                sysop = self._getActionUser(
                    action=permission,
                    restriction=self.editRestriction,
                    sysop=True)
    # If there is an unchecked edit restriction, we need to load the page
    if self._editrestriction:
        output(u'Page %s is semi-protected. Getting edit page to find out '
               u'if we are allowed to edit.' % self.title(asLink=True))
        oldtime = self.editTime()
        # Note: change_edit_time=True is always True since
        # self.get() calls self._getEditPage without this parameter
        self.get(force=True, change_edit_time=True)
        newtime = self.editTime()
        ### TODO: we have different timestamp formats
        if re.sub('\D', '', str(oldtime)) != re.sub('\D', '', str(newtime)):
            # page was changed
            raise EditConflict(u'Page has been changed after first read.')
        self._editrestriction = False
    # If no comment is given for the change, use the default
    comment = comment or action
    if config.cosmetic_changes and not self.isTalkPage() and \
       not calledModuleName() in ('category_redirect', 'cosmetic_changes',
                                  'touch'):
        if config.cosmetic_changes_mylang_only:
            # BUGFIX: 'site' is a method on this class (called as
            # self.site() everywhere else); the previous code accessed
            # self.site.family / self.site.lang on the bound method,
            # which raised AttributeError whenever this branch ran.
            cc = ((self.site().family.name == config.family and
                   self.site().lang == config.mylang) or
                  self.site().family.name in
                  config.cosmetic_changes_enable.keys() and
                  self.site().lang in config.cosmetic_changes_enable[
                      self.site().family.name])
        else:
            cc = True
        cc = cc and not (self.site().family.name in
                         config.cosmetic_changes_disable.keys() and
                         self.site().lang in
                         config.cosmetic_changes_disable[
                             self.site().family.name])
        if cc:
            old = newtext
            if verbose:
                output(u'Cosmetic Changes for %s-%s enabled.'
                       % (self.site().family.name, self.site().lang))
            import cosmetic_changes
            from pywikibot import i18n
            ccToolkit = cosmetic_changes.CosmeticChangesToolkit(
                self.site(), redirect=self.isRedirectPage(),
                namespace=self.namespace(), pageTitle=self.title())
            newtext = ccToolkit.change(newtext)
            # Note the automatic changes in the edit summary if the text
            # actually changed (ignoring line-ending differences).
            if comment and \
               old.strip().replace('\r\n',
                                   '\n') != newtext.strip().replace('\r\n',
                                                                    '\n'):
                comment += i18n.twtranslate(self.site(),
                                            'cosmetic_changes-append')
    if watchArticle is None:
        # if the page was loaded via get(), we know its status
        if hasattr(self, '_isWatched'):
            watchArticle = self._isWatched
        else:
            import watchlist
            watchArticle = watchlist.isWatched(self.title(),
                                               site=self.site())
    newPage = not self.exists()
    # if posting to an Esperanto wiki, we must e.g. write Bordeauxx instead
    # of Bordeaux
    if self.site().lang == 'eo' and not self.site().has_api():
        newtext = encodeEsperantoX(newtext)
        comment = encodeEsperantoX(comment)
    return self._putPage(newtext, comment, watchArticle, minorEdit,
                         newPage, self.site().getToken(sysop=sysop),
                         sysop=sysop, botflag=botflag, maxTries=maxTries)
def _encodeArg(self, arg, msgForError):
    """Encode an ascii string/Unicode string to the site's encoding.

    ``msgForError`` names the argument (e.g. 'summary', 'text') and is
    interpolated into the exception's reason to aid debugging. The
    original exception is re-raised with the augmented reason.
    """
    try:
        return arg.encode(self.site().encoding())
    except UnicodeDecodeError, e:
        # happens when arg is a non-ascii bytestring :
        # when reencoding bytestrings, python decodes first to ascii
        e.reason += ' (cannot convert input %s string to unicode)' \
                    % msgForError
        raise e
    except UnicodeEncodeError, e:
        # happens when arg is unicode
        e.reason += ' (cannot convert %s to wiki encoding %s)' \
                    % (msgForError, self.site().encoding())
        raise e
def _putPage(self, text, comment=None, watchArticle=False, minorEdit=True,
             newPage=False, token=None, newToken=False, sysop=False,
             captcha=None, botflag=True, maxTries=-1):
    """Upload 'text' as new content of Page by API

    Don't use this directly, use put() instead.

    Returns a (code, message, data) triple; on success the code is 302
    to mimic the non-API save path. Retries on server errors, database
    lag and deleted-page races; raises EditConflict, SpamfilterError,
    LockedPage, LongPageError, PageNotSaved or MaxTriesExceededError
    depending on the failure.
    """
    if not self.site().has_api() or self.site().versionnumber() < 13:
        # api not enabled or version not supported
        return self._putPageOld(text, comment, watchArticle, minorEdit,
                                newPage, token, newToken, sysop, captcha,
                                botflag, maxTries)
    retry_attempt = 0
    retry_delay = 1
    dblagged = False
    params = {
        'action': 'edit',
        'title': self.title(),
        'text': self._encodeArg(text, 'text'),
        'summary': self._encodeArg(comment, 'summary'),
    }
    if token:
        params['token'] = token
    else:
        params['token'] = self.site().getToken(sysop=sysop)
    # Add server lag parameter (see config.py for details)
    if config.maxlag:
        params['maxlag'] = str(config.maxlag)
    # basetimestamp/starttimestamp let the server detect edit conflicts
    # and deleted-page races.
    if self._editTime:
        params['basetimestamp'] = self._editTime
    else:
        params['basetimestamp'] = time.strftime('%Y%m%d%H%M%S',
                                                time.gmtime())
    if self._startTime:
        params['starttimestamp'] = self._startTime
    else:
        params['starttimestamp'] = time.strftime('%Y%m%d%H%M%S',
                                                 time.gmtime())
    if botflag:
        params['bot'] = 1
    if minorEdit:
        params['minor'] = 1
    else:
        params['notminor'] = 1
    if watchArticle:
        params['watch'] = 1
    #else:
    #    params['unwatch'] = 1
    if captcha:
        params['captchaid'] = captcha['id']
        params['captchaword'] = captcha['answer']
    while True:
        if (maxTries == 0):
            raise MaxTriesExceededError()
        maxTries -= 1
        # Check whether we are not too quickly after the previous
        # putPage, and wait a bit until the interval is acceptable
        if not dblagged:
            put_throttle()
        # Which web-site host are we submitting to?
        if newPage:
            output(u'Creating page %s via API' % self.title(asLink=True))
            params['createonly'] = 1
        else:
            output(u'Updating page %s via API' % self.title(asLink=True))
            params['nocreate'] = 1
        # Submit the prepared information
        try:
            response, data = query.GetData(params, self.site(), sysop=sysop,
                                           back_response=True)
            if isinstance(data, basestring):
                # A bare string instead of a result dict means the
                # response was unusable; treat it like a missing key.
                raise KeyError
        except httplib.BadStatusLine, line:
            raise PageNotSaved('Bad status line: %s' % line.line)
        except ServerError:
            exception(tb=True)
            retry_attempt += 1
            if retry_attempt > config.maxretries:
                raise
            output(u'Got a server error when putting %s; will retry in %i '
                   u'minute%s.' % (self.title(asLink=True), retry_delay,
                                   retry_delay != 1 and "s" or ""))
            # Exponential backoff, capped at 30 minutes.
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        except ValueError:  # API result cannot decode
            output(u"Server error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        # If it has gotten this far then we should reset dblagged
        dblagged = False
        # Check blocks
        self.site().checkBlocks(sysop=sysop)
        # A second text area means that an edit conflict has occured.
        if response.code == 500:
            output(u"Server error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        if 'error' in data:
            # All available error key in edit mode: (from ApiBase.php)
            # 'noimageredirect-anon':"Anonymous users can't create image redirects",
            # 'noimageredirect':"You don't have permission to create image redirects",
            # 'filtered':"The filter callback function refused your edit",
            # 'noedit-anon':"Anonymous users can't edit pages",
            # 'noedit':"You don't have permission to edit pages",
            # 'emptypage':"Creating new, empty pages is not allowed",
            # 'badmd5':"The supplied MD5 hash was incorrect",
            # 'notext':"One of the text, appendtext, prependtext and undo parameters must be set",
            # 'emptynewsection':'Creating empty new sections is not possible.',
            # 'revwrongpage':"r\$1 is not a revision of ``\$2''",
            # 'undofailure':'Undo failed due to conflicting intermediate edits',
            #for debug only
            #------------------------
            if verbose:
                output("error occured, code:%s\n"
                       "info:%s\nstatus:%s\n"
                       "response:%s"
                       % (data['error']['code'], data['error']['info'],
                          response.code, response.msg))
                # Dump the request, minus the page text, for diagnosis.
                faked = params
                if 'text' in faked:
                    del faked['text']
                output("OriginalData:%s" % unicode(repr(faked), "latin1"))
                del faked
            #------------------------
            errorCode = data['error']['code']
            #cannot handle longpageerror and PageNoSave yet
            if errorCode == 'maxlag' or response.code == 503:
                # server lag; wait for the lag time and retry
                lagpattern = re.compile(
                    r"Waiting for [\d.]+: (?P<lag>\d+) seconds? lagged")
                lag = lagpattern.search(data['error']['info'])
                timelag = int(lag.group("lag"))
                output(u"Pausing %d seconds due to database server lag."
                       % min(timelag, 300))
                dblagged = True
                time.sleep(min(timelag, 300))
                continue
            elif errorCode == 'editconflict':
                # 'editconflict':"Edit conflict detected",
                raise EditConflict(u'An edit conflict has occured.')
            elif errorCode == 'spamdetected':
                # 'spamdetected':
                # "Your edit was refused because it contained a spam
                #  fragment: ``\$1''",
                raise SpamfilterError(data['error']['info'][62:-2])
            elif errorCode == 'pagedeleted':
                # 'pagedeleted':
                # "The page has been deleted since you fetched its
                #  timestamp",
                # Make sure your system clock is correct if this error
                # occurs without any reason!
                # raise EditConflict(u'Someone deleted the page.')
                # No raise, simply define these variables and retry:
                params['recreate'] = 1
                if self._editTime:
                    params['basetimestamp'] = self._editTime
                else:
                    params['basetimestamp'] = time.strftime(
                        '%Y%m%d%H%M%S', time.gmtime())
                if self._startTime:
                    params['starttimestamp'] = self._startTime
                else:
                    params['starttimestamp'] = time.strftime(
                        '%Y%m%d%H%M%S', time.gmtime())
                continue
            elif errorCode == 'readonly':
                # 'readonly':"The wiki is currently in read-only mode"
                output(u"The database is currently locked for write access;"
                       u"will retry in %i minute%s."
                       % (retry_delay, retry_delay != 1 and "s" or ""))
                time.sleep(60 * retry_delay)
                retry_delay *= 2
                if retry_delay > 30:
                    retry_delay = 30
                continue
            elif errorCode == 'contenttoobig':
                # 'contenttoobig':
                # "The content you supplied exceeds the article size limit
                #  of \$1 kilobytes",
                raise LongPageError(len(params['text']),
                                    int(data['error']['info'][59:-10]))
            elif errorCode in ['protectedpage', 'customcssjsprotected',
                               'cascadeprotected', 'protectednamespace',
                               'protectednamespace-interface']:
                # 'protectedpage':"The ``\$1'' right is required to edit this page"
                # 'cascadeprotected':"The page you're trying to edit is protected because it's included in a cascade-protected page"
                # 'customcssjsprotected': "You're not allowed to edit custom CSS and JavaScript pages"
                # 'protectednamespace': "You're not allowed to edit pages in the ``\$1'' namespace"
                # 'protectednamespace-interface':"You're not allowed to edit interface messages"
                #
                # The page is locked. This should have already been
                # detected when getting the page, but there are some
                # reasons why this didn't work, e.g. the page might be
                # locked via a cascade lock.
                try:
                    # Page is locked - try using the sysop account, unless
                    # we're using one already
                    if sysop:  # Unknown permissions error
                        raise LockedPage()
                    else:
                        self.site().forceLogin(sysop=True)
                        output(u'Page is locked, retrying using sysop '
                               u'account.')
                        return self._putPage(text, comment, watchArticle,
                                             minorEdit, newPage,
                                             token=self.site().getToken(
                                                 sysop=True),
                                             sysop=True)
                except NoUsername:
                    raise LockedPage()
            elif errorCode == 'badtoken':
                if newToken:
                    # Already retried once with a fresh token; give up.
                    output(u"Edit token has failed. Giving up.")
                else:
                    # We might have been using an outdated token
                    output(u"Edit token has failed. Retrying.")
                    return self._putPage(text, comment, watchArticle,
                                         minorEdit, newPage,
                                         token=self.site().getToken(
                                             sysop=sysop,
                                             getagain=True),
                                         newToken=True, sysop=sysop)
            # I think the error message title was changed from
            # "Wikimedia Error" to "Wikipedia has a problem", but I'm not
            # sure. Maybe we could just check for HTTP Status 500
            # (Internal Server Error)?
            else:
                error("API Error code:%s" % data['error']['code'])
                output("Information:%s" % data['error']['info'])
        else:
            if data['edit']['result'] == u"Success":
                #
                # The status code for update page completed in ordinary mode
                # is 302 - found. But API is always 200 - OK because it only
                # send "success" back in string. If the page update is
                # successed, we need to return code 302 for cheat script who
                # using status code
                #
                return 302, response.msg, data['edit']
            if data['edit']['result'] == u"Failure":
                if u'spamblacklist' in data['edit']:
                    raise SpamfilterError(data['edit'][u'spamblacklist'])
            # A Failure without a spam-blacklist hit may be a captcha
            # challenge; try to solve it and retry the save.
            solve = self.site().solveCaptcha(data)
            if solve:
                return self._putPage(text, comment, watchArticle, minorEdit,
                                     newPage, token, newToken, sysop,
                                     captcha=solve)
        return response.code, response.msg, data
def _putPageOld(self, text, comment=None, watchArticle=False,
                minorEdit=True, newPage=False, token=None, newToken=False,
                sysop=False, captcha=None, botflag=True, maxTries=-1):
    """Upload 'text' as new content of Page by filling out the edit form.

    Don't use this directly, use put() instead.

    Non-API fallback: POSTs the page's edit form and screen-scrapes the
    response for conflicts, spam-filter hits, locks, captchas and server
    errors, retrying with exponential backoff where appropriate.
    Returns a (code, message, data) triple.
    """
    host = self.site().hostname()
    # Get the address of the page on that host.
    address = self.site().put_address(self.urlname())
    predata = {
        'wpSave': '1',
        'wpSummary': self._encodeArg(comment, 'edit summary'),
        'wpTextbox1': self._encodeArg(text, 'wikitext'),
        # As of October 2008, MW HEAD requires wpSection to be set.
        # We will need to fill this more smartly if we ever decide to edit
        # by section
        'wpSection': '',
    }
    if not botflag:
        predata['bot'] = '0'
    if captcha:
        predata["wpCaptchaId"] = captcha['id']
        predata["wpCaptchaWord"] = captcha['answer']
    # Add server lag parameter (see config.py for details)
    if config.maxlag:
        predata['maxlag'] = str(config.maxlag)
    # <s>Except if the page is new, we need to supply the time of the
    # previous version to the wiki to prevent edit collisions</s>
    # As of Oct 2008, these must be filled also for new pages
    if self._editTime:
        predata['wpEdittime'] = self._editTime
    else:
        predata['wpEdittime'] = time.strftime('%Y%m%d%H%M%S', time.gmtime())
    if self._startTime:
        predata['wpStarttime'] = self._startTime
    else:
        predata['wpStarttime'] = time.strftime('%Y%m%d%H%M%S',
                                               time.gmtime())
    if self._revisionId:
        predata['baseRevId'] = self._revisionId
    # Pass the minorEdit and watchArticle arguments to the Wiki.
    if minorEdit:
        predata['wpMinoredit'] = '1'
    if watchArticle:
        predata['wpWatchthis'] = '1'
    # Give the token, but only if one is supplied.
    if token:
        predata['wpEditToken'] = token
    # Sorry, single-site exception...
    if self.site().fam().name == 'loveto' and \
       self.site().language() == 'recipes':
        predata['masteredit'] = '1'
    retry_delay = 1
    retry_attempt = 0
    dblagged = False
    wait = 5
    while True:
        if (maxTries == 0):
            raise MaxTriesExceededError()
        maxTries -= 1
        # Check whether we are not too quickly after the previous
        # putPage, and wait a bit until the interval is acceptable
        if not dblagged:
            put_throttle()
        # Which web-site host are we submitting to?
        if newPage:
            output(u'Creating page %s' % self.title(asLink=True))
        else:
            output(u'Changing page %s' % self.title(asLink=True))
        # Submit the prepared information
        try:
            response, data = self.site().postForm(address, predata, sysop)
            if response.code == 503:
                if 'x-database-lag' in response.msg.keys():
                    # server lag; Mediawiki recommends waiting 5 seconds
                    # and retrying
                    if verbose:
                        output(data, newline=False)
                    output(u"Pausing %d seconds due to database server lag."
                           % wait)
                    dblagged = True
                    time.sleep(wait)
                    wait = min(wait * 2, 300)
                    continue
                # Squid error 503
                raise ServerError(response.code)
        except httplib.BadStatusLine, line:
            raise PageNotSaved('Bad status line: %s' % line.line)
        except ServerError:
            exception(tb=True)
            retry_attempt += 1
            if retry_attempt > config.maxretries:
                raise
            output(u'Got a server error when putting %s; will retry in %i '
                   u'minute%s.' % (self.title(asLink=True), retry_delay,
                                   retry_delay != 1 and "s" or ""))
            # Exponential backoff, capped at 30 minutes.
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        # If it has gotten this far then we should reset dblagged
        dblagged = False
        # Check blocks
        self.site().checkBlocks(sysop=sysop)
        # A second text area means that an edit conflict has occured.
        editconflict1 = re.compile(
            'id=["\']wpTextbox2[\'"] name="wpTextbox2"')
        editconflict2 = re.compile('name="wpTextbox2" id="wpTextbox2"')
        if editconflict1.search(data) or editconflict2.search(data):
            raise EditConflict(u'An edit conflict has occured.')
        # remove the wpAntispam keyword before checking for Spamfilter
        data = re.sub(u'(?s)<label for="wpAntispam">.*?</label>', '', data)
        if self.site().has_mediawiki_message("spamprotectiontitle")\
           and self.site().mediawiki_message(
               'spamprotectiontitle') in data:
            try:
                reasonR = re.compile(
                    re.escape(self.site().mediawiki_message(
                        'spamprotectionmatch')).replace(
                            '\$1', '(?P<url>[^<]*)'))
                url = reasonR.search(data).group('url')
            except:
                # NOTE(review): bare except kept as-is; it deliberately
                # swallows any parse failure and falls back to the
                # heuristic below.
                # Some wikis have modified the spamprotectionmatch
                # template in a way that the above regex doesn't work,
                # e.g. on he.wikipedia the template includes a
                # wikilink, and on fr.wikipedia there is bold text.
                # This is a workaround for this: it takes the region
                # which should contain the spamfilter report and the
                # URL. It then searches for a plaintext URL.
                relevant = data[data.find(
                    '<!-- start content -->') + 22:data.find(
                    '<!-- end content -->')].strip()
                # Throw away all the other links etc.
                relevant = re.sub('<.*?>', '', relevant)
                # NOTE(review): this replace is a no-op as written; it
                # presumably un-escaped an HTML entity for ':' and may
                # have been garbled in transit — confirm upstream.
                relevant = relevant.replace(':', ':')
                # MediaWiki only spam-checks HTTP links, and only the
                # domain name part of the URL.
                m = re.search('http://[\w\-\.]+', relevant)
                if m:
                    url = m.group()
                else:
                    # Can't extract the exact URL. Let the user search.
                    url = relevant
            raise SpamfilterError(url)
        if '<label for=\'wpRecreate\'' in data:
            # Make sure your system clock is correct if this error occurs
            # without any reason!
            # raise EditConflict(u'Someone deleted the page.')
            # No raise, simply define these variables and retry:
            if self._editTime:
                predata['wpEdittime'] = self._editTime
            else:
                predata['wpEdittime'] = time.strftime('%Y%m%d%H%M%S',
                                                      time.gmtime())
            if self._startTime:
                predata['wpStarttime'] = self._startTime
            else:
                predata['wpStarttime'] = time.strftime('%Y%m%d%H%M%S',
                                                       time.gmtime())
            continue
        if self.site().has_mediawiki_message("viewsource")\
           and self.site().mediawiki_message('viewsource') in data:
            # The page is locked. This should have already been
            # detected when getting the page, but there are some
            # reasons why this didn't work, e.g. the page might be
            # locked via a cascade lock.
            try:
                # Page is locked - try using the sysop account, unless we're
                # using one already
                if sysop:
                    # Unknown permissions error
                    raise LockedPage()
                else:
                    self.site().forceLogin(sysop=True)
                    output(u'Page is locked, retrying using sysop account.')
                    return self._putPageOld(text, comment, watchArticle,
                                            minorEdit, newPage,
                                            token=self.site().getToken(
                                                sysop=True),
                                            sysop=True)
            except NoUsername:
                raise LockedPage()
        if not newToken and "<textarea" in data:
            ## # for debug use only, if badtoken still happen
            ## if "<textarea" in data:
            # We might have been using an outdated token
            output(u"Changing page has failed. Retrying.")
            return self._putPageOld(text, comment, watchArticle, minorEdit,
                                    newPage,
                                    token=self.site().getToken(
                                        sysop=sysop, getagain=True),
                                    newToken=True, sysop=sysop)
        # I think the error message title was changed from "Wikimedia Error"
        # to "Wikipedia has a problem", but I'm not sure. Maybe we could
        # just check for HTTP Status 500 (Internal Server Error)?
        if ("<title>Wikimedia Error</title>" in data or
                "has a problem</title>" in data or response.code == 500):
            output(u"Server error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        if ("1213: Deadlock found when trying to get lock" in data):
            # MySQL deadlock; back off and retry.
            output(u"Deadlock error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        if self.site().mediawiki_message('readonly') in data or \
           self.site().mediawiki_message('readonly_lag') in data:
            output(u"The database is currently locked for write access; "
                   u"will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        if self.site().has_mediawiki_message('longpageerror'):
            # FIXME: Long page error detection isn't working in vi-wiki.
            long_page_errorR = re.compile(
                # Some wikis (e.g. Lithuanian and Slovak Wikipedia) use
                # {{plural}} in [[MediaWiki:longpageerror]]
                re.sub(
                    r'\\{\\{plural\\:.*?\\}\\}', '.*?',
                    re.escape(
                        html2unicode(self.site().mediawiki_message(
                            'longpageerror')))).replace(
                    "\$1", "(?P<length>[\d,.\s]+)", 1).replace(
                    "\$2", "(?P<limit>[\d,.\s]+)", 1),
                re.UNICODE)
            match = long_page_errorR.search(data)
            if match:
                # Some wikis (e.g. Lithuanian Wikipedia) don't use
                # $2 parameter in [[MediaWiki:longpageerror]]
                longpage_length = 0
                longpage_limit = 0
                if 'length' in match.groups():
                    longpage_length = match.group('length')
                if 'limit' in match.groups():
                    longpage_limit = match.group('limit')
                raise LongPageError(longpage_length, longpage_limit)
        # We might have been prompted for a captcha if the
        # account is not autoconfirmed, checking....
        ## output('%s' % data) # WHY?
        solve = self.site().solveCaptcha(data)
        if solve:
            return self._putPageOld(text, comment, watchArticle, minorEdit,
                                    newPage, token, newToken, sysop,
                                    captcha=solve)
        # We are expecting a 302 to the action=view page. I'm not sure why
        # this was removed in r5019
        if response.code != 302 and data.strip() != u"":
            # Something went wrong, and we don't know what. Show the
            # HTML code that hopefully includes some error message.
            error(u"Unexpected response from wiki server.")
            output(u"  %s (%s) " % (response.code, response.msg))
            output(data)
            # Unexpected responses should raise an error and not pass,
            # be it silently or loudly. This should raise an error
            if 'name="wpTextbox1"' in data and \
               'var wgAction = "submit"' in data:
                # We are on the preview page, so the page was not saved
                raise PageNotSaved
        return response.code, response.msg, data
## @since r10311
# @remarks to support appending to single sections
def append(self, newtext, comment=None, minorEdit=True, section=0):
"""Append the wiki-text to the page.
Returns the result of text append to page section number 'section'.
0 for the top section, 'new' for a new section (end of page).
"""
# If no comment is given for the change, use the default
comment = comment or pywikibot.action
# send data by POST request
params = {
'action': 'edit',
'title': self.title(),
'section': '%s' % section,
'token': self.site().getToken(),
'summary': self._encodeArg(comment, 'summary'),
'bot': 1,
}
if section == 'new':
params['text'] = self._encodeArg(newtext, 'text')
else:
params['appendtext'] = self._encodeArg(newtext, 'text')
if minorEdit:
params['minor'] = 1
else:
params['notminor'] = 1
response, data = query.GetData(params, self.site(), back_response=True)
if not (data['edit']['result'] == u"Success"):
raise PageNotSaved('Bad result returned: %s'
% data['edit']['result'])
return response.code, response.msg, data
def protection(self):
"""Return list of dicts of this page protection level. like:
[{u'expiry': u'2010-05-26T14:41:51Z', u'type': u'edit',
u'level': u'autoconfirmed'}, {u'expiry': u'2010-05-26T14:41:51Z',
u'type': u'move', u'level': u'sysop'}]
if the page non protection, return []
"""
params = {
'action': 'query',
'prop': 'info',
'inprop': 'protection',
'titles': self.title(),
}
datas = query.GetData(params, self.site())
data = datas['query']['pages'].values()[0]['protection']
return data
@deprecate_arg("api", None)
def interwiki(self):
"""Return a list of interwiki links in the page text.
This will retrieve the page to do its work, so it can raise
the same exceptions that are raised by the get() method.
The return value is a list of Page objects for each of the
interwiki links in the page text.
"""
if hasattr(self, "_interwikis"):
return self._interwikis
if self.site.has_transcluded_data:
params = {
'action': 'query',
'prop': 'langlinks',
'titles': self.title(),
}
if not self.site().isAllowed('apihighlimits') and \
config.special_page_limit > 500:
params['cllimit'] = 500
iwlinks = []
while True:
datas = query.GetData(params, self.site())
data = datas['query']['pages'].values()[0]
if "langlinks" in data:
for c in data['langlinks']:
try:
llpage = Page(getSite(c["lang"]), c["*"])
except NoSuchSite:
continue
iwlinks.append(llpage)
if 'query-continue' in datas:
if 'langlinks' in datas['query-continue']:
params.update(datas['query-continue']['langlinks'])
else:
break
self._interwikis = iwlinks
return iwlinks
text = self.get()
# Replace {{PAGENAME}} by its value
for pagenametext in self.site().pagenamecodes(self.site().language()):
text = text.replace(u"{{%s}}" % pagenametext, self.title())
ll = getLanguageLinks(text, insite=self.site(),
pageLink=self.title(asLink=True))
result = ll.values()
self._interwikis = result
return result
def categories(self, get_redirect=False, api=False):
"""Return a list of Category objects that the article is in.
Please be aware: the api call returns also categies which are included
by templates. This differs to the old non-api code. If you need only
these categories which are in the page text please use getCategoryLinks
(or set api=False but this could be deprecated in future).
"""
if not (self.site().has_api() and api):
try:
category_links_to_return = getCategoryLinks(
self.get(get_redirect=get_redirect), self.site())
except NoPage:
category_links_to_return = []
return category_links_to_return
else:
import catlib
params = {
'action': 'query',
'prop': 'categories',
'titles': self.title(),
}
if not self.site().isAllowed('apihighlimits') and \
config.special_page_limit > 500:
params['cllimit'] = 500
output(u'Getting categories in %s via API...'
% self.title(asLink=True))
allDone = False
cats = []
while not allDone:
datas = query.GetData(params, self.site())
data = datas['query']['pages'].values()[0]
if "categories" in data:
for c in data['categories']:
if c['ns'] is 14:
cat = catlib.Category(self.site(), c['title'])
cats.append(cat)
if 'query-continue' in datas:
if 'categories' in datas['query-continue']:
params.update(datas['query-continue']['categories'])
else:
allDone = True
return cats
def linkedPages(self, withImageLinks=False):
"""Return a list of Pages that this Page links to.
Only returns pages from "normal" internal links. Category links are
omitted unless prefixed with ":". Image links are omitted when parameter
withImageLinks is False. Embedded templates are omitted (but links
within them are returned). All interwiki and external links are omitted.
@param withImageLinks: include Image links
@return: a list of Page objects.
"""
result = []
try:
thistxt = removeLanguageLinks(self.get(get_redirect=True),
self.site())
except NoPage:
raise
except IsRedirectPage:
raise
except SectionError:
return []
thistxt = removeCategoryLinks(thistxt, self.site())
# remove HTML comments, pre, nowiki, and includeonly sections
# from text before processing
thistxt = removeDisabledParts(thistxt)
# resolve {{ns:-1}} or {{ns:Help}}
thistxt = self.site().resolvemagicwords(thistxt)
for match in Rlink.finditer(thistxt):
title = match.group('title')
title = title.replace("_", " ").strip(" ")
if self.namespace() in self.site.family.namespacesWithSubpage:
# convert relative link to absolute link
if title.startswith(".."):
parts = self.title().split('/')
parts.pop()
title = u'/'.join(parts) + title[2:]
elif title.startswith("/"):
title = u'%s/%s' % (self.title(), title[1:])
if title.startswith("#"):
# this is an internal section link
continue
if not self.site().isInterwikiLink(title):
try:
page = Page(self.site(), title)
try:
hash(str(page))
except Exception:
raise Error(u"Page %s contains invalid link to [[%s]]."
% (self.title(), title))
except Error:
if verbose:
output(u"Page %s contains invalid link to [[%s]]."
% (self.title(), title))
continue
if not withImageLinks and page.isImage():
continue
if page.sectionFreeTitle() and page not in result:
result.append(page)
return result
def imagelinks(self, followRedirects=False, loose=False):
"""Return a list of ImagePage objects for images displayed on this Page.
Includes images in galleries.
If loose is True, this will find anything that looks like it
could be an image. This is useful for finding, say, images that are
passed as parameters to templates.
"""
results = []
# Find normal images
for page in self.linkedPages(withImageLinks=True):
if page.isImage():
# convert Page object to ImagePage object
results.append(ImagePage(page.site(), page.title()))
# Find images in galleries
pageText = self.get(get_redirect=followRedirects)
galleryR = re.compile('<gallery[^>]*>.*?</gallery>', re.DOTALL)
galleryEntryR = re.compile(
'(?P<title>(?:%s|%s):[\|\r\n]+?)(?:\|.*)?\r?\n'
% (self.site().image_namespace(),
self.site().family.image_namespace(code='_default')))
for gallery in galleryR.findall(pageText):
for match in galleryEntryR.finditer(gallery):
results.append(ImagePage(self.site(), match.group('title')))
if loose:
ns = getSite().image_namespace()
imageR = re.compile(
'\w\w\w+\.(?:gif|png|jpg|jpeg|svg|JPG|xcf|pdf|mid|ogg|djvu)',
re.IGNORECASE)
for imageName in imageR.findall(pageText):
results.append(ImagePage(self.site(), imageName))
return list(set(results))
def templates(self, get_redirect=False):
"""Return a list of titles (unicode) of templates used on this Page.
Template parameters are ignored.
"""
if not hasattr(self, "_templates"):
self._templates = list(set([template
for (template, param)
in self.templatesWithParams(
get_redirect=get_redirect)]))
return self._templates
def templatesWithParams(self, thistxt=None, get_redirect=False):
"""Return a list of templates used on this Page.
Return value is a list of tuples. There is one tuple for each use of
a template in the page, with the template title as the first entry
and a list of parameters as the second entry.
If thistxt is set, it is used instead of current page content.
"""
if not thistxt:
try:
thistxt = self.get(get_redirect=get_redirect)
except (IsRedirectPage, NoPage):
return []
else:
pass
## output(u"""\
##thistxt argument of templatesWithParams is deprecated. Please use textlib method
##extract_templates_and_params() instead.""")
return [(t[0].upper() + t[1:], p) for t, p in
extract_templates_and_params(thistxt, asList=True)]
def getRedirectTarget(self):
"""Return a Page object for the target this Page redirects to.
If this page is not a redirect page, will raise an IsNotRedirectPage
exception. This method also can raise a NoPage exception.
"""
try:
self.get()
except NoPage:
raise
except IsRedirectPage, err:
# otherwise it will return error pages with " inside.
target = err[0].replace('&quot;', '"')
if '|' in target:
warnings.warn("'%s' has a | character, this makes no sense"
% target, Warning)
return Page(self.site(), target)
else:
raise IsNotRedirectPage(self)
def getMovedTarget(self):
"""Return a Page object for the target this Page was moved to.
If this page was not moved, it will raise a NoPage exception.
"""
gen = self.site.logpages(number=1, mode='move', title=self.title(),
dump=True)
try:
lastmove = gen.next()['move']
except StopIteration:
raise NoPage(self.site(), unicode(self))
return Page(self.site, lastmove['new_title'])
    def getVersionHistory(self, forceReload=False, reverseOrder=False,
                          getAll=False, revCount=500):
        """Load the version history page and return history information.
        Return value is a list of tuples, where each tuple represents one
        edit and is built of revision id, edit date/time, user name,
        edit summary, size and tags. Starts with the most current revision,
        unless reverseOrder is True.
        Defaults to getting the first revCount edits, unless getAll is True.

        Results are cached on the instance (in _versionhistory, or
        _versionhistoryearliest for reverseOrder) and reused unless
        forceReload is set or the cache is too small.

        @param forceReload: discard any cached history and fetch it again.
        @param reverseOrder: iterate oldest revision first.
        @param getAll: keep fetching until the whole history is loaded.
        @param revCount: iterate no more than this number of revisions in total
        """
        # regular expression matching one edit in the version history.
        # results will have 4 groups: oldid, edit date/time, user name, and edit
        # summary.
        thisHistoryDone = False
        # Used in determining whether we need to skip the first page
        skip = False
        dataQuery = []
        # NOTE(review): hasData is never used below — looks like dead code;
        # kept for byte-compatibility.
        hasData = False
        # Are we getting by Earliest first?
        if reverseOrder:
            # Check if _versionhistoryearliest exists
            if not hasattr(self, '_versionhistoryearliest') or forceReload:
                self._versionhistoryearliest = []
            elif getAll and len(self._versionhistoryearliest) == revCount:
                # Cause a reload, or at least make the loop run
                thisHistoryDone = False
                skip = True
                dataQuery = self._versionhistoryearliest
            else:
                # Cache is usable as-is.
                thisHistoryDone = True
        elif (not hasattr(self, '_versionhistory') or forceReload or
              len(self._versionhistory) < revCount):
            self._versionhistory = []
            # ?? does not load if len(self._versionhistory) > revCount
            # shouldn't it
        elif getAll and len(self._versionhistory) == revCount:
            # Cause a reload, or at least make the loop run
            thisHistoryDone = False
            skip = True
            dataQuery = self._versionhistory
        else:
            thisHistoryDone = True
        if not thisHistoryDone:
            # Fetch (the rest of) the history from the wiki; 'skip' avoids
            # re-adding the first, already cached batch.
            dataQuery.extend(self._getVersionHistory(getAll, skip,
                                                     reverseOrder, revCount))
        if reverseOrder:
            # Return only revCount edits, even if the version history is
            # extensive
            if dataQuery != []:
                self._versionhistoryearliest = dataQuery
            del dataQuery
            if len(self._versionhistoryearliest) > revCount and not getAll:
                return self._versionhistoryearliest[:revCount]
            return self._versionhistoryearliest
        if dataQuery != []:
            self._versionhistory = dataQuery
            del dataQuery
        # Return only revCount edits, even if the version history is extensive
        if len(self._versionhistory) > revCount and not getAll:
            return self._versionhistory[:revCount]
        return self._versionhistory
    def _getVersionHistory(self, getAll=False, skipFirst=False,
                           reverseOrder=False, revCount=500, rvprop=None):
        """Load history informations by API query.
        Internal use for self.getVersionHistory(),
        don't use this function directly.

        @param rvprop: pipe-separated API revision properties; the order of
            the fields in each returned tuple follows this string.
        @return: list of tuples, one per revision, fields as in rvprop.
        @raise NoPage: the page does not exist.
        @raise BadTitle: the title is invalid.
        @raise RuntimeError: the API reported an error.
        """
        if not self.site().has_api() or self.site().versionnumber() < 8:
            # Old wikis have no usable API: fall back to screen scraping.
            return self._getVersionHistoryOld(getAll, skipFirst,
                                              reverseOrder, revCount)
        dataQ = []
        thisHistoryDone = False
        params = {
            'action': 'query',
            'prop': 'revisions',
            'titles': self.title(),
            'rvprop': rvprop or 'ids|timestamp|user|comment|size|tags',
            'rvlimit': revCount,
        }
        while not thisHistoryDone:
            if reverseOrder:
                params['rvdir'] = 'newer'
            result = query.GetData(params, self.site())
            if 'error' in result:
                raise RuntimeError("%s" % result['error'])
            pageInfo = result['query']['pages'].values()[0]
            # A page id of "-1" marks a missing or invalid title.
            if result['query']['pages'].keys()[0] == "-1":
                if 'missing' in pageInfo:
                    raise NoPage(self.site(), unicode(self),
                                 "Page does not exist.")
                elif 'invalid' in pageInfo:
                    raise BadTitle('BadTitle: %s' % self)
            if 'query-continue' in result:
                # More revisions available: continue from where we stopped.
                params.update(result['query-continue']['revisions'])
            else:
                thisHistoryDone = True
            if revCount > 0:
                # Trim this batch to the number of revisions still wanted;
                # when not fetching all, decrease the remaining budget.
                avail_revs = len(pageInfo['revisions'])
                pageInfo['revisions'] = pageInfo['revisions'][:min(revCount, avail_revs)]
                if not getAll:
                    revCount = revCount - avail_revs
            else:
                # Budget exhausted: discard the batch and stop looping.
                pageInfo['revisions'] = []
                thisHistoryDone = True
            if skipFirst:
                # The first batch is already cached by the caller.
                skipFirst = False
            else:
                for r in pageInfo['revisions']:
                    # set defaults
                    values = {
                        'ids': None,
                        'timestamp': None,
                        'user': None,
                        'flags': None,
                        'comment': u'',
                        'size': -1,
                        'tags': [],
                        'content': u'',
                    }
                    values.update(r)
                    # Map the raw API keys onto the rvprop field names.
                    if 'revid' in r:
                        values['ids'] = r['revid']
                    if '*' in r:
                        values['content'] = r['*']
                    elements = params['rvprop'].split('|')
                    row = [values[e] for e in elements]
                    dataQ.append(tuple(row))
        return dataQ
    def _getVersionHistoryOld(self, getAll=False, skipFirst=False,
                              reverseOrder=False, revCount=500):
        """Load the version history page and return history information.
        Internal use for self.getVersionHistory(),
        don't use this function directly.

        Screen-scrapes the HTML history page; the regular expression is
        chosen by MediaWiki version and captures 4 groups per edit:
        oldid, edit date/time, user name and edit summary.
        """
        dataQ = []
        thisHistoryDone = False
        startFromPage = None
        if self.site().versionnumber() < 4:
            editR = re.compile(
                '<li>\(.*?\)\s+\(.*\).*?'
                '<a href=".*?oldid=([0-9]*)" title=".*?">([^<]*)</a> '
                '<span class=\'user\'><a href=".*?" title=".*?">'
                '([^<]*?)</a></span>.*?'
                '(?:<span class=\'comment\'>(.*?)</span>)?</li>')
        elif self.site().versionnumber() < 15:
            editR = re.compile(
                '<li>\(.*?\)\s+\(.*\).*?'
                '<a href=".*?oldid=([0-9]*)" title=".*?">([^<]*)</a> '
                '(?:<span class=\'history-user\'>|)<a href=".*?" title=".*?">'
                '([^<]*?)</a>.*?(?:</span>|).*?'
                '(?:<span class=[\'"]comment[\'"]>(.*?)</span>)?</li>')
        elif self.site().versionnumber() < 16:
            editR = re.compile(
                r'<li class=".*?">\((?:\w*|<a[^<]*</a>)\)\s\((?:\w*|<a[^<]*'
                r'</a>)\).*?<a href=".*?([0-9]*)" title=".*?">([^<]*)</a> '
                r'<span class=\'history-user\'><a [^>]*?>([^<]*?)</a>.*?</span>'
                r'</span>(?: <span class="minor">.*?</span>|)'
                r'(?: <span class="history-size">.*?</span>|)'
                r'(?: <span class=[\'"]comment[\'"]>\('
                r'(?:<span class="autocomment">|)(.*?)(?:</span>|)\)</span>)?'
                r'(?: \(<span class="mw-history-undo">.*?</span>\)|)\s*</li>',
                re.UNICODE)
        else:
            editR = re.compile(
                r'<li(?: class="mw-tag[^>]+)?>\((?:\w+|<a[^<]*</a>)\)\s\('
                r'(?:\w+|<a[^<]*</a>)\).*?<a href=".*?([0-9]*)" title=".*?">'
                r'([^<]*)</a> <span class=\'history-user\'><a [^>]*?>([^<]*?)'
                r'</a>.*?</span></span>(?: <abbr class="minor"[^>]*?>.*?'
                r'</abbr>|)(?: <span class="history-size">.*?</span>|)'
                r'(?: <span class="comment">\((?:<span class="autocomment">|)'
                r'(.*?)(?:</span>|)\)</span>)?(?: \('
                r'<span class="mw-history-undo">.*?</span>\))?'
                r'(?: <span class="mw-tag-markers">.*?</span>\)</span>)?\s*'
                r'</li>', re.UNICODE)
        # Matches the paging offset of the "next page" link.
        RLinkToNextPage = re.compile('&offset=(.*?)&')
        while not thisHistoryDone:
            path = self.site().family.version_history_address(
                self.site().language(), self.urlname(),
                config.special_page_limit)
            if reverseOrder:
                path += '&dir=prev'
            if startFromPage:
                path += '&offset=' + startFromPage
            # this loop will run until the page could be retrieved
            # Try to retrieve the page until it was successfully loaded (just in
            # case the server is down or overloaded)
            # wait for retry_idle_time minutes (growing!) between retries.
            # NOTE(review): retry_idle_time is assigned but no retry loop is
            # visible here — presumably handled inside getUrl(); verify.
            retry_idle_time = 1
            if verbose:
                if startFromPage:
                    output(u'Continuing to get version history of %s' % self)
                else:
                    output(u'Getting version history of %s' % self)
            txt = self.site().getUrl(path)
            # save a copy of the text
            self_txt = txt
            # Find the nextPage link, if not exist, the page is last history
            # page
            matchObj = RLinkToNextPage.search(self_txt)
            if getAll and matchObj:
                startFromPage = matchObj.group(1)
            else:
                thisHistoryDone = True
            if not skipFirst:
                edits = editR.findall(self_txt)
            if skipFirst:
                # Skip the first page only,
                skipFirst = False
            else:
                if reverseOrder:
                    edits.reverse()
                #for edit in edits:
                dataQ.extend(edits)
                # Fewer edits than requested means we reached the end.
                if len(edits) < revCount:
                    thisHistoryDone = True
        return dataQ
def getVersionHistoryTable(self, forceReload=False, reverseOrder=False,
getAll=False, revCount=500):
"""Return the version history as a wiki table."""
result = '{| class="wikitable"\n'
result += '! oldid || date/time || size || username || edit summary\n'
for oldid, time, username, summary, size, tags \
in self.getVersionHistory(forceReload=forceReload,
reverseOrder=reverseOrder,
getAll=getAll, revCount=revCount):
result += '|----\n'
result += '| %s || %s || %d || %s || <nowiki>%s</nowiki>\n' \
% (oldid, time, size, username, summary)
result += '|}\n'
return result
def fullVersionHistory(self, getAll=False, skipFirst=False,
reverseOrder=False, revCount=500):
"""Iterate previous versions including wikitext.
Gives a list of tuples consisting of revision ID, edit date/time, user
name and content
"""
if not self.site().has_api() or self.site().versionnumber() < 8:
address = self.site().export_address()
predata = {
'action': 'submit',
'pages': self.title()
}
get_throttle(requestsize=10)
now = time.time()
response, data = self.site().postForm(address, predata)
data = data.encode(self.site().encoding())
output = []
# TODO: parse XML using an actual XML parser instead of regex!
r = re.compile(
"\<revision\>.*?\<id\>(?P<id>.*?)\<\/id\>.*?\<timestamp\>"
"(?P<timestamp>.*?)\<\/timestamp\>.*?\<(?:ip|username)\>"
"(?P<user>.*?)\</(?:ip|username)\>.*?\<text.*?\>"
"(?P<content>.*?)\<\/text\>", re.DOTALL)
return [(match.group('id'),
match.group('timestamp'),
unescape(match.group('user')),
unescape(match.group('content')))
for match in r.finditer(data)]
# Load history informations by API query.
return self._getVersionHistory(getAll=getAll, skipFirst=skipFirst,
reverseOrder=reverseOrder,
revCount=revCount,
rvprop='ids|timestamp|user|content')
def contributingUsers(self, step=None, total=None):
"""Return a set of usernames (or IPs) of users who edited this page.
@param step: limit each API call to this number of revisions
- not used yet, only in rewrite branch -
@param total: iterate no more than this number of revisions in total
"""
if total is None:
total = 500 # set to default of getVersionHistory
edits = self.getVersionHistory(revCount=total)
users = set([edit[2] for edit in edits])
return users
def getCreator(self):
""" Function to get the first editor and time stamp of a page """
inf = self.getVersionHistory(reverseOrder=True, revCount=1)[0]
return inf[2], inf[1]
def getLatestEditors(self, limit=1):
""" Function to get the last editors of a page """
#action=query&prop=revisions&titles=API&rvprop=timestamp|user|comment
if hasattr(self, '_versionhistory'):
data = self.getVersionHistory(getAll=True, revCount=limit)
else:
data = self.getVersionHistory(revCount=limit)
result = []
for i in data:
result.append({'user': i[2], 'timestamp': i[1]})
return result
def watch(self, unwatch=False):
"""Add this page to the watchlist"""
if self.site().has_api:
params = {
'action': 'watch',
'title': self.title()
}
# watchtoken is needed for mw 1.18
# TODO: Find a better implementation for other actions too
# who needs a token
if self.site().versionnumber() >= 18:
api = {
'action': 'query',
'prop': 'info',
'titles': self.title(),
'intoken': 'watch',
}
data = query.GetData(api, self.site())
params['token'] = data[
'query']['pages'].values()[0]['watchtoken']
if unwatch:
params['unwatch'] = ''
data = query.GetData(params, self.site())
if 'error' in data:
raise RuntimeError("API query error: %s" % data['error'])
else:
urlname = self.urlname()
if not unwatch:
address = self.site().watch_address(urlname)
else:
address = self.site().unwatch_address(urlname)
response = self.site().getUrl(address)
return response
    def unwatch(self):
        """Remove this page from the watchlist.

        Convenience wrapper around watch(unwatch=True).
        """
        self.watch(unwatch=True)
    def move(self, newtitle, reason=None, movetalkpage=True, movesubpages=False,
             sysop=False, throttle=True, deleteAndMove=False, safe=True,
             fixredirects=True, leaveRedirect=True):
        """Move this page to new title.
        * fixredirects has no effect in MW < 1.13
        @param newtitle: The new page title.
        @param reason: The edit summary for the move; prompted for if None.
        @param movetalkpage: If true, move this page's talk page (if it exists)
        @param movesubpages: If true, also move this page's subpages.
        @param sysop: Try to move using sysop account, if available
        @param throttle: Respect the global put throttle before moving.
        @param deleteAndMove: if move succeeds, delete the old page
            (usually requires sysop privileges, depending on wiki settings)
        @param safe: If false, attempt to delete existing page at newtitle
            (if there is one) and then move this page to that title
        @param leaveRedirect: If false, suppress the redirect left behind.
        @return: True on success, False on failure.
        """
        # Old wikis have no move API; fall back to form posting.
        if not self.site().has_api() or self.site().versionnumber() < 12:
            return self._moveOld(newtitle, reason, movetalkpage, sysop,
                                 throttle, deleteAndMove, safe, fixredirects,
                                 leaveRedirect)
        # Login
        try:
            self.get()
        except:
            pass
        sysop = self._getActionUser(action='move',
                                    restriction=self.moveRestriction,
                                    sysop=False)
        if deleteAndMove:
            # Deleting the target first requires delete rights.
            sysop = self._getActionUser(action='delete', restriction='',
                                        sysop=True)
            Page(self.site(), newtitle).delete(self.site().mediawiki_message(
                'delete_and_move_reason'), False, False)
        # Check blocks
        self.site().checkBlocks(sysop=sysop)
        if throttle:
            put_throttle()
        if reason is None:
            # Interactive: ask the operator for an edit summary.
            pywikibot.output(u'Moving %s to [[%s]].'
                             % (self.title(asLink=True), newtitle))
            reason = input(u'Please enter a reason for the move:')
        if self.isTalkPage():
            # A talk page has no separate talk page to move along.
            movetalkpage = False
        params = {
            'action': 'move',
            'from': self.title(),
            'to': newtitle,
            'token': self.site().getToken(sysop=sysop),
            'reason': reason,
        }
        if movesubpages:
            params['movesubpages'] = 1
        if movetalkpage:
            params['movetalk'] = 1
        if not leaveRedirect:
            params['noredirect'] = 1
        result = query.GetData(params, self.site(), sysop=sysop)
        if 'error' in result:
            err = result['error']['code']
            if err == 'articleexists':
                if safe:
                    output(
                        u'Page move failed: Target page [[%s]] already exists.'
                        % newtitle)
                else:
                    try:
                        # Try to delete and move
                        return self.move(newtitle, reason, movetalkpage,
                                         movesubpages, throttle=throttle,
                                         deleteAndMove=True)
                    except NoUsername:
                        # We dont have the user rights to delete
                        output(u'Page moved failed: Target page [[%s]] already '
                               u'exists.' % newtitle)
            #elif err == 'protectedpage':
            #
            else:
                error("%s" % result)
            return False
        elif 'move' in result:
            if deleteAndMove:
                output(u'Page %s moved to %s, deleting the existing page'
                       % (self.title(), newtitle))
            else:
                output(u'Page %s moved to %s' % (self.title(), newtitle))
            # Refresh the cached contents, which now describe a redirect or
            # a missing page.
            if hasattr(self, '_contents'):
                ## self.__init__(self.site(), newtitle,
                ##               defaultNamespace=self._namespace)
                try:
                    self.get(force=True, get_redirect=True, throttle=False)
                except NoPage:
                    output(u'Page %s is moved and no longer exist.'
                           % self.title())
                    #delattr(self, '_contents')
            return True
    def _moveOld(self, newtitle, reason=None, movetalkpage=True,
                 movesubpages=False, sysop=False, throttle=True,
                 deleteAndMove=False, safe=True, fixredirects=True,
                 leaveRedirect=True):
        """Move this page by posting the Special:Movepage form.

        Pre-API fallback for move(); same parameters and return value.
        Success/failure is detected by matching MediaWiki interface
        messages in the returned HTML.
        """
        # Login
        try:
            self.get()
        except:
            pass
        sysop = self._getActionUser(action='move',
                                    restriction=self.moveRestriction,
                                    sysop=False)
        if deleteAndMove:
            # Deleting the target first requires delete rights.
            sysop = self._getActionUser(action='delete', restriction='',
                                        sysop=True)
        # Check blocks
        self.site().checkBlocks(sysop=sysop)
        if throttle:
            put_throttle()
        if reason is None:
            # Interactive: ask the operator for an edit summary.
            reason = input(u'Please enter a reason for the move:')
        if self.isTalkPage():
            # A talk page has no separate talk page to move along.
            movetalkpage = False
        host = self.site().hostname()
        address = self.site().move_address()
        token = self.site().getToken(sysop=sysop)
        # Form fields of Special:Movepage.
        predata = {
            'wpOldTitle': self.title().encode(self.site().encoding()),
            'wpNewTitle': newtitle.encode(self.site().encoding()),
            'wpReason': reason.encode(self.site().encoding()),
        }
        if deleteAndMove:
            predata['wpDeleteAndMove'] = self.site().mediawiki_message(
                'delete_and_move_confirm')
            predata['wpConfirm'] = '1'
        if movetalkpage:
            predata['wpMovetalk'] = '1'
        else:
            predata['wpMovetalk'] = '0'
        # These checkboxes only exist since MW 1.13.
        if self.site().versionnumber() >= 13:
            if fixredirects:
                predata['wpFixRedirects'] = '1'
            else:
                predata['wpFixRedirects'] = '0'
            if leaveRedirect:
                predata['wpLeaveRedirect'] = '1'
            else:
                predata['wpLeaveRedirect'] = '0'
            if movesubpages:
                predata['wpMovesubpages'] = '1'
            else:
                predata['wpMovesubpages'] = '0'
        if token:
            predata['wpEditToken'] = token
        response, data = self.site().postForm(address, predata, sysop=sysop)
        if data == u'' or self.site().mediawiki_message(
                'pagemovedsub') in data:
            # Move Success
            if deleteAndMove:
                output(u'Page %s moved to %s, deleting the existing page'
                       % (self.title(), newtitle))
            else:
                output(u'Page %s moved to %s' % (self.title(), newtitle))
            # Refresh the cached contents, which now describe a redirect or
            # a missing page.
            if hasattr(self, '_contents'):
                ## self.__init__(self.site(), newtitle,
                ##               defaultNamespace=self._namespace)
                try:
                    self.get(force=True, get_redirect=True, throttle=False)
                except NoPage:
                    output(u'Page %s is moved and no longer exist.'
                           % self.title())
                    ## delattr(self, '_contents')
            return True
        else:
            #Move Failure
            self.site().checkBlocks(sysop=sysop)
            if self.site().mediawiki_message('articleexists') in data or \
               self.site().mediawiki_message('delete_and_move') in data:
                if safe:
                    output(
                        u'Page move failed: Target page [[%s]] already exists.'
                        % newtitle)
                    return False
                else:
                    try:
                        # Try to delete and move
                        return self._moveOld(newtitle, reason, movetalkpage,
                                             movesubpages, throttle=throttle,
                                             deleteAndMove=True)
                    except NoUsername:
                        # We dont have the user rights to delete
                        output(u'Page moved failed: Target page [[%s]] already '
                               u'exists.' % newtitle)
                        return False
            elif not self.exists():
                raise NoPage(
                    u'Page move failed: Source page [[%s]] does not exist.'
                    % newtitle)
            elif Page(self.site(), newtitle).exists():
                # XXX : This might be buggy : if the move was successful, the
                # target pase *has* been created
                raise PageNotSaved(
                    u'Page move failed: Target page [[%s]] already exists.'
                    % newtitle)
            else:
                output(u'Page move failed for unknown reason.')
                try:
                    ibegin = data.index('<!-- start content -->') + 22
                    iend = data.index('<!-- end content -->')
                except ValueError:
                    # if begin/end markers weren't found, show entire HTML file
                    output(data)
                else:
                    # otherwise, remove the irrelevant sections
                    data = data[ibegin:iend]
                    output(data)
                return False
    def delete(self, reason=None, prompt=True, throttle=True, mark=False):
        """Deletes the page from the wiki. Requires administrator status.
        @param reason: The edit summary for the deletion. If None, ask for it.
        @param prompt: If true, prompt user for confirmation before deleting.
        @param throttle: Respect the global put throttle before deleting.
        @param mark: if true, and user does not have sysop rights, place a
            speedy-deletion request on the page instead.
        @return: True on success, False on failure, None when only a
            speedy-deletion request was placed.
        """
        # Login
        try:
            self._getActionUser(action='delete', sysop=True)
        except NoUsername:
            # No sysop rights: optionally tag the page for speedy deletion.
            if mark and self.exists():
                text = self.get(get_redirect=True)
                output(u'Cannot delete page %s - marking the page for deletion '
                       u'instead:' % self.title(asLink=True))
                # Note: Parameters to {{delete}}, and their meanings, vary from
                # one Wikipedia to another. If you want or need to use them, you
                # must be careful not to break others. Else don't.
                # FIXME: Make some sort of configuration setting to keep track
                # of the templates in different wiki's
                self.put(u'{{speedydelete|1=%s --~~~~|bot=yes}}\n\n%s'
                         % (reason, text), comment=reason)
                return
            else:
                raise
        # Check blocks
        self.site().checkBlocks(sysop=True)
        if throttle:
            put_throttle()
        if reason is None:
            # Interactive: ask the operator for a deletion summary.
            output(u'Deleting %s.' % (self.title(asLink=True)))
            reason = input(u'Please enter a reason for the deletion:')
        answer = u'y'
        if prompt and not hasattr(self.site(), '_noDeletePrompt'):
            answer = inputChoice(u'Do you want to delete %s?' % self,
                                 ['yes', 'no', 'all'], ['y', 'N', 'a'], 'N')
            if answer == 'a':
                # 'all': stop prompting for the rest of this session.
                answer = 'y'
                self.site()._noDeletePrompt = True
        if answer == 'y':
            token = self.site().getToken(self, sysop=True)
            reason = reason.encode(self.site().encoding())
            if self.site().has_api() and self.site().versionnumber() >= 12:
                #API Mode
                params = {
                    'action': 'delete',
                    'title': self.title(),
                    'token': token,
                    'reason': reason,
                }
                datas = query.GetData(params, self.site(), sysop=True)
                if 'delete' in datas:
                    output(u'Page %s deleted' % self)
                    return True
                else:
                    if datas['error']['code'] == 'missingtitle':
                        output(
                            u'Page %s could not be deleted - it doesn\'t exist'
                            % self)
                    else:
                        output(u'Deletion of %s failed for an unknown reason. '
                               u'The response text is:' % self)
                        output('%s' % datas)
                    return False
            else:
                #Ordinary mode from webpage.
                host = self.site().hostname()
                address = self.site().delete_address(self.urlname())
                # Form fields of the deletion confirmation page.
                predata = {
                    'wpDeleteReasonList': 'other',
                    'wpReason': reason,
                    #'wpComment': reason, <- which version?
                    'wpConfirm': '1',
                    'wpConfirmB': '1',
                    'wpEditToken': token,
                }
                response, data = self.site().postForm(address, predata,
                                                      sysop=True)
                if data:
                    # Outcome is detected via MediaWiki interface messages.
                    self.site().checkBlocks(sysop=True)
                    if self.site().mediawiki_message('actioncomplete') in data:
                        output(u'Page %s deleted' % self)
                        return True
                    elif self.site().mediawiki_message('cannotdelete') in data:
                        output(
                            u'Page %s could not be deleted - it doesn\'t exist'
                            % self)
                        return False
                    else:
                        output(u'Deletion of %s failed for an unknown reason. '
                               u'The response text is:' % self)
                        try:
                            ibegin = data.index('<!-- start content -->') + 22
                            iend = data.index('<!-- end content -->')
                        except ValueError:
                            # if begin/end markers weren't found, show entire
                            # HTML file
                            output(data)
                        else:
                            # otherwise, remove the irrelevant sections
                            data = data[ibegin:iend]
                            output(data)
                        return False
    def loadDeletedRevisions(self, step=None, total=None):
        """Retrieve all deleted revisions for this Page from Special/Undelete.
        Stores all revisions' timestamps, dates, editors and comments in
        self._deletedRevs attribute.

        Each value is a 5-element list: [date, editor, comment, text,
        restoration marker]; see getDeletedRevision().

        @param step: unused here (rewrite-branch parameter).
        @param total: unused here (rewrite-branch parameter).
        @return: list of timestamps (which can be used to retrieve
            revisions later on).
        """
        # Login
        self._getActionUser(action='deletedhistory', sysop=True)
        #TODO: Handle image file revisions too.
        output(u'Loading list of deleted revisions for [[%s]]...'
               % self.title())
        self._deletedRevs = {}
        if self.site().has_api() and self.site().versionnumber() >= 12:
            params = {
                'action': 'query',
                'list': 'deletedrevs',
                'drfrom': self.title(withNamespace=False),
                'drnamespace': self.namespace(),
                'drprop': ['revid', 'user', 'comment',
                           'content'],  # ,'minor', 'len', 'token'],
                'drlimit': 100,
                'drdir': 'older',
                #'': '',
            }
            count = 0
            while True:
                data = query.GetData(params, self.site(), sysop=True)
                for x in data['query']['deletedrevs']:
                    # The listing starts at drfrom; skip other titles.
                    if x['title'] != self.title():
                        continue
                    for y in x['revisions']:
                        count += 1
                        self._deletedRevs[
                            parsetime2stamp(y['timestamp'])
                        ] = [y['timestamp'], y['user'], y['comment'],
                             y['*'], False]
                if 'query-continue' in data:
                    # Only follow the continuation while it still points at
                    # this title.
                    if data['query-continue']['deletedrevs'].values()[0].split(
                            '|')[1] == self.title(withNamespace=False):
                        params.update(data['query-continue']['deletedrevs'])
                    else:
                        break
                else:
                    break
            self._deletedRevsModified = False
        else:
            # Pre-API fallback: scrape Special:Undelete.
            address = self.site().undelete_view_address(self.urlname())
            text = self.site().getUrl(address, sysop=True)
            # TODO: Handle non-existent pages etc
            rxRevs = re.compile(
                r'<input name="(?P<ts>(?:ts|fileid)\d+)".*?title=".*?">'
                r'(?P<date>.*?)</a>.*?title=".*?">(?P<editor>.*?)</a>.*?'
                r'<span class="comment">\((?P<comment>.*?)\)</span>',
                re.DOTALL)
            for rev in rxRevs.finditer(text):
                self._deletedRevs[rev.group('ts')] = [
                    rev.group('date'),
                    rev.group('editor'),
                    rev.group('comment'),
                    None,  # Revision text
                    False,  # Restoration marker
                ]
            self._deletedRevsModified = False
        return self._deletedRevs.keys()
def getDeletedRevision(self, timestamp, retrieveText=False):
"""Return a particular deleted revision by timestamp.
@return: a list of [date, editor, comment, text, restoration
marker]. text will be None, unless retrieveText is True (or has
been retrieved earlier). If timestamp is not found, returns
None.
"""
if self._deletedRevs is None:
self.loadDeletedRevisions()
if timestamp not in self._deletedRevs:
#TODO: Throw an exception instead?
return
if retrieveText and not self._deletedRevs[timestamp][3] and \
timestamp[:2] == 'ts':
# Login
self._getActionUser(action='delete', sysop=True)
output(u'Retrieving text of deleted revision...')
address = self.site().undelete_view_address(self.urlname(),
timestamp)
text = self.site().getUrl(address, sysop=True)
und = re.search('<textarea readonly="1" cols="80" rows="25">'
'(.*?)</textarea><div><form method="post"',
text, re.DOTALL)
if und:
self._deletedRevs[timestamp][3] = und.group(1)
return self._deletedRevs[timestamp]
def markDeletedRevision(self, timestamp, undelete=True):
    """Flag one deleted revision for restoration by a later undelete().

    @param undelete: if False, mark the revision to remain deleted.
    """
    if self._deletedRevs is None:
        self.loadDeletedRevisions()
    revision = self._deletedRevs.get(timestamp)
    if revision is None:
        # TODO: Throw an exception?
        return
    # Slot 4 of the revision list is the restoration marker.
    revision[4] = undelete
    self._deletedRevsModified = True
def undelete(self, comment=None, throttle=True):
    """Undelete page based on the undeletion markers set by previous calls.

    If no calls have been made since loadDeletedRevisions(), everything
    will be restored.

    Simplest case:
        Page(...).undelete('This will restore all revisions')

    More complex:
        pg = Page(...)
        revs = pg.loadDeletedRevsions()
        for rev in revs:
            if ...:  # decide whether to undelete a revision
                pg.markDeletedRevision(rev)  # mark for undeletion
        pg.undelete('This will restore only selected revisions.')

    @param comment: The undeletion edit summary; prompted for when None.
    @return: the raw API/form result of the undeletion request.
    """
    # Login
    self._getActionUser(action='undelete', sysop=True)
    # Check blocks
    self.site().checkBlocks(sysop=True)
    token = self.site().getToken(self, sysop=True)
    if comment is None:
        output(u'Preparing to undelete %s.'
               % (self.title(asLink=True)))
        comment = input(u'Please enter a reason for the undeletion:')
    if throttle:
        put_throttle()
    if self.site().has_api() and self.site().versionnumber() >= 12:
        params = {
            'action': 'undelete',
            'title': self.title(),
            'reason': comment,
            'token': token,
        }
        if self._deletedRevs and self._deletedRevsModified:
            # Collect every timestamp marked for restoration.
            selected = [ts for ts in self._deletedRevs
                        if self._deletedRevs[ts][4]]
            # BUG FIX: the original assigned `ts,` — a one-element tuple
            # holding only the last timestamp iterated — and never used
            # the collected list. Pass the whole selection; query.GetData
            # serializes list-valued parameters for the API.
            params['timestamps'] = selected
        result = query.GetData(params, self.site(), sysop=True)
        if 'error' in result:
            raise RuntimeError("%s" % result['error'])
        elif 'undelete' in result:
            output(u'Page %s undeleted' % self.title(asLink=True))
        return result
    else:
        # Pre-API fallback: drive Special:Undelete's HTML form directly.
        address = self.site().undelete_address()
        formdata = {
            'target': self.title(),
            'wpComment': comment,
            'wpEditToken': token,
            'restore': self.site().mediawiki_message('undeletebtn')
        }
        if self._deletedRevs and self._deletedRevsModified:
            for ts in self._deletedRevs:
                if self._deletedRevs[ts][4]:
                    formdata['ts' + ts] = '1'
        self._deletedRevs = None
        # TODO: Check for errors below (have we succeeded? etc):
        result = self.site().postForm(address, formdata, sysop=True)
        output(u'Page %s undeleted' % self.title(asLink=True))
        return result
def protect(self, editcreate='sysop', move='sysop', unprotect=False,
            reason=None, editcreate_duration='infinite',
            move_duration='infinite', cascading=False, prompt=True,
            throttle=True):
    """(Un)protect a wiki page. Requires administrator status.

    If the title is not exist, the protection only ec (aka edit/create)
    available. If reason is None, asks for a reason. If prompt is True,
    asks the user if he wants to protect the page. Valid values for ec and
    move are:
        * '' (equivalent to 'none')
        * 'autoconfirmed'
        * 'sysop'
    """
    # Login
    self._getActionUser(action='protect', sysop=True)
    # Check blocks
    self.site().checkBlocks(sysop=True)
##    # check protect level if edit/move not same
##    if self.exists() and editcreate != move:
##        if editcreate == 'sysop' and move != 'sysop':
##            raise Error("The level configuration is not safe")
    if unprotect:
        editcreate = move = ''
    else:
        editcreate, move = editcreate.lower(), move.lower()
    if throttle:
        put_throttle()
    if reason is None:
        reason = input(u'Please enter a reason for the change of the '
                       u'protection level:')
    reason = reason.encode(self.site().encoding())
    answer = 'y'
    if prompt and not hasattr(self.site(), '_noProtectPrompt'):
        answer = inputChoice(
            u'Do you want to change the protection level of %s?' % self,
            ['Yes', 'No', 'All'], ['Y', 'N', 'A'], 'N')
        # 'All' answers yes now and suppresses the prompt for the rest
        # of this session (flag stored on the Site object).
        if answer == 'a':
            answer = 'y'
            self.site()._noProtectPrompt = True
    if answer == 'y':
        # Old MediaWiki (< 1.12) or no API: fall back to the HTML form.
        if not self.site().has_api() or self.site().versionnumber() < 12:
            return self._oldProtect(editcreate, move, unprotect, reason,
                                    editcreate_duration, move_duration,
                                    cascading, prompt, throttle)
        token = self.site().getToken(self, sysop=True)
        # Translate 'none' to ''
        protections = []
        expiry = []
        if editcreate == 'none':
            editcreate = 'all'
        if move == 'none':
            move = 'all'
        if editcreate_duration == 'none' or not editcreate_duration:
            editcreate_duration = 'infinite'
        if move_duration == 'none' or not move_duration:
            move_duration = 'infinite'
        if self.exists():
            protections.append("edit=%s" % editcreate)
            protections.append("move=%s" % move)
            # NOTE(review): only move_duration is appended here even
            # though two protections are set; the API then applies that
            # single expiry to both, so editcreate_duration is silently
            # ignored for existing pages — confirm this is intended.
            expiry.append(move_duration)
        else:
            # Non-existent page: only create-protection is possible.
            protections.append("create=%s" % editcreate)
            expiry.append(editcreate_duration)
        params = {
            'action': 'protect',
            'title': self.title(),
            'token': token,
            'protections': protections,
            'expiry': expiry,
        }
        if reason:
            params['reason'] = reason
        if cascading:
            if (editcreate != 'sysop' or move != 'sysop' or
                    not self.exists()):
                # You can't protect a page as autoconfirmed and cascading,
                # prevent the error
                # Cascade only available exists page, create prot. not.
                output(
                    u"NOTE: The page can't be protected with cascading and "
                    u"not also with only-sysop. Set cascading \"off\"")
            else:
                params['cascade'] = 1
        result = query.GetData(params, self.site(), sysop=True)
        if 'error' in result:  # error occured
            err = result['error']['code']
            error('%s' % result)
            #if err == '':
            #
            #elif err == '':
            #
        else:
            if result['protect']:
                output(u'Changed protection level of page %s.'
                       % self.title(asLink=True))
                return True
    # Reached when the user declined or the API reported an error.
    return False
def _oldProtect(self, editcreate='sysop', move='sysop', unprotect=False,
                reason=None, editcreate_duration='infinite',
                move_duration='infinite', cascading=False, prompt=True,
                throttle=True):
    """internal use for protect page by ordinary web page form

    Drives Special:Protect/Special:Unprotect directly for wikis whose
    MediaWiki is too old to offer action=protect via the API.

    @return: True when the form post succeeded (HTTP 302, empty body),
        False otherwise.
    """
    host = self.site().hostname()
    # NOTE(review): other methods call getToken(self, sysop=True); this
    # one omits the page argument — verify against Site.getToken().
    token = self.site().getToken(sysop=True)
    # Translate 'none' to ''
    if editcreate == 'none':
        editcreate = ''
    if move == 'none':
        move = ''
    # Translate no duration to infinite
    if editcreate_duration == 'none' or not editcreate_duration:
        editcreate_duration = 'infinite'
    if move_duration == 'none' or not move_duration:
        move_duration = 'infinite'
    # Get cascading
    if not cascading:
        cascading = '0'
    else:
        if editcreate != 'sysop' or move != 'sysop' or not self.exists():
            # You can't protect a page as autoconfirmed and cascading,
            # prevent the error
            # Cascade only available exists page, create prot. not.
            cascading = '0'
            output(u"NOTE: The page can't be protected with cascading and "
                   u"not also with only-sysop. Set cascading \"off\"")
        else:
            cascading = '1'
    if unprotect:
        address = self.site().unprotect_address(self.urlname())
    else:
        address = self.site().protect_address(self.urlname())
    predata = {}
    # BUG FIX: the original compared the bound method itself
    # (`self.site().versionnumber >= 10`), which is always truthy-ish
    # and never reflects the wiki's real version; call it.
    if self.site().versionnumber() >= 10:
        predata['mwProtect-cascade'] = cascading
    predata['mwProtect-reason'] = reason
    if not self.exists():  # and self.site().versionnumber() >= :
        # create protect
        predata['mwProtect-level-create'] = editcreate
        predata['wpProtectExpirySelection-create'] = editcreate_duration
    else:
        # edit/move Protect
        predata['mwProtect-level-edit'] = editcreate
        predata['mwProtect-level-move'] = move
        if self.site().versionnumber() >= 14:
            predata['wpProtectExpirySelection-edit'] = editcreate_duration
            predata['wpProtectExpirySelection-move'] = move_duration
        else:
            # Old single-expiry form field.
            predata['mwProtect-expiry'] = editcreate_duration
    if token:
        predata['wpEditToken'] = token
    response, data = self.site().postForm(address, predata, sysop=True)
    if response.code == 302 and not data:
        output(u'Changed protection level of page %s.'
               % self.title(asLink=True))
        return True
    else:
        # Normally, we expect a 302 with no data, so this means an error
        self.site().checkBlocks(sysop=True)
        output(u'Failed to change protection level of page %s:'
               % self.title(asLink=True))
        output(u"HTTP response code %s" % response.code)
        output(data)
        return False
def removeImage(self, image, put=False, summary=None, safe=True):
    """Delink every occurrence of an image from this Page.

    Convenience wrapper: identical to calling replaceImage() with no
    replacement image.
    """
    # TODO: this should be grouped with other functions that operate on
    # wiki-text rather than the Page object
    return self.replaceImage(image, None, put, summary, safe)
def replaceImage(self, image, replacement=None, put=False, summary=None,
                 safe=True):
    """Replace all occurences of an image by another image.

    Giving None as argument for replacement will delink instead of
    replace.

    The argument image must be without namespace and all spaces replaced
    by underscores.

    If put is False, the new text will be returned. If put is True, the
    edits will be saved to the wiki and True will be returned on succes,
    and otherwise False. Edit errors propagate.
    """
    # TODO: this should be grouped with other functions that operate on
    # wiki-text rather than the Page object
    # Copyright (c) Orgullomoore, Bryan
    # TODO: document and simplify the code
    site = self.site()
    text = self.get()
    new_text = text

    def capitalizationPattern(s):
        """
        Given a string, creates a pattern that matches the string, with
        the first letter case-insensitive if capitalization is switched
        on on the site you're working on.
        """
        if self.site().nocapitalize:
            return re.escape(s)
        else:
            return ur'(?:[%s%s]%s)' % (re.escape(s[0].upper()),
                                       re.escape(s[0].lower()),
                                       re.escape(s[1:]))

    # Both the File: (6) and Media: (-2) namespaces, in every alias.
    namespaces = set(site.namespace(6, all=True) + site.namespace(-2,
                                                                  all=True))
    # note that the colon is already included here
    namespacePattern = ur'\s*(?:%s)\s*\:\s*' % u'|'.join(namespaces)
    # Spaces and underscores are interchangeable in titles.
    imagePattern = u'(%s)' % capitalizationPattern(image).replace(r'\_',
                                                                  '[ _]')

    def filename_replacer(match):
        # Splice the replacement over the 'filename' group only, keeping
        # everything around it (namespace prefix, parameters) intact.
        if replacement is None:
            return u''
        else:
            old = match.group()
            return old[:match.start('filename')] + replacement + old[
                match.end('filename'):]

    # The group params contains parameters such as thumb and 200px, as well
    # as the image caption. The caption can contain wiki links, but each
    # link has to be closed properly.
    paramPattern = r'(?:\|(?:(?!\[\[).|\[\[.*?\]\])*?)'
    rImage = re.compile(
        ur'\[\[(?P<namespace>%s)(?P<filename>%s)(?P<params>%s*?)\]\]'
        % (namespacePattern, imagePattern, paramPattern))
    if replacement is None:
        new_text = rImage.sub('', new_text)
    else:
        new_text = rImage.sub('[[\g<namespace>%s\g<params>]]'
                              % replacement, new_text)
    # Remove the image from galleries
    galleryR = re.compile(r'(?is)<gallery>(?P<items>.*?)</gallery>')
    galleryItemR = re.compile(
        r'(?m)^%s?(?P<filename>%s)\s*(?P<label>\|.*?)?\s*$'
        % (namespacePattern, imagePattern))

    def gallery_replacer(match):
        return ur'<gallery>%s</gallery>' \
               % galleryItemR.sub(filename_replacer, match.group('items'))

    new_text = galleryR.sub(gallery_replacer, new_text)
    if (text == new_text) or (not safe):
        # All previous steps did not work, so the image is
        # likely embedded in a complicated template.
        # Note: this regular expression can't handle nested templates.
        templateR = re.compile(ur'(?s)\{\{(?P<contents>.*?)\}\}')
        fileReferenceR = re.compile(u'%s(?P<filename>(?:%s)?)'
                                    % (namespacePattern, imagePattern))

        def template_replacer(match):
            return fileReferenceR.sub(filename_replacer, match.group(0))

        new_text = templateR.sub(template_replacer, new_text)
    if put:
        if text != new_text:
            # Save to the wiki
            self.put(new_text, summary)
            return True
        # Nothing changed: report failure rather than making a null edit.
        return False
    else:
        return new_text
## @since 10310
# @remarks needed by various bots
def purgeCache(self):
    """Purges the page cache with API.

    ( non-api purge can be done with Page.purge_address() )
    """
    # Make sure we re-raise an exception we got on an earlier attempt
    try:
        return self._getexception
    except AttributeError:
        pass
    # call the wiki to execute the request
    params = {
        u'action': u'purge',
        u'titles': self.title(),
        u'forcelinkupdate': '',
    }
    pywikibot.get_throttle()
    pywikibot.output(u"Purging page cache for %s."
                     % self.title(asLink=True))
    result = query.GetData(params, self.site())
    info = result[u'purge'][0]
    # store and return info
    if u'missing' in info:
        self._getexception = pywikibot.NoPage
        raise pywikibot.NoPage(
            self.site(), self.title(asLink=True),
            "Page does not exist. Was not able to purge cache!")
    return u'purged' in info
class DataPage(Page):
"""A subclass of Page representing a page on wikidata.
Supports the same interface as Page, with the following added methods:
setitem : Setting item(s) on a page
editclaim : Create and/or set the value of a Wikibase claim
createitem : Create an item
get : Getting item(s) of a page (like entity, ...)
getentities : Get the data for multiple Wikibase entities
DEPRECATED: please use get() instead of getentities()
searchentities : Search for entities
linktitles : Associate articles on two different wikis with an item
IMPLEMENTATION: open - not done yet
removeclaim : Remove Wikibase claims
removereferences : Remove one or more references of the same statement
IMPLEMENTATION: open - not done yet
"""
def __init__(self, source, title=None, *args, **kwargs):
    """Construct a DataPage from a site code, a Page, or a numeric item ID.

    @param source: a site code string, a Page object (whose title and
        site are adopted), or an int N meaning item "QN" on the default
        site's data repository.
    @param title: page title; ignored when source is a Page or an int.
    """
    if isinstance(source, basestring):
        source = getSite(source)
    elif isinstance(source, Page):
        title = source.title()
        source = source.site
    elif isinstance(source, int):
        title = "Q%d" % source
        source = getSite().data_repository()
    # Remember where the request originated before switching to the
    # repository site that actually hosts the data.
    self._originSite = source
    self._originTitle = title
    source = self._originSite.data_repository()
    Page.__init__(self, source, title, *args, **kwargs)
    # e.g. "en_wikipedia" -> "en" -> API 'site' parameter stem.
    self._siteTitle = self._originSite.dbName().split(
        '_')[0].replace("-", "_")
    # When the origin was a client wiki (not the repository itself) the
    # item ID is unknown until the entity is fetched; _getentity() will
    # fill _title in later.
    if not (self._originSite == source):
        self._title = None
def getID(self):
    """Return the numeric ID of this item (e.g. 42 for "Q42").

    @return: int parsed from the title with its "Q" prefix stripped.
    """
    # get() is called purely for its side effect: it loads the entity
    # and fills in self._title when this DataPage was constructed from
    # a client-wiki page (the original bound it to an unused local).
    self.get()
    return int(self.title()[1:])
def setSitelink(self, page, summary=""):
    """Set a Sitelink for a Datapage.

    @param page: the site to link to
    @type page: pywikibot.Page object
    @param summary: edit summary
    @type summary: basestring
    """
    # Wikibase expects e.g. "enwiki", derived from the db name "enwiki_p".
    sitename = page.site().dbName().replace("_p", "")
    item = {'type': 'sitelink',
            'site': sitename,
            'title': page.title()}
    return self.setitem(summary=summary, items=item)
def setitem(self, summary=None, watchArticle=False, minorEdit=True,
            newPage=False, token=None, newToken=False, sysop=False,
            captcha=None, botflag=True, maxTries=-1, items={}):
    """Setting items on a specific page

    @param items: a dictionary of item(s) you want to add
    @type items: dict

    use of these ways:
        #for change Persian language label of a page to "OK"
        items={'type': u'item', 'label': 'fa', 'value': 'OK'})
        #for change English language description of a page to "OK"
        items={'type': u'description', 'language': 'en', 'value': 'OK'})
        #for change German language sitelink of a page to "OK"
        items={'type': u'sitelink', 'site': 'de', 'title': 'OK'})

    @return: a (302, response message, success flag) tuple.
    """
    # NOTE(review): items={} is a mutable default and this method
    # mutates items['type'] below — callers always pass items, but a
    # bare setitem() call would KeyError; consider items=None. TODO.
    retry_attempt = 0
    retry_delay = 1
    dblagged = False
    params = {
        'title': self._originTitle,
        'summary': self._encodeArg(summary, 'summary'),
    }
    params['site'] = self._siteTitle
    # When the item ID is already known, address the entity by 'id'
    # instead of by site/title pair.
    if self._title:
        del params['site']
        params['id'] = self._title.strip()
        del params['title']
    # NOTE(review): 'jsonfm' is the HTML-formatted debug variant of
    # 'json' — presumably query.GetData overrides this; verify.
    params['format'] = 'jsonfm'
    if items['type'] == u'item':
        # 'item' is shorthand for setting a label.
        params['value'] = items['value']
        params['language'] = items['label']
        items['type'] = u'label'
    elif items['type'] == u'description':
        params['value'] = items['value']
        params['language'] = items['language']
    elif items['type'] == u'sitelink':
        # Accept either a bare site code ('de') or a full dbname
        # ('dewiki'); Wikibase wants the latter.
        if "wiki" in items['site']:
            params['linksite'] = items['site']
        else:
            params['linksite'] = items['site'] + u'wiki'
        params['linktitle'] = items['title']
    elif items['type'] == u'label':
        raise NotImplementedError(
            u'Wikidata action type "%s" was not implemented yet'
            % items['type'])
    elif items['type'] == u'aliases':
        raise NotImplementedError(
            u'Wikidata action type "%s" was not implemented yet'
            % items['type'])
    else:
        raise NotImplementedError(
            u'Wikidata action type "%s" is unknown' % items['type'])
    # e.g. wbsetlabel / wbsetdescription / wbsetsitelink
    params['action'] = u'wbset' + items['type']
    if token:
        params['token'] = token
    else:
        params['token'] = self.site().getToken(sysop=sysop)
    if config.maxlag:
        params['maxlag'] = str(config.maxlag)
    if botflag:
        params['bot'] = 1
    if watchArticle:
        params['watch'] = 1
    if captcha:
        params['captchaid'] = captcha['id']
        params['captchaword'] = captcha['answer']
    # Retry loop with exponential backoff (capped at 30 minutes);
    # maxTries < 0 means retry forever.
    while True:
        if (maxTries == 0):
            raise MaxTriesExceededError()
        maxTries -= 1
        # Check whether we are not too quickly after the previous
        # putPage, and wait a bit until the interval is acceptable
        if not dblagged:
            put_throttle()
        # Which web-site host are we submitting to?
        if newPage:
            output(u'Creating page %s via API' % self)
            params['createonly'] = 1
        else:
            output(u'Updating page %s via API' % self)
            params['nocreate'] = 1
        try:
            response, data = query.GetData(params, self.site(),
                                           sysop=sysop, back_response=True)
            # A plain-string payload means the API answer could not be
            # parsed; treat like a malformed response.
            if isinstance(data, basestring):
                raise KeyError
        except httplib.BadStatusLine, line:
            raise PageNotSaved('Bad status line: %s' % line.line)
        except ServerError:
            exception(tb=True)
            retry_attempt += 1
            if retry_attempt > config.maxretries:
                raise
            output(u'Got a server error when putting %s; '
                   u'will retry in %i minute%s.'
                   % (self, retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        except ValueError:  # API result cannot decode
            output(u"Server error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        # If it has gotten this far then we should reset dblagged
        dblagged = False
        # Check blocks
        self.site().checkBlocks(sysop=sysop)
        # A second text area means that an edit conflict has occured.
        if response.code == 500:
            output(u"Server error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        if 'error' in data:
            errorCode = data['error']['info']
            output(u'Got an unknown error when putting data: %s'
                   % errorCode)
        else:
            if data['success'] == u"1":
                return 302, response.msg, data['success']
        # Fell through without success: report failure (no retry).
        return 302, response.msg, False
def createitem(self, summary=u'', value=None, watchArticle=False,
               minorEdit=True, token=None, newToken=False, sysop=False,
               captcha=None, botflag=True, maxTries=-1):
    """Creating an item

    usage:
        data.createitem(summary)

    @param value: optional entity data structure; when omitted, a
        minimal entity with one label and one sitelink is built from
        the origin site/title this DataPage was constructed with.
    @return: a (status code, response message, data/success) tuple.
    """
    retry_attempt = 0
    retry_delay = 1
    dblagged = False
    newPage = True
    params = {
        'summary': self._encodeArg(summary, 'summary'),
        'format': 'jsonfm',
        'action': 'wbeditentity',
        'new': 'item',
    }
    if not value:
        # Minimal JSON entity: label + sitelink for the origin wiki.
        params['data'] = (u'{"labels": {"%(lang)s": {"language": "%(lang)s", '
                          u'"value": "%(title)s"}}, "sitelinks": {"%(site)s": '
                          u'{"site": "%(site)s", "title": "%(title)s"}}}'
                          % {'lang': self._originSite.lang,
                             'title': self._originTitle,
                             'site': self._siteTitle})
    else:
        value = json.dumps(value)
        # NOTE(review): blanket quote replacement corrupts any value
        # containing a literal apostrophe — presumably works around
        # callers passing single-quoted pseudo-JSON; verify.
        value = value.replace("'", '"')
        params['data'] = value
    if token:
        params['token'] = token
    else:
        params['token'] = self.site().getToken(sysop=sysop)
    if config.maxlag:
        params['maxlag'] = str(config.maxlag)
    if botflag:
        params['bot'] = 1
    if watchArticle:
        params['watch'] = 1
    if captcha:
        params['captchaid'] = captcha['id']
        params['captchaword'] = captcha['answer']
    # Retry loop with exponential backoff (capped at 30 minutes);
    # maxTries < 0 means retry forever.
    while True:
        if (maxTries == 0):
            raise MaxTriesExceededError()
        maxTries -= 1
        # Check whether we are not too quickly after the previous
        # putPage, and wait a bit until the interval is acceptable
        if not dblagged:
            put_throttle()
        output(u'Creating page %s via API' % self._originTitle)
        params['createonly'] = 1
        try:
            response, data = query.GetData(params, self.site(),
                                           sysop=sysop, back_response=True)
            # A plain-string payload means the API answer could not be
            # parsed; treat like a malformed response.
            if isinstance(data, basestring):
                raise KeyError
        except httplib.BadStatusLine, line:
            raise PageNotSaved('Bad status line: %s' % line.line)
        except ServerError:
            exception(tb=True)
            retry_attempt += 1
            if retry_attempt > config.maxretries:
                raise
            output(u'Got a server error when putting %s; '
                   u'will retry in %i minute%s.'
                   % (self, retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        except ValueError:  # API result cannot decode
            output(u"Server error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        # If it has gotten this far then we should reset dblagged
        dblagged = False
        # Check blocks
        self.site().checkBlocks(sysop=sysop)
        # A second text area means that an edit conflict has occured.
        if response.code == 500:
            output(u"Server error encountered; will retry in %i minute%s."
                   % (retry_delay, retry_delay != 1 and "s" or ""))
            time.sleep(60 * retry_delay)
            retry_delay *= 2
            if retry_delay > 30:
                retry_delay = 30
            continue
        if 'error' in data:
            errorCode = data['error']['code']
            output(u'Got an unknown error when putting data: %s'
                   % errorCode)
        else:
            if data['success'] == u"1":
                return 302, response.msg, data['success']
        # Fell through without success: return the raw response.
        return response.code, response.msg, data
def editclaim(self, WDproperty, value, data_type=None, raw_value=False,
              refs=None, comment=None, token=None, sysop=False,
              botflag=True, override=True):
    """Create a Wikibase claim, or overwrite an existing one.

    @param WDproperty: property as int, "P123"-style string, or a label
        to look up via wbsearchentities.
    @param value: target value; ints and "Q..." strings are treated as
        item references unless raw_value is True.
    @param data_type: "time" switches value to a Wikibase time literal.
    @param refs: references to attach, either a ready-made snak dict or
        a set of (property, item) pairs.
    @param override: when True and a claim for the property exists, its
        value is replaced (wbsetclaimvalue); otherwise a new claim is
        created (wbcreateclaim).
    """
    # --- normalize the property to a numeric ID ---
    if isinstance(WDproperty, int):
        propertyID = WDproperty
    elif isinstance(WDproperty, basestring):
        try:
            propertyID = int(WDproperty)
        except ValueError:
            try:
                propertyID = int(
                    WDproperty.replace("p", "").replace("P", ""))
            except ValueError:
                # Not an ID at all: resolve the label via search.
                search = self.searchentities(WDproperty, 'property',
                                             lang=self._originSite.lang)
                propertyID = int(search[0]["id"].replace("p", ""))
            else:
                pass
        else:
            pass
    else:
        raise RuntimeError("Unknown property type: %s" % WDproperty)
    # --- normalize the value to the JSON string the API expects ---
    if not raw_value and not data_type == "time":
        if isinstance(value, int):  # for 'quantity' entity-type
            value = "{\"entity-type\":\"item\",\"numeric-id\":%s}" % value
        elif isinstance(value, unicode):  # for 'string' entity-type
            value = json.dumps(value)
        elif isinstance(value, basestring):  # for 'quantity' entity-type
            try:
                value = int(value)
            except ValueError:
                try:
                    value = int(value.replace("q", "").replace("Q", ""))
                except ValueError:
                    search = self.searchentities(value, 'item',
                                                 lang=self._originSite.lang)
                    value = int(search[0]["id"][1:])
                else:
                    pass
            else:
                pass
            value = "{\"entity-type\":\"item\",\"numeric-id\":%s}" % value
        else:
            raise RuntimeError("Unknown property type: %s" % value)
            # NOTE(review): the next line is unreachable (after raise).
            value = "{\"entity-type\":\"item\",\"numeric-id\":%s}" % value
    elif data_type == "time":
        value = "{\"time\":\"%s\",\"timezone\":0,\"before\":0,\"after\":0,\"precision\":11,\"calendarmodel\":\"http://www.wikidata.org/entity/Q1985727\"}" \
                % value
        # about calendarmodel see
        # https://bugzilla.wikimedia.org/show_bug.cgi?id=48965
    else:
        pass
    # --- find an existing claim for this property (last one wins) ---
    claims = self.get()['claims']
    theclaim = None
    for claim in claims:
        if claim['m'][1] == propertyID:
            theclaim = claim
    if theclaim and override:
        # Overwrite the existing claim's value in place.
        params = {
            'action': 'wbsetclaimvalue',
            'claim': theclaim['g'],
            'snaktype': 'value',
            'value': value,
        }
        params['token'] = token or self.site().getToken(sysop=sysop)
        if botflag:
            params['bot'] = 1
        output(u"Changing claim in item %s" % self.title())
        data = query.GetData(params, self.site(), sysop=sysop)
        if 'error' in data:
            raise RuntimeError("API query error: %s" % data)
        # Ignore the benign 'edit-no-change' warning.
        if (u'warnings' in data and
                not (data[u'warnings'][u'messages'][u'0'][u'name']
                     == u'edit-no-change')):
            warning(str(data[u'warnings']))
        guid = theclaim['g']
    else:
        # Create a new claim on the entity.
        params = {
            'action': 'wbcreateclaim',
            'entity': self.title(),
            'snaktype': 'value',
            'property': "p%d" % propertyID,
            'value': value,
        }
        params['token'] = token or self.site().getToken(sysop=sysop)
        if botflag:
            params['bot'] = 1
        output(u"Creating claim in item %s" % self.title())
        data = query.GetData(params, self.site(), sysop=sysop)
        if 'error' in data:
            raise RuntimeError("API query error: %s" % data)
        if 'warnings' in data:
            warning(str(data[u'warnings']))
        guid = data['claim']['id'] if 'claim' in data else ''
    # --- optionally attach references to the claim just touched ---
    if refs:
        snak = []
        if isinstance(refs, dict):
            # the references must be like this:
            # {"p?": [{"snaktype": "value",
            #          "property":"p?",
            #          "datavalue": {u'type': u'string', u'value': u'...'}},
            #         {"snaktype": "value", ... }}, ]}
            pass
        elif isinstance(refs, set):
            # the references must be like this:
            # {(ref1, value1), (ref2, value2)}
            for ref in refs:
                if isinstance(ref, basestring):
                    raise RuntimeError(
                        "the references must be like this: {(ref1, value1),"
                        "(ref2, value2)}")
                # i == 0 resolves the property, i == 1 the item value.
                for i in range(2):
                    if isinstance(ref[i], int):
                        value = ref[i]
                    elif isinstance(ref[i], basestring):
                        try:
                            value = int(ref[i])
                        except ValueError:
                            try:
                                value = int(ref[i][1:])
                            except ValueError:
                                if i == 0:
                                    typesearch = 'property'
                                else:
                                    typesearch = 'item'
                                search = self.searchentities(
                                    ref[i], typesearch,
                                    lang=self._originSite.lang)
                                value = int(
                                    search[0]["id"][1:])
                            else:
                                pass
                        else:
                            pass
                    else:
                        raise RuntimeError("Unknown item: %s" % ref[i])
                    snak.append(value)
        else:
            raise RuntimeError(
                "the references format cannot be understood!")
        if snak:
            # Build the snak dict from flattened (property, item) pairs.
            # NOTE: len(snak) / 2 relies on Python 2 integer division.
            finalsnak = {}
            for i in range(0, len(snak) / 2):
                snaki = [
                    {"snaktype": "value",
                     "property": "p" + str(snak[i * 2]),
                     "datavalue": {"type": "wikibase-entityid",
                                   "value": {"entity-type": "item",
                                             "numeric-id": snak[
                                                 (i * 2) + 1]}}}]
                finalsnak["p%d" % snak[i * 2]] = snaki
        else:
            finalsnak = refs
        finalsnak = json.dumps(finalsnak)
        finalsnak = finalsnak.replace("'", '"')
        params = {
            'action': 'wbsetreference',
            'statement': guid,
            'snaks': u"%s" % finalsnak,
            'bot': '1'
        }
        if token:
            params['token'] = token
        else:
            params['token'] = self.site().getToken(sysop=sysop)
        if botflag:
            params['bot'] = 1
        output(u"Adding references to claim in %s" % self.title())
        data = query.GetData(params, self.site(), sysop=sysop)
        if 'error' in data:
            raise RuntimeError("API query error: %s" % data)
        if (u'warnings' in data and
                not (data[u'warnings'][u'messages'][u'0'][u'name']
                     == u'edit-no-change')):
            warning(str(data[u'warnings']))
def removeclaim(self, WDproperty, value=None, raw_value=False,
                botflag=True, token=None, sysop=False):
    """Remove claim(s) for a property, optionally filtered by value.

    @param WDproperty: property as int, "P123"-style string, or a label
        to look up via wbsearchentities.
    @param value: optional item value; when given, only claims whose
        numeric-id matches are removed, otherwise all claims of the
        property are removed.
    @raise RuntimeError: when no matching claim exists on the item.
    """
    # --- normalize the property to a numeric ID ---
    if isinstance(WDproperty, int):
        propertyID = WDproperty
    elif isinstance(WDproperty, basestring):
        try:
            propertyID = int(WDproperty)
        except ValueError:
            try:
                propertyID = int(
                    WDproperty.replace("p", "").replace("P", ""))
            except ValueError:
                # Not an ID at all: resolve the label via search.
                search = self.searchentities(WDproperty, 'property',
                                             lang=self._originSite.lang)
                propertyID = int(search[0]["id"].replace("p", ""))
            else:
                pass
        else:
            pass
    else:
        raise RuntimeError("Unknown property type: %s" % WDproperty)
    # --- normalize the value to a numeric item ID (or None) ---
    if not raw_value:
        if isinstance(value, int) or not value:
            pass
        elif isinstance(value, unicode):  # for 'string' entity-type
            value = json.dumps(value)
        elif isinstance(value, basestring):  # for 'quantity' entity-type
            try:
                value = int(value)
            except ValueError:
                try:
                    value = int(value.replace("q", "").replace("Q", ""))
                except ValueError:
                    search = self.searchentities(value, 'item',
                                                 lang=self._originSite.lang)
                    value = int(search[0]["id"][1:])
                else:
                    pass
            else:
                pass
        else:
            raise RuntimeError("Unknown property type: %s" % value)
    else:
        pass
    # --- collect the GUIDs of every matching claim ---
    claims = self.get()['claims']
    theclaim = []
    for claim in claims:
        if claim['m'][1] == propertyID and (
                int(claim['m'][3][u'numeric-id']) == value or not value):
            theclaim.append(claim['g'])
    if not theclaim:
        raise RuntimeError("The claim %s hasn't been used in the item"
                           % propertyID)
    # wbremoveclaims accepts several pipe-separated claim GUIDs at once.
    params = {
        'action': 'wbremoveclaims',
        'claim': "|".join(theclaim),
    }
    if token:
        params['token'] = token
    else:
        params['token'] = self.site().getToken(sysop=sysop)
    if botflag:
        params['bot'] = 1
    output(u"Removing claim from %s" % self.title())
    data = query.GetData(params, self.site(), sysop=sysop)
    if 'error' in data:
        raise RuntimeError("API query error: %s" % data)
    if 'warnings' in data:
        warning(str(data[u'warnings']))
def _getentity(self, force=False, get_redirect=False, throttle=True,
               sysop=False, change_edit_time=True):
    """Returns items of a entity in a dictionary

    Fetches the entity via wbgetentities, merges in page metadata from
    a regular prop=revisions|info query, caches the parsed JSON in
    self._contents, and derives self._title ("Q..."/"P...") from it.
    """
    # Base parameter set; params1 keeps the revisions/info query,
    # params is rewritten into the wbgetentities query below.
    params = {
        'action': 'query',
        'titles': self._originTitle,
        'prop': ['revisions', 'info'],
        'rvprop': ['content', 'ids', 'flags', 'timestamp', 'user',
                   'comment', 'size'],
        'rvlimit': 1,
        'inprop': ['protection', 'subjectid'],
    }
    params1 = params.copy()
    params['action'] = 'wbgetentities'
    params['sites'] = self._siteTitle
    del params['prop']
    del params['rvprop']
    del params['rvlimit']
    del params['inprop']
    textareaFound = False
    # retrying loop is done by query.GetData
    data = query.GetData(params, self.site(), sysop=sysop)
    # Mirror the entities under the classic query/pages layout so the
    # metadata-handling code below can treat both queries uniformly.
    data['query'] = {'pages': data['entities']}
    for pageid in data['entities'].keys():
        if pageid == "-1":
            raise NoPage(self.site(), unicode(self),
                         "API query error, no pages found: %s" % data)
        # Fetch revision/info metadata for this entity's page id.
        params1['titles'] = pageid
        ndata = query.GetData(params1, self.site(), sysop=sysop)
        data['entities'].update(ndata['query']['pages'])
    data['query']['pages'].update(data['entities'])
    if 'error' in data:
        raise RuntimeError("API query error: %s" % data)
    if not 'pages' in data['query']:
        raise NoPage(self.site(), unicode(self),
                     "API query error, no pages found: %s" % data)
    # NOTE(review): ndata is only bound inside the loop above — an
    # empty 'entities' dict would raise NameError here; verify.
    pageInfo = ndata['query']['pages'].values()[0]
    if data['query']['pages'].keys()[0] == "-1":
        if 'missing' in pageInfo:
            raise NoPage(self.site(), unicode(self), "Page does not exist.")
        elif 'invalid' in pageInfo:
            raise BadTitle('BadTitle: %s' % self)
    elif 'revisions' in pageInfo:  # valid Title
        lastRev = pageInfo['revisions'][0]
        if isinstance(lastRev['*'], basestring):
            textareaFound = True
    # I got page date with 'revisions' in pageInfo but
    # lastRev['*'] = False instead of the content. The Page itself was
    # deleted but there was not 'missing' in pageInfo as expected
    # I raise a ServerError() yet, but maybe it should be NoPage().
    if not textareaFound:
        if verbose:
            output(unicode(pageInfo))
        raise ServerError('ServerError: No textarea found in %s' % self)
    self.editRestriction = ''
    self.moveRestriction = ''
    # Note: user may be hidden and mw returns 'userhidden' flag
    if 'userhidden' in lastRev:
        self._userName = None
    else:
        self._userName = lastRev['user']
    self._ipedit = 'anon' in lastRev
    try:
        self._comment = lastRev['comment']
    except KeyError:
        self._comment = None
    for restr in pageInfo['protection']:
        if restr['type'] == 'edit':
            self.editRestriction = restr['level']
        elif restr['type'] == 'move':
            self.moveRestriction = restr['level']
    self._revisionId = lastRev['revid']
    if change_edit_time:
        self._editTime = parsetime2stamp(lastRev['timestamp'])
    if "starttimestamp" in pageInfo:
        self._startTime = parsetime2stamp(pageInfo["starttimestamp"])
    self._isWatched = False  # cannot handle in API in my research for now.
    pagetext = lastRev['*']
    pagetext = pagetext.rstrip()
    # pagetext must not decodeEsperantoX() if loaded via API
    m = self.site().redirectRegex().match(pagetext)
    if m:
        # page text matches the redirect pattern
        if self.section() and not "#" in m.group(1):
            redirtarget = "%s#%s" % (m.group(1), self.section())
        else:
            redirtarget = m.group(1)
        if get_redirect:
            self._redirarg = redirtarget
        else:
            raise IsRedirectPage(redirtarget)
    if self.section() and \
       not does_text_contain_section(pagetext, self.section()):
        try:
            self._getexception
        except AttributeError:
            raise SectionError  # Page has no section by this name
    # The page text of a data page is the entity's JSON serialization.
    self._contents = json.loads(pagetext)
    if self._contents['entity'][0] == 'item':
        self._title = "Q"
    elif self._contents['entity'][0] == 'property':
        self._title = "P"
    else:
        raise RuntimeError("unknown type: %s call User:Ladsgroup to fix "
                           "this" % self._contents['entity'][0])
    self._title += str(self._contents['entity'][1])
    return self._contents
@deprecate_arg("get", None)
def getentities(self, sysop=False):
    """API module to get the data for multiple Wikibase entities.

    DEPRECATED: please use get() instead.

    @return: the entity record for this item from wbgetentities.
    @raise NoPage: when the API reports the entity as missing.
    @raise BadTitle: when the API reports the title as invalid.
    @raise RuntimeError: on any other API error.
    """
    params = {
        'action': 'wbgetentities',
        'ids': self.title(),
    }
    # retrying is done by query.GetData
    data = query.GetData(params, self.site(), sysop=sysop)
    # BUG FIX: check for an API error *before* touching the payload —
    # the original indexed data['entities'] first, so error responses
    # (which carry no 'entities' key) raised KeyError instead of the
    # intended RuntimeError.
    if 'error' in data:
        raise RuntimeError("API query error: %s" % data)
    entities = data['entities'][self.title().lower()]
    pageInfo = entities
    if 'missing' in pageInfo:
        raise NoPage(self.site(), unicode(self), "Page does not exist.")
    elif 'invalid' in pageInfo:
        raise BadTitle('BadTitle: %s' % self)
    return entities
def searchentities(self, search, entitytype=None, lang='en', sysop=False):
    """API module to search for entities.

    (independent of page object and could thus be extracted from this class)

    @param search: the label text to look for.
    @param entitytype: optionally restrict to 'item' or 'property'.
    @param lang: language to search labels in.
    @return: list of match records, all continuation pages included.
    """
    params = {
        'action': 'wbsearchentities',
        'search': search,
##        'language': self.site().language(),
        'language': lang,
    }
    if entitytype:
        params['type'] = entitytype
    # retrying is done by query.GetData
    search = []
    while True:
        data = query.GetData(params, self.site(), sysop=sysop)
        # BUG FIX: check for an API error before indexing the payload —
        # the original checked only after the loop, so error responses
        # (no 'search' key) raised KeyError instead of RuntimeError.
        if 'error' in data:
            raise RuntimeError("API query error: %s" % data)
        search += data['search']
        # Follow continuation until the result set is exhausted.
        if u'search-continue' in data:
            params[u'continue'] = data[u'search-continue']
        else:
            break
    pageInfo = search
    # NOTE(review): pageInfo is a list here, so these membership tests
    # look for the literal strings among the result records — kept for
    # behavioral compatibility; confirm they are ever meaningful.
    if 'missing' in pageInfo:
        raise NoPage(self.site(), unicode(self), "Page does not exist.")
    elif 'invalid' in pageInfo:
        raise BadTitle('BadTitle: %s' % self)
    return search
def get(self, *args, **kwargs):
if not hasattr(self, '_contents'):
if self._title is None:
self._getentity(*args, **kwargs)
else:
pagetext = super(DataPage, self).get(*args, **kwargs)
self._contents = json.loads(pagetext)
return self._contents
def isEmpty(self):
return not self.exists()
def interwiki(self, family='wikipedia'):
"""Return a list of interwiki links from data repository.
The return value is a list of Page objects for each of the
interwiki links.
@param family: Which family of links to get
@type family: str
"""
if family == 'wikipedia':
suffix = 'wiki'
else:
suffix = family
links = self.get()['links']
self._interwiki = [Page(getSite(code.replace(suffix,
'').replace('_', '-'),
fam=family),
links[code]) for code in links
if code.endswith(suffix)]
return self._interwiki
def linktitles(self):
raise NotImplementedError(
u'Wikidata member function "wblinktitles" was not implemented yet')
def removereferences(self):
raise NotImplementedError(
u'Wikidata member function "wbremovereferences" was not '
u'implemented yet')
wikidataPage = DataPage  # backward-compatible alias for the old class name
class ImagePage(Page):
    """A subclass of Page representing an image descriptor wiki page.

    Supports the same interface as Page, with the following added methods:

    getImagePageHtml          : Download image page and return raw HTML text.
    fileURL                   : Return the URL for the image described on
                                this page.
    fileIsOnCommons           : Return True if image stored on Wikimedia
                                Commons.
    fileIsShared              : Return True if image stored on a shared
                                repository like Wikimedia Commons or
                                Wikitravel.
    getFileMd5Sum             : Return image file's MD5 checksum.
    getFileVersionHistory     : Return the image file's version history.
    getFileVersionHistoryTable: Return the version history in the form of a
                                wiki table.
    usingPages                : Yield Pages on which the image is displayed.
    globalUsage               : Yield Pages on which the image is used
                                globally.
    """

    def __init__(self, site, title, insite=None):
        """Initializer; rejects titles outside the File: namespace (6)."""
        Page.__init__(self, site, title, insite, defaultNamespace=6)
        if self.namespace() != 6:
            raise ValueError(u"'%s' is not in the image namespace!" % title)
        self._imagePageHtml = None  # cached raw HTML of the description page
        self._local = None          # True when the file is stored locally
        self._latestInfo = {}       # imageinfo dict of the newest revision
        self._infoLoaded = False    # set once _loadInfo() completed

    def getImagePageHtml(self):
        """
        Download the image page, and return the HTML, as a unicode string.

        Caches the HTML code, so that if you run this method twice on the
        same ImagePage object, the page will only be downloaded once.
        """
        if not self._imagePageHtml:
            path = self.site().get_address(self.urlname())
            self._imagePageHtml = self.site().getUrl(path)
        return self._imagePageHtml

    def _loadInfo(self, limit=1):
        """Fetch imageinfo via the API and cache the latest revision info.

        Falls back to downloading the page HTML when the site has no API.
        Returns the list of revision info dicts when limit > 1, else None.
        """
        params = {
            'action': 'query',
            'prop': 'imageinfo',
            'titles': self.title(),
            'iiprop': ['timestamp', 'user', 'comment', 'url', 'size',
                       'dimensions', 'sha1', 'mime', 'metadata', 'archivename',
                       'bitdepth'],
            'iilimit': limit,
        }
        try:
            data = query.GetData(params, self.site())
        except NotImplementedError:
            output("API not work, loading page HTML.")
            self.getImagePageHtml()
            return
        if 'error' in data:
            raise RuntimeError("%s" % data['error'])
        count = 0
        # Python 2: dict.values()/.keys() return lists, so [0] is valid here.
        pageInfo = data['query']['pages'].values()[0]
        self._local = pageInfo["imagerepository"] != "shared"
        if data['query']['pages'].keys()[0] == "-1":
            if 'missing' in pageInfo and self._local:
                raise NoPage(self.site(), unicode(self),
                             "Page does not exist.")
            elif 'invalid' in pageInfo:
                raise BadTitle('BadTitle: %s' % self)
        infos = []
        try:
            while True:
                for info in pageInfo['imageinfo']:
                    count += 1
                    if count == 1 and 'iistart' not in params:
                        # count 1 and no iicontinue mean first image revision
                        # is latest.
                        self._latestInfo = info
                    infos.append(info)
                    if limit == 1:
                        break
                # NOTE(review): the continuation branch updates params but
                # never re-issues the query, so it appears to re-scan the same
                # batch — confirm whether continuation ever triggers here.
                if 'query-continue' in data and limit != 1:
                    params.update(data['query-continue']['imageinfo'])
                else:
                    break
        except KeyError:
            output("No image in imagepage")
        self._infoLoaded = True
        if limit > 1:
            return infos

    def fileUrl(self):
        """Return the URL for the image described on this page."""
        # There are three types of image pages:
        # * normal, small images with links like: filename.png
        #   (10KB, MIME type: image/png)
        # * normal, large images with links like: Download high resolution
        #   version (1024x768, 200 KB)
        # * SVG images with links like: filename.svg (1KB, MIME type: image/svg)
        # This regular expression seems to work with all of them. The part after
        # the | is required for copying .ogg files from en:, as they do not have
        # a "full image link" div. This might change in the future; on commons,
        # there is a full image link for .ogg and .mid files.
        # ***********************
        # change to API query:
        # action=query&titles=File:wiki.jpg&prop=imageinfo&iiprop=url
        if not self._infoLoaded:
            self._loadInfo()
        if self._infoLoaded:
            return self._latestInfo['url']
        # API unavailable: scrape the URL out of the description page HTML.
        urlR = re.compile(
            r'<div class="fullImageLink" id="file">.*?<a href="'
            r'(?P<url>[^ ]+?)"(?! class="image")|<span class="dangerousLink">'
            r'<a href="(?P<url2>.+?)"', re.DOTALL)
        m = urlR.search(self.getImagePageHtml())
        url = m.group('url') or m.group('url2')
        return url

    def fileIsOnCommons(self):
        """Return True if the image is stored on Wikimedia Commons"""
        if not self._infoLoaded:
            self._loadInfo()
        if self._infoLoaded:
            return not self._local
        # Fallback: recognise Commons files by their upload URL.
        return self.fileUrl().startswith(
            u'http://upload.wikimedia.org/wikipedia/commons/')

    def fileIsShared(self):
        """Return True if image is stored on Wikitravel shared repository."""
        if 'wikitravel_shared' in self.site().shared_image_repository():
            return self.fileUrl().startswith(
                u'http://wikitravel.org/upload/shared/')
        return self.fileIsOnCommons()

    # FIXME: MD5 might be performed on incomplete file due to server
    # disconnection (see bug #1795683).
    def getFileMd5Sum(self):
        """Return image file's MD5 checksum."""
        f = MyURLopener.open(self.fileUrl())
        return md5(f.read()).hexdigest()

    def getFileVersionHistory(self):
        """Return the image file's version history.

        Return value is a list of tuples containing (timestamp, username,
        resolution, filesize, comment).
        """
        result = []
        infos = self._loadInfo(500)
        # API query
        if infos:
            for i in infos:
                result.append((i['timestamp'], i['user'], u"%s×%s"
                               % (i['width'], i['height']), i['size'],
                               i['comment']))
            return result
        # from ImagePage HTML
        history = re.search(
            '(?s)<table class="wikitable filehistory">.+?</table>',
            self.getImagePageHtml())
        if history:
            lineR = re.compile(
                r'<tr>(?:<td>.*?</td>){1,2}<td.*?><a href=".+?">'
                r'(?P<datetime>.+?)</a></td><td>.*?'
                r'(?P<resolution>\d+\xd7\d+) <span.*?>'
                r'\((?P<filesize>.+?)\)</span></td><td><a href=".+?"'
                r'(?: class="new"|) title=".+?">'
                r'(?P<username>.+?)</a>.*?</td><td>'
                r'(?:.*?<span class="comment">'
                r'\((?P<comment>.*?)\)</span>)?</td></tr>')
            if not lineR.search(history.group()):
                # b/c code
                lineR = re.compile(
                    r'<tr>(?:<td>.*?</td>){1,2}<td><a href=".+?">'
                    r'(?P<datetime>.+?)</a></td><td><a href=".+?"'
                    r'(?: class="new"|) title=".+?">'
                    r'(?P<username>.+?)</a>.*?</td><td>'
                    r'(?P<resolution>.*?)</td><td class=".+?">'
                    r'(?P<filesize>.+?)</td><td>(?P<comment>.*?)</td></tr>')
        else:
            # backward compatible code
            history = re.search('(?s)<ul class="special">.+?</ul>',
                                self.getImagePageHtml())
            if history:
                lineR = re.compile(
                    '<li> \(.+?\) \(.+?\) <a href=".+?" title=".+?">'
                    '(?P<datetime>.+?)</a> . . <a href=".+?" title=".+?">'
                    '(?P<username>.+?)</a> \(.+?\) . . '
                    '(?P<resolution>\d+.+?\d+) \((?P<filesize>[\d,\.]+) .+?\)'
                    '( <span class="comment">(?P<comment>.*?)</span>)?</li>')
        if history:
            for match in lineR.finditer(history.group()):
                datetime = match.group('datetime')
                username = match.group('username')
                resolution = match.group('resolution')
                size = match.group('filesize')
                comment = match.group('comment') or ''
                result.append((datetime, username, resolution, size, comment))
        return result

    def getFirstUploader(self):
        """Return [username, timestamp] of the image's first upload."""
        inf = self.getFileVersionHistory()[-1]
        return [inf[1], inf[0]]

    def getLatestUploader(self):
        """Return [username, timestamp] of the image's most recent upload."""
        if not self._infoLoaded:
            self._loadInfo()
        if self._infoLoaded:
            return [self._latestInfo['user'], self._latestInfo['timestamp']]
        inf = self.getFileVersionHistory()[0]
        return [inf[1], inf[0]]

    def getHash(self):
        """Return the file's SHA1 hash, or None if it cannot be determined.

        Used to decide whether two files are identical.
        """
        if self.exists():
            if not self._infoLoaded:
                self._loadInfo()
            try:
                return self._latestInfo['sha1']
            except (KeyError, IndexError, TypeError):
                # No hash via the API; use get() to find out why (deleted,
                # redirect, or a genuine API problem).
                try:
                    self.get()
                except NoPage:
                    output(u'%s has been deleted before getting the Hash. '
                           u'Skipping...' % self.title())
                    return
                except IsRedirectPage:
                    output("Skipping %s because it's a redirect."
                           % self.title())
                    return
                else:
                    raise NoHash('No Hash found in the APIs! Maybe the regex '
                                 'to catch it is wrong or someone has changed '
                                 'the APIs structure.')
        else:
            output(u'File deleted before getting the Hash. Skipping...')

    def getFileVersionHistoryTable(self):
        """Return the version history in the form of a wiki table."""
        lines = []
        for (datetime, username, resolution, size, comment
             ) in self.getFileVersionHistory():
            lines.append(u'| %s || %s || %s || %s || <nowiki>%s</nowiki>'
                         % (datetime, username, resolution, size, comment))
        return u'{| border="1"\n! date/time || username || resolution || size || edit summary\n|----\n' + \
               u'\n|----\n'.join(lines) + '\n|}'

    def usingPages(self):
        """Yield Pages on which the image is displayed."""
        if not self.site().has_api() or self.site().versionnumber() < 11:
            # Old MediaWiki without usable API: scrape the page HTML.
            for a in self._usingPagesOld():
                yield a
            return
        params = {
            'action': 'query',
            'list': 'imageusage',
            'iutitle': self.title(),
            'iulimit': config.special_page_limit,
            #'': '',
        }
        while True:
            data = query.GetData(params, self.site())
            if 'error' in data:
                raise RuntimeError("%s" % data['error'])
            for iu in data['query']["imageusage"]:
                yield Page(self.site(), iu['title'], defaultNamespace=iu['ns'])
            if 'query-continue' in data:
                params.update(data['query-continue']['imageusage'])
            else:
                break

    def _usingPagesOld(self):
        """Yield Pages on which the image is displayed."""
        titleList = re.search('(?s)<h2 id="filelinks">.+?<!-- end content -->',
                              self.getImagePageHtml()).group()
        lineR = re.compile(
            '<li><a href="[^\"]+" title=".+?">(?P<title>.+?)</a></li>')
        for match in lineR.finditer(titleList):
            try:
                yield Page(self.site(), match.group('title'))
            except InvalidTitle:
                output(u"Image description page %s contains invalid reference "
                       u"to [[%s]]." % (self.title(), match.group('title')))

    def globalUsage(self):
        '''
        Yield Pages on which the image is used globally.

        Currently this probably only works on Wikimedia Commons.
        '''
        if not self.site().has_api() or self.site().versionnumber() < 11:
            # Not supported, just return none
            return
        params = {
            'action': 'query',
            'prop': 'globalusage',
            'titles': self.title(),
            'gulimit': config.special_page_limit,
            #'': '',
        }
        while True:
            data = query.GetData(params, self.site())
            if 'error' in data:
                raise RuntimeError("%s" % data['error'])
            for (page, globalusage) in data['query']['pages'].items():
                for gu in globalusage['globalusage']:
                    # FIXME : Should have a cleaner way to get the wiki where
                    # the image is used
                    siteparts = gu['wiki'].split('.')
                    if len(siteparts) == 3:
                        if siteparts[0] in self.site().fam().alphabetic and \
                           siteparts[1] in ['wikipedia', 'wiktionary',
                                            'wikibooks', 'wikiquote',
                                            'wikisource']:
                            code = siteparts[0]
                            fam = siteparts[1]
                        elif (siteparts[0] in ['meta', 'incubator'] and
                              siteparts[1] == u'wikimedia'):
                            # NOTE(review): the doubled assignments below look
                            # like a merge artifact; their net effect is
                            # code = fam = siteparts[0]. Confirm intent.
                            code = code = siteparts[0]
                            fam = code = siteparts[0]
                        else:
                            code = None
                            fam = None
                        if code and fam:
                            site = getSite(code=code, fam=fam)
                            yield Page(site, gu['title'])
            if 'query-continue' in data:
                params.update(data['query-continue']['globalusage'])
            else:
                break
class _GetAll(object):
    """For internal use only - supports getall() function"""

    def __init__(self, site, pages, throttle, force):
        # Queue only pages that have not been fetched yet, unless force.
        self.site = site
        self.pages = []
        self.throttle = throttle
        self.force = force
        self.sleeptime = 15  # initial back-off delay in seconds
        for page in pages:
            if (not hasattr(page, '_contents') and
                    not hasattr(page, '_getexception')) or force:
                self.pages.append(page)
            elif verbose:
                output(u"BUGWARNING: %s already done!"
                       % page.title(asLink=True))

    def sleep(self):
        """Pause between retries, backing off up to six minutes."""
        time.sleep(self.sleeptime)
        if self.sleeptime <= 60:
            self.sleeptime += 15
        elif self.sleeptime < 360:
            self.sleeptime += 60

    def run(self):
        """Fetch all queued pages, retrying on network errors."""
        if self.pages:
            # Sometimes query does not contains revisions
            # or some pages are missing. Deactivate api call and use the
            # old API special:export
            if self.site.has_api() and False:  # API path deliberately disabled
                while True:
                    try:
                        data = self.getDataApi()
                    except (socket.error, httplib.BadStatusLine, ServerError):
                        # Print the traceback of the caught exception
                        exception(tb=True)
                        debug(u'got network error in _GetAll.run. '
                              u'Sleeping for %d seconds...' % self.sleeptime)
                        self.sleep()
                    else:
                        if 'error' in data:
                            raise RuntimeError(data['error'])
                        else:
                            break
                self.headerDoneApi(data['query'])
                if 'normalized' in data['query']:
                    # Map requested titles to their normalized forms.
                    self._norm = dict([(x['from'], x['to'])
                                       for x in data['query']['normalized']])
                for vals in data['query']['pages'].values():
                    self.oneDoneApi(vals)
            else:  # read pages via Special:Export
                while True:
                    try:
                        data = self.getData()
                    except (socket.error, httplib.BadStatusLine, ServerError):
                        # Print the traceback of the caught exception
                        exception(tb=True)
                        debug(u'got network error in _GetAll.run. '
                              u'Sleeping for %d seconds...' % self.sleeptime)
                        self.sleep()
                    else:
                        if "<title>Wiki does not exist</title>" in data:
                            raise NoSuchSite(u'Wiki %s does not exist yet'
                                             % self.site)
                        elif "</mediawiki>" not in data[-20:]:
                            # HTML error Page got thrown because of an internal
                            # error when fetching a revision.
                            output(u'Received incomplete XML data. '
                                   u'Sleeping for %d seconds...'
                                   % self.sleeptime)
                            self.sleep()
                        elif "<siteinfo>" not in data:
                            # This probably means we got a 'temporary
                            # unavailable'
                            output(u'Got incorrect export page. '
                                   u'Sleeping for %d seconds...'
                                   % self.sleeptime)
                            self.sleep()
                        else:
                            break
                # Strip the leading <?xml ...?> declaration before parsing.
                R = re.compile(r"\s*<\?xml([^>]*)\?>(.*)", re.DOTALL)
                m = R.match(data)
                if m:
                    data = m.group(2)
                handler = xmlreader.MediaWikiXmlHandler()
                handler.setCallback(self.oneDone)
                handler.setHeaderCallback(self.headerDone)
                #f = open("backup.txt", "w")
                #f.write(data)
                #f.close()
                try:
                    xml.sax.parseString(data, handler)
                except (xml.sax._exceptions.SAXParseException,
                        ValueError), err:
                    debugDump('SaxParseBug', self.site, err, data)
                    raise
                except PageNotFound:
                    return
                # All of the ones that have not been found apparently do not
                # exist
                for pl in self.pages:
                    if not hasattr(pl, '_contents') and \
                       not hasattr(pl, '_getexception'):
                        pl._getexception = NoPage

    def oneDone(self, entry):
        """Store one parsed export entry on the matching queued Page(s)."""
        title = entry.title
        username = entry.username
        ipedit = entry.ipedit
        timestamp = entry.timestamp
        text = entry.text
        editRestriction = entry.editRestriction
        moveRestriction = entry.moveRestriction
        revisionId = entry.revisionid
        page = Page(self.site, title)
        successful = False
        for page2 in self.pages:
            if page2.sectionFreeTitle() == page.sectionFreeTitle():
                if not (hasattr(page2, '_contents') or
                        hasattr(page2, '_getexception')) or self.force:
                    page2.editRestriction = entry.editRestriction
                    page2.moveRestriction = entry.moveRestriction
                    if editRestriction == 'autoconfirmed':
                        page2._editrestriction = True
                    page2._permalink = entry.revisionid
                    page2._userName = username
                    page2._ipedit = ipedit
                    page2._revisionId = revisionId
                    page2._editTime = parsetime2stamp(timestamp)
                    page2._versionhistory = [(revisionId, timestamp, username,
                                              entry.comment)]
                    section = page2.section()
                    # Store the content
                    page2._contents = text
                    m = self.site.redirectRegex().match(text)
                    if m:
                        ## output(u"%s is a redirect" % page2.title(asLink=True))
                        redirectto = m.group(1)
                        if section and not "#" in redirectto:
                            redirectto += "#" + section
                        page2._getexception = IsRedirectPage
                        page2._redirarg = redirectto
                    # This is used for checking deletion conflict.
                    # Use the data loading time.
                    page2._startTime = time.strftime('%Y%m%d%H%M%S',
                                                     time.gmtime())
                    if section:
                        m = re.search("=+[ ']*%s[ ']*=+" % re.escape(section),
                                      text)
                        if not m:
                            try:
                                page2._getexception
                                warning(u"Section not found: %s" % page2)
                            except AttributeError:
                                # There is no exception yet
                                page2._getexception = SectionError
                successful = True
                # Note that there is no break here. The reason is that there
                # might be duplicates in the pages list.
        if not successful:
            output(u"BUG>> title %s (%s) not found in list" % (title, page))
            output(u'Expected one of: %s'
                   % u', '.join([unicode(page2) for page2 in self.pages]))
            raise PageNotFound

    def headerDone(self, header):
        """Validate the export header against the local family file."""
        # Verify version
        version = header.generator
        p = re.compile('^MediaWiki (.+)$')
        m = p.match(version)
        if m:
            version = m.group(1)
            # only warn operator when versionnumber has been changed
            versionnumber = self.site.family.versionnumber
            if version != self.site.version() and \
               versionnumber(self.site.lang,
                             version=version) != versionnumber(self.site.lang):
                warning(u'Family file %s contains version number %s, but it '
                        u'should be %s' % (self.site.family.name,
                                           self.site.version(), version))
        # Verify case
        if self.site.nocapitalize:
            case = 'case-sensitive'
        else:
            case = 'first-letter'
        if case != header.case.strip():
            warning(u'Family file %s contains case %s, but it should be %s'
                    % (self.site.family.name, case, header.case.strip()))
        # Verify namespaces
        lang = self.site.lang
        ids = header.namespaces.keys()
        ids.sort()
        for id in ids:
            nshdr = header.namespaces[id]
            if self.site.family.isDefinedNSLanguage(id, lang):
                ns = self.site.namespace(id) or u''
                if ns != nshdr:
                    try:
                        dflt = self.site.family.namespace('_default', id)
                    except KeyError:
                        dflt = u''
                    if not ns and not dflt:
                        flag = u"is not set, but should be '%s'" % nshdr
                    elif dflt == ns:
                        flag = u"is set to default ('%s'), but should be '%s'" \
                               % (ns, nshdr)
                    elif dflt == nshdr:
                        flag = (u"is '%s', but should be removed "
                                u"(default value '%s')" % (ns, nshdr))
                    else:
                        flag = u"is '%s', but should be '%s'" % (ns, nshdr)
                    warning(u"Outdated family file %s: namespace['%s'][%i] %s"
                            % (self.site.family.name, lang, id, flag))
                    # self.site.family.namespaces[id][lang] = nshdr
            else:
                warning(u"Missing namespace in family file "
                        u"%s: namespace['%s'][%i] (it is set to '%s')"
                        % (self.site.family.name, lang, id, nshdr))
        for id in self.site.family.namespaces:
            if self.site.family.isDefinedNSLanguage(
                    id, lang) and id not in header.namespaces:
                warning(u"Family file %s includes namespace['%s'][%i], but it "
                        u"should be removed (namespace doesn't exist in the "
                        u"site)" % (self.site.family.name, lang, id))

    def getData(self, curonly=True):
        """Fetch page texts in bulk via Special:Export; return raw XML bytes."""
        address = self.site.export_address()
        pagenames = [page.sectionFreeTitle() for page in self.pages]
        # We need to use X convention for requested page titles.
        if self.site.lang == 'eo':
            pagenames = [encodeEsperantoX(pagetitle) for pagetitle in pagenames]
        pagenames = u'\r\n'.join(pagenames)
        if type(pagenames) is not unicode:
            warning(u'xmlreader.WikipediaXMLHandler.getData() got non-unicode '
                    u'page names. Please report this.')
            output(unicode(pagenames))
        # convert Unicode string to the encoding used on that wiki
        pagenames = pagenames.encode(self.site.encoding())
        predata = {
            'action': 'submit',
            'pages': pagenames,
        }
        if curonly:
            predata['curonly'] = 'True'
        # Slow ourselves down
        get_throttle(requestsize=len(self.pages))
        # Now make the actual request to the server
        now = time.time()
        response, data = self.site.postForm(address, predata)
        # The XML parser doesn't expect a Unicode string, but an encoded one,
        # so we'll encode it back.
        data = data.encode(self.site.encoding())
        #get_throttle.setDelay(time.time() - now)
        return data

    def oneDoneApi(self, data):
        """Store one API page record on the matching queued Page(s)."""
        title = data['title']
        if not ('missing' in data or 'invalid' in data):
            revisionId = data['lastrevid']
            rev = None
            try:
                rev = data['revisions']
            except KeyError:
                raise KeyError(
                    u'NOTE: Last revision of [[%s]] not found' % title)
            else:
                username = rev[0]['user']
                ipedit = 'anon' in rev[0]
                timestamp = rev[0]['timestamp']
                text = rev[0]['*']
            editRestriction = ''
            moveRestriction = ''
            for revs in data['protection']:
                if revs['type'] == 'edit':
                    editRestriction = revs['level']
                elif revs['type'] == 'move':
                    moveRestriction = revs['level']
        page = Page(self.site, title)
        successful = False
        for page2 in self.pages:
            if hasattr(self, '_norm') and \
               page2.sectionFreeTitle() in self._norm:
                page2._title = self._norm[page2.sectionFreeTitle()]
            if page2.sectionFreeTitle() == page.sectionFreeTitle():
                if 'missing' in data:
                    page2._getexception = NoPage
                    successful = True
                    break
                if 'invalid' in data:
                    page2._getexception = BadTitle
                    successful = True
                    break
                if not (hasattr(page2, '_contents') or
                        hasattr(page2, '_getexception')) or self.force:
                    page2.editRestriction = editRestriction
                    page2.moveRestriction = moveRestriction
                    if editRestriction == 'autoconfirmed':
                        page2._editrestriction = True
                    page2._permalink = revisionId
                    if rev:
                        page2._userName = username
                        page2._ipedit = ipedit
                        page2._editTime = parsetime2stamp(timestamp)
                        page2._contents = text
                    else:
                        raise KeyError(
                            u'BUG?>>: Last revision of [[%s]] not found'
                            % title)
                    page2._revisionId = revisionId
                    section = page2.section()
                    if 'redirect' in data:
                        ## output(u"%s is a redirect" % page2.title(asLink=True))
                        m = self.site.redirectRegex().match(text)
                        redirectto = m.group(1)
                        if section and not "#" in redirectto:
                            redirectto += "#" + section
                        page2._getexception = IsRedirectPage
                        page2._redirarg = redirectto
                    # This is used for checking deletion conflict.
                    # Use the data loading time.
                    page2._startTime = time.strftime('%Y%m%d%H%M%S',
                                                     time.gmtime())
                    if section:
                        m = re.search("=+[ ']*%s[ ']*=+" % re.escape(section),
                                      text)
                        if not m:
                            try:
                                page2._getexception
                                warning(u"Section not found: %s"
                                        % page2)
                            except AttributeError:
                                # There is no exception yet
                                page2._getexception = SectionError
                successful = True
                # Note that there is no break here. The reason is that there
                # might be duplicates in the pages list.
        if not successful:
            output(u"BUG>> title %s (%s) not found in list" % (title, page))
            output(u'Expected one of: %s'
                   % u', '.join([unicode(page2) for page2 in self.pages]))
            raise PageNotFound

    def headerDoneApi(self, header):
        """Validate the API siteinfo header against the local family file."""
        p = re.compile('^MediaWiki (.+)$')
        m = p.match(header['general']['generator'])
        if m:
            version = m.group(1)
            # only warn operator when versionnumber has been changed
            versionnumber = self.site.family.versionnumber
            if version != self.site.version() and \
               versionnumber(self.site.lang,
                             version=version) != versionnumber(self.site.lang):
                warning(u'Family file %s contains version number %s, but it '
                        u'should be %s' % (self.site.family.name,
                                           self.site.version(), version))
        # Verify case
        if self.site.nocapitalize:
            case = 'case-sensitive'
        else:
            case = 'first-letter'
        if case != header['general']['case'].strip():
            # NOTE(review): header.case here looks wrong for a dict header —
            # probably should be header['general']['case']; confirm.
            warning(u'Family file %s contains case %s, but it should be %s'
                    % (self.site.family.name, case, header.case.strip()))
        # Verify namespaces
        lang = self.site.lang
        ids = header['namespaces'].keys()
        ids.sort()
        for id in ids:
            nshdr = header['namespaces'][id]['*']
            id = header['namespaces'][id]['id']
            if self.site.family.isDefinedNSLanguage(id, lang):
                ns = self.site.namespace(id) or u''
                if ns != nshdr:
                    try:
                        dflt = self.site.family.namespace('_default', id)
                    except KeyError:
                        dflt = u''
                    if not ns and not dflt:
                        flag = u"is not set, but should be '%s'" % nshdr
                    elif dflt == ns:
                        flag = u"is set to default ('%s'), but should be '%s'" \
                               % (ns, nshdr)
                    elif dflt == nshdr:
                        flag = (u"is '%s', but should be removed "
                                u"(default value '%s')" % (ns, nshdr))
                    else:
                        flag = u"is '%s', but should be '%s'" % (ns, nshdr)
                    warning(u"Outdated family file %s: namespace['%s'][%i] %s"
                            % (self.site.family.name, lang, id, flag))
                    #self.site.family.namespaces[id][lang] = nshdr
            else:
                warning(u"Missing namespace in family file "
                        u"%s: namespace['%s'][%i] (it is set to '%s')"
                        % (self.site.family.name, lang, id, nshdr))
        for id in self.site.family.namespaces:
            if self.site.family.isDefinedNSLanguage(id, lang) and \
               u'%i' % id not in header['namespaces']:
                warning(u"Family file %s includes namespace['%s'][%i], but it "
                        u"should be removed (namespace doesn't exist in the "
                        u"site)" % (self.site.family.name, lang, id))

    def getDataApi(self):
        """Build and send the single bulk API request for all queued pages."""
        pagenames = [page.sectionFreeTitle() for page in self.pages]
        params = {
            'action': 'query',
            'meta': 'siteinfo',
            'prop': ['info', 'revisions'],
            'titles': pagenames,
            'siprop': ['general', 'namespaces'],
            'rvprop': ['content', 'timestamp', 'user', 'comment',
                       'size'],  # 'ids',
            'inprop': ['protection',
                       'subjectid'],  # , 'talkid', 'url', 'readable'
        }
        # Slow ourselves down
        get_throttle(requestsize=len(self.pages))
        # Now make the actual request to the server
        now = time.time()
        #get_throttle.setDelay(time.time() - now)
        return query.GetData(params, self.site)
def getall(site, pages, throttle=True, force=False):
    """Bulk-retrieve a group of pages from site.

    Arguments:
        site     = Site object
        pages    = iterable that yields Page objects
        throttle = respect the throttle between requests
        force    = re-fetch pages that already have cached contents
    """
    # TODO: why isn't this a Site method?
    pages = list(pages)  # if pages is an iterator, we need to make it a list
    output(pywikibot.translate(
        'en',
        u'Getting %(count)d page{{PLURAL:count||s}} %(API)sfrom %(site)s...',
        {'count': len(pages),
         # API is deactivated since r8036 because some pages are missing
         'API': (u'', u'via API ')[site.has_api() and False],
         'site': site}))
    # default is 500/4, but It might have good point for server.
    limit = config.special_page_limit / 4
    if len(pages) > limit:
        # separate export pages for bulk-retrieve
        for pagg in range(0, len(pages), limit):
            if pagg == range(0, len(pages), limit)[-1]:  # latest retrieve
                k = pages[pagg:]
                output(u'Getting pages %d - %d of %d...' % (pagg + 1,
                                                            len(pages),
                                                            len(pages)))
                _GetAll(site, k, throttle, force).run()
                # Write the fetched chunk back into the caller-visible list.
                pages[pagg:] = k
            else:
                k = pages[pagg:pagg + limit]
                output(u'Getting pages %d - %d of %d...' % (pagg + 1,
                                                            pagg + limit,
                                                            len(pages)))
                _GetAll(site, k, throttle, force).run()
                pages[pagg:pagg + limit] = k
            # one time to retrieve is 7.7 sec.
            get_throttle(requestsize=len(pages) / 10)
    else:
        _GetAll(site, pages, throttle, force).run()
# Library functions
def setAction(s):
    """Set the summary used for subsequent changed-page submissions."""
    global action
    action = s

# Default action
setAction('Wikipedia python library')
def setUserAgent(s):
    """Set the User-agent: header passed to the HTTP server."""
    global useragent
    useragent = s
# Default User-agent: built from the running script name and the framework
# revision (see USER_AGENT_FORMAT defined earlier in this module).
setUserAgent(USER_AGENT_FORMAT % dict(
    script=('-'.join(version.get_executing_script())),
    versionrev=(version.getversiondict()["rev"])
))
def url2link(percentname, insite, site):
    """Convert urlname of a wiki page into interwiki link format.

    'percentname' is the page title as given by Page.urlname();
    'insite' specifies the target Site;
    'site' is the Site on which the page is found.
    """
    # Note: this is only needed if linking between wikis that use different
    # encodings, so it is now largely obsolete. [CONFIRM]
    title = url2unicode(percentname.replace('_', ' '), site=site)
    return unicode2html(title, insite.encoding())
def decodeEsperantoX(text):
    """
    Decode Esperanto text encoded using the x convention.

    E.g., Cxefpagxo and CXefpagXo will both be converted to Ĉefpaĝo.
    Note that to encode non-Esperanto words like Bordeaux, one uses a
    double x, i.e. Bordeauxx or BordeauxX.
    """
    chars = {
        u'c': u'ĉ',
        u'C': u'Ĉ',
        u'g': u'ĝ',
        u'G': u'Ĝ',
        u'h': u'ĥ',
        u'H': u'Ĥ',
        u'j': u'ĵ',
        u'J': u'Ĵ',
        u's': u'ŝ',
        u'S': u'Ŝ',
        u'u': u'ŭ',
        u'U': u'Ŭ',
    }
    for latin, esperanto in chars.iteritems():
        # A regular expression that matches a letter combination which IS
        # encoded using x-convention.
        xConvR = re.compile(latin + '[xX]+')
        pos = 0
        result = ''
        # Each matching substring will be regarded exactly once.
        while True:
            match = xConvR.search(text[pos:])
            if match:
                old = match.group()
                # An even run length means letter + odd number of x's:
                # the first pair encodes an Esperanto letter.
                if len(old) % 2 == 0:
                    # The first two chars represent an Esperanto letter.
                    # Following x's are doubled.
                    new = esperanto + ''.join([old[2 * i] for i in
                                               xrange(1, len(old) / 2)])
                else:
                    # The first character stays latin; only the x's are doubled.
                    new = latin + ''.join([old[2 * i + 1]
                                           for i in xrange(0, len(old) / 2)])
                result += text[pos: match.start() + pos] + new
                pos += match.start() + len(old)
            else:
                result += text[pos:]
                # Feed the partially-decoded text into the next letter's pass.
                text = result
                break
    return text
def encodeEsperantoX(text):
"""
Convert standard wikitext to the Esperanto x-encoding.
Double X-es where necessary so that we can submit a page to an Esperanto
wiki. Again, we have to keep stupid stuff like cXxXxxX in mind. Maybe
someone wants to write about the Sony Cyber-shot DSC-Uxx camera series on
eo: ;)
"""
# A regular expression that matches a letter combination which is NOT
# encoded in x-convention.
notXConvR = re.compile('[cghjsuCGHJSU][xX]+')
pos = 0
result = ''
while True:
match = notXConvR.search(text[pos:])
if match:
old = match.group()
# the first letter stays; add an x after each X or x.
new = old[0] + ''.join([old[i] + 'x' for i in xrange(1, len(old))])
result += text[pos: match.start() + pos] + new
pos += match.start() + len(old)
else:
result += text[pos:]
text = result
break
return text
######## Unicode library functions ########
def UnicodeToAsciiHtml(s):
    """Convert unicode to a bytestring using HTML entities."""
    # Printable ASCII (codepoints 32-127) passes through unchanged;
    # everything else becomes a decimal character reference.
    return ''.join(c if 31 < ord(c) < 128 else '&#%d;' % ord(c)
                   for c in s)
def url2unicode(title, site, site2=None):
"""Convert url-encoded text to unicode using site's encoding.
If site2 is provided, try its encodings as well. Uses the first encoding
that doesn't cause an error.
"""
# create a list of all possible encodings for both hint sites
encList = [site.encoding()] + list(site.encodings())
if site2 and site2 != site:
encList.append(site2.encoding())
encList += list(site2.encodings())
firstException = None
# try to handle all encodings (will probably retry utf-8)
for enc in encList:
try:
t = title.encode(enc)
t = urllib.unquote(t)
return unicode(t, enc)
except UnicodeError, ex:
if not firstException:
firstException = ex
pass
# Couldn't convert, raise the original exception
raise firstException
def unicode2html(x, encoding):
    """
    Ensure unicode string is encodable, or else convert to ASCII for HTML.

    Arguments are a unicode string and an encoding. Attempt to encode the
    string into the desired format; if that doesn't work, encode the unicode
    into html &#; entities. If it does work, return it unchanged.
    """
    try:
        x.encode(encoding)
        return x
    except UnicodeError:
        return UnicodeToAsciiHtml(x)
# Utility functions for parsing page titles
def html2unicode(text, ignore=None):
    """Return text, replacing HTML entities by equivalent unicode characters.

    @param ignore: list of codepoints whose entities are left untouched
        (extended with the defaults below).
    """
    if ignore is None:
        ignore = []
    # This regular expression will match any decimal and hexadecimal entity and
    # also entities that might be named entities.
    entityR = re.compile(
        r'&(?:amp;)?(#(?P<decimal>\d+)|#x(?P<hex>[0-9a-fA-F]+)|'
        r'(?P<name>[A-Za-z]+));')
    ignore.extend((38,  # Ampersand (&)
                   39,  # Bugzilla 24093
                   60,  # Less than (<)
                   62,  # Greater than (>)
                   # Opening bracket - sometimes used intentionally inside links
                   91,
                   # Closing bracket - sometimes used intentionally inside links
                   93,
                   # Vertical bar (|) - used intentionally in navigation bar
                   # templates on de:
                   124,
                   160,))
    # These characters are Html-illegal, but sadly you *can* find some of
    # these and converting them to unichr(decimal) is unsuitable
    convertIllegalHtmlEntities = {
        128: 8364,  # €
        130: 8218,  # ‚
        131: 402,   # ƒ
        132: 8222,  # „
        133: 8230,  # …
        134: 8224,  # †
        135: 8225,  # ‡
        136: 710,   # ˆ
        137: 8240,  # ‰
        138: 352,   # Š
        139: 8249,  # ‹
        140: 338,   # Œ
        142: 381,   # Ž
        145: 8216,  # ‘
        146: 8217,  # ’
        147: 8220,  # “
        148: 8221,  # ”
        149: 8226,  # •
        150: 8211,  # –
        151: 8212,  # —
        152: 732,   # ˜
        153: 8482,  # ™
        154: 353,   # š
        155: 8250,  # ›
        156: 339,   # œ
        158: 382,   # ž
        159: 376    # Ÿ
    }
    # ensuring that illegal &#129;, &#141; and &#157;, which have no known
    # values, don't get converted to unichr(129), unichr(141) or unichr(157)
    ignore = set(ignore) | set([129, 141, 157])
    result = u''
    i = 0
    found = True
    # Scan the text one entity match at a time, advancing past each match.
    while found:
        text = text[i:]
        match = entityR.search(text)
        if match:
            unicodeCodepoint = None
            if match.group('decimal'):
                unicodeCodepoint = int(match.group('decimal'))
            elif match.group('hex'):
                unicodeCodepoint = int(match.group('hex'), 16)
            elif match.group('name'):
                name = match.group('name')
                if name in htmlentitydefs.name2codepoint:
                    # We found a known HTML entity.
                    unicodeCodepoint = htmlentitydefs.name2codepoint[name]
            result += text[:match.start()]
            try:
                unicodeCodepoint = convertIllegalHtmlEntities[unicodeCodepoint]
            except KeyError:
                pass
            # Narrow Python builds cannot represent codepoints >= 0xFFFE
            # with unichr(); WIDEBUILD is set elsewhere in this module.
            if unicodeCodepoint and unicodeCodepoint not in ignore and \
               (WIDEBUILD or unicodeCodepoint < 65534):
                result += unichr(unicodeCodepoint)
            else:
                # Leave the entity unchanged
                result += text[match.start():match.end()]
            i = match.end()
        else:
            result += text
            found = False
    return result
# Warning! _familyCache does not necessarily have to be consistent between
# two statements. Always ensure that a local reference is created when
# accessing Family objects (values are weakly referenced and may be
# collected at any time).
_familyCache = weakref.WeakValueDictionary()
def Family(fam=None, fatal=True, force=False):
    """Import the named family and return its Family instance.

    Instances are memoized in the module-level _familyCache (a weak-value
    dictionary), so repeated calls for the same family name are cheap.

    @param fam: family name (if omitted, uses the configured default)
    @type fam: str
    @param fatal: if True, the bot will stop running if the given family is
        unknown. If False, it will only raise a ValueError exception.
    @type fatal: bool
    @param force: if True, re-import the family even if it is cached.
    @type force: bool
    @return: a Family instance configured for the named family.
    """
    if fam is None:
        fam = config.family
    family = _familyCache.get(fam)
    if family and not force:
        return family
    try:
        # search for family module in the 'families' subdirectory;
        # only add the directory once so repeated cache misses do not
        # grow sys.path unboundedly
        familydir = config.datafilepath('families')
        if familydir not in sys.path:
            sys.path.append(familydir)
        myfamily = __import__('%s_family' % fam)
    except ImportError:
        if fatal:
            output(u"""\
Error importing the %s family. This probably means the family
does not exist. Also check your configuration file."""
                   % fam)
            import traceback
            traceback.print_stack()
            sys.exit(1)
        else:
            raise ValueError("Family %s does not exist" % repr(fam))
    family = myfamily.Family()
    _familyCache[fam] = family
    return family
class Site(object):
"""A MediaWiki site. Do not instantiate directly; use getSite() function.
Constructor takes three arguments; only code is mandatory:
see __init__() param
Methods:
language: This Site's language code.
family: This Site's Family object.
sitename: A string representing this Site.
languages: A list of all languages contained in this site's Family.
validLanguageLinks: A list of language codes that can be used in interwiki
links.
loggedInAs: return current username, or None if not logged in.
forceLogin: require the user to log in to the site
messages: return True if there are new messages on the site
cookies: return user's cookies as a string
getUrl: retrieve an URL from the site
urlEncode: Encode a query to be sent using an http POST request.
postForm: Post form data to an address at this site.
postData: Post encoded form data to an http address at this site.
namespace(num): Return local name of namespace 'num'.
normalizeNamespace(value): Return preferred name for namespace 'value' in
this Site's language.
namespaces: Return list of canonical namespace names for this Site.
getNamespaceIndex(name): Return the int index of namespace 'name', or None
if invalid.
redirect: Return the localized redirect tag for the site.
redirectRegex: Return compiled regular expression matching on redirect
pages.
mediawiki_message: Retrieve the text of a specified MediaWiki message
has_mediawiki_message: True if this site defines specified MediaWiki
message
has_api: True if this site's family provides api interface
shared_image_repository: Return tuple of image repositories used by this
site.
    shared_data_repository: Return tuple of data repositories used by this
site.
category_on_one_line: Return True if this site wants all category links
on one line.
interwiki_putfirst: Return list of language codes for ordering of
interwiki links.
linkto(title): Return string in the form of a wikilink to 'title'
isInterwikiLink(s): Return True if 's' is in the form of an interwiki
link.
getSite(lang): Return Site object for wiki in same family, language
'lang'.
version: Return MediaWiki version string from Family file.
versionnumber: Return int identifying the MediaWiki version.
live_version: Return version number read from Special:Version.
checkCharset(charset): Warn if charset doesn't match family file.
server_time: returns server time (currently userclock depending)
getParsedString: Parses the string with API and returns html content.
getExpandedString: Expands the string with API and returns wiki content.
linktrail: Return regex for trailing chars displayed as part of a link.
disambcategory: Category in which disambiguation pages are listed.
Methods that yield Page objects derived from a wiki's Special: pages
(note, some methods yield other information in a tuple along with the
Pages; see method docs for details) --
search(query): query results from Special:Search
allpages(): Special:Allpages
prefixindex(): Special:Prefixindex
protectedpages(): Special:ProtectedPages
newpages(): Special:Newpages
newimages(): Special:Log&type=upload
longpages(): Special:Longpages
shortpages(): Special:Shortpages
categories(): Special:Categories (yields Category objects)
deadendpages(): Special:Deadendpages
ancientpages(): Special:Ancientpages
lonelypages(): Special:Lonelypages
recentchanges(): Special:Recentchanges
unwatchedpages(): Special:Unwatchedpages (sysop accounts only)
uncategorizedcategories(): Special:Uncategorizedcategories (yields
Category objects)
uncategorizedpages(): Special:Uncategorizedpages
uncategorizedimages(): Special:Uncategorizedimages (yields
ImagePage objects)
uncategorizedtemplates(): Special:UncategorizedTemplates
    unusedcategories(): Special:Unusedcategories (yields Category)
unusedfiles(): Special:Unusedimages (yields ImagePage)
randompage: Special:Random
randomredirectpage: Special:RandomRedirect
withoutinterwiki: Special:Withoutinterwiki
linksearch: Special:Linksearch
Convenience methods that provide access to properties of the wiki Family
object; all of these are read-only and return a unicode string unless
noted --
encoding: The current encoding for this site.
encodings: List of all historical encodings for this site.
category_namespace: Canonical name of the Category namespace on this
site.
category_namespaces: List of all valid names for the Category
namespace.
image_namespace: Canonical name of the Image namespace on this site.
template_namespace: Canonical name of the Template namespace on this
site.
protocol: Protocol ('http' or 'https') for access to this site.
hostname: Host portion of site URL.
path: URL path for index.php on this Site.
dbName: MySQL database name.
Methods that return addresses to pages on this site (usually in
Special: namespace); these methods only return URL paths, they do not
interact with the wiki --
export_address: Special:Export.
query_address: URL path + '?' for query.php
api_address: URL path + '?' for api.php
apipath: URL path for api.php
move_address: Special:Movepage.
delete_address(s): Delete title 's'.
undelete_view_address(s): Special:Undelete for title 's'
undelete_address: Special:Undelete.
protect_address(s): Protect title 's'.
unprotect_address(s): Unprotect title 's'.
put_address(s): Submit revision to page titled 's'.
get_address(s): Retrieve page titled 's'.
nice_get_address(s): Short URL path to retrieve page titled 's'.
edit_address(s): Edit form for page titled 's'.
purge_address(s): Purge cache and retrieve page 's'.
block_address: Block an IP address.
unblock_address: Unblock an IP address.
blocksearch_address(s): Search for blocks on IP address 's'.
linksearch_address(s): Special:Linksearch for target 's'.
search_address(q): Special:Search for query 'q'.
allpages_address(s): Special:Allpages.
newpages_address: Special:Newpages.
longpages_address: Special:Longpages.
shortpages_address: Special:Shortpages.
unusedfiles_address: Special:Unusedimages.
categories_address: Special:Categories.
deadendpages_address: Special:Deadendpages.
ancientpages_address: Special:Ancientpages.
lonelypages_address: Special:Lonelypages.
protectedpages_address: Special:ProtectedPages
unwatchedpages_address: Special:Unwatchedpages.
uncategorizedcategories_address: Special:Uncategorizedcategories.
uncategorizedimages_address: Special:Uncategorizedimages.
uncategorizedpages_address: Special:Uncategorizedpages.
uncategorizedtemplates_address: Special:UncategorizedTemplates.
unusedcategories_address: Special:Unusedcategories.
withoutinterwiki_address: Special:Withoutinterwiki.
    references_address(s): Special:Whatlinkshere for page 's'.
allmessages_address: Special:Allmessages.
upload_address: Special:Upload.
double_redirects_address: Special:Doubleredirects.
broken_redirects_address: Special:Brokenredirects.
random_address: Special:Random.
    randomredirect_address: Special:RandomRedirect.
login_address: Special:Userlogin.
captcha_image_address(id): Special:Captcha for image 'id'.
watchlist_address: Special:Watchlist editor.
contribs_address(target): Special:Contributions for user 'target'.
"""
    @deprecate_arg("persistent_http", None)
    def __init__(self, code, fam=None, user=None):
        """Initialize a Site for language `code` in wiki family `fam`.

        @param code: the site's language code
        @type code: str
        @param fam: wiki family name (optional)
        @type fam: str or Family
        @param user: bot user name (optional)
        @type user: str
        @raises NoSuchSite: if the code is obsolete with no replacement,
            or unknown to the family.
        """
        self.__code = code.lower()
        # accept either a family name or an already-built Family object
        if isinstance(fam, basestring) or fam is None:
            self.__family = Family(fam, fatal=False)
        else:
            self.__family = fam
        # if we got an outdated language code, use the new one instead.
        if self.__code in self.__family.obsolete:
            if self.__family.obsolete[self.__code] is not None:
                self.__code = self.__family.obsolete[self.__code]
            else:
                # no such language anymore
                raise NoSuchSite("Language %s in family %s is obsolete"
                                 % (self.__code, self.__family.name))
        if self.__code not in self.languages():
            # single-language families use the family name as the code
            if self.__family.name in self.__family.langs.keys() and \
               len(self.__family.langs) == 1:
                oldcode = self.__code
                self.__code = self.__family.name
                # keep the global config in sync if it referenced the old code
                if self.__family == pywikibot.config.family \
                   and oldcode == pywikibot.config.mylang:
                    pywikibot.config.mylang = self.__code
            else:
                raise NoSuchSite("Language %s does not exist in family %s"
                                 % (self.__code, self.__family.name))
        self.nocapitalize = self.code in self.family.nocapitalize
        self._mediawiki_messages = {}
        self._info = {}
        # All two-element lists below hold per-account state:
        # index 0 = normal user, index 1 = sysop (see _userIndex()).
        self._userName = [None, None]
        self.user = user
        self._userData = [False, False]
        self._isLoggedIn = [None, None]
        self._isBlocked = [None, None]
        self._messages = [None, None]
        self._rights = [None, None]
        self._token = [None, None]
        self._patrolToken = [None, None]
        self._cookies = [None, None]
        # Calculating valid languages took quite long, so we calculate it once
        # in initialization instead of each time it is used.
        self._validlanguages = []
        for language in self.languages():
            # a code whose capitalized form is a namespace name is not a
            # usable interwiki language prefix
            if not language[0].upper() + language[1:] in self.namespaces():
                self._validlanguages.append(language)
    def __call__(self):
        """Since the Page.site() method has a property decorator, return the
        site object for backwards-compatibility if Page.site() call is still
        used instead of Page.site as recommended.
        """
        ## # DEPRECATED warning. Should be uncommented if scripts are actualized
        ## output('Page.site() method is DEPRECATED, '
        ##        'use Page.site instead.')
        return self
    @property
    def family(self):
        """The Family object for this Site's wiki family (read-only)."""
        return self.__family
    @property
    def code(self):
        """The identifying code for this Site (read-only).

        By convention, this is usually an ISO language code, but it does
        not have to be.
        """
        return self.__code
    @property
    def lang(self):
        """The ISO language code for this Site (read-only).

        Presumed to be equal to the wiki prefix, but this can be overridden.
        Currently an alias for `code`.
        """
        return self.__code
def __cmp__(self, other):
"""Perform equality and inequality tests on Site objects."""
if not isinstance(other, Site):
return 1
if self.family.name == other.family.name:
return cmp(self.code, other.code)
return cmp(self.family.name, other.family.name)
def _userIndex(self, sysop=False):
"""Returns the internal index of the user."""
return 1 if sysop else 0
def username(self, sysop=False):
return self._userName[self._userIndex(sysop=sysop)]
def sitename(self):
"""Return string representing this Site's name and code."""
return '%s:%s' % (self.family.name, self.code)
    def __repr__(self):
        """Return the canonical 'family:code' representation."""
        return '%s:%s' % (self.family.name, self.code)
    def __hash__(self):
        """Hash on repr() so sites with equal family:code hash equally."""
        return hash(repr(self))
    def linktrail(self):
        """Return regex for trailing chars displayed as part of a link.

        Returns a string, not a compiled regular expression object.

        This reads from the family file, and ''not'' from
        [[MediaWiki:Linktrail]], because the MW software currently uses a
        built-in linktrail from its message files and ignores the wiki
        value.
        """
        return self.family.linktrail(self.code)
    def languages(self):
        """Return list of all valid language codes for this site's Family.

        Delegates to the family's interwiki key list.
        """
        return self.family.iwkeys
    def validLanguageLinks(self):
        """Return list of language codes that can be used in interwiki links.

        The list is precomputed once in __init__ (see _validlanguages).
        """
        return self._validlanguages
def namespaces(self):
"""Return list of canonical namespace names for this Site."""
# n.b.: this does not return namespace numbers; to determine which
# numeric namespaces the framework recognizes for this Site (which
# may or may not actually exist on the wiki), use
# self.family.namespaces.keys()
if self in _namespaceCache:
return _namespaceCache[self]
else:
nslist = []
for n in self.family.namespaces:
try:
ns = self.family.namespace(self.lang, n)
except KeyError:
# No default namespace defined
continue
if ns is not None:
nslist.append(self.family.namespace(self.lang, n))
_namespaceCache[self] = nslist
return nslist
def redirect(self, default=False):
"""Return the localized redirect tag for the site.
"""
# return the magic word without the preceding '#' character
if default or self.versionnumber() <= 13:
return u'REDIRECT'
else:
return self.getmagicwords('redirect')[0].lstrip("#")
def loggedInAs(self, sysop=False):
"""Return the current username if logged in, otherwise return None.
Checks if we're logged in by loading a page and looking for the login
link. We assume that we're not being logged out during a bot run, so
loading the test page is only required once.
"""
index = self._userIndex(sysop)
if self._isLoggedIn[index] is None:
# Load the details only if you don't know the login status.
# Don't load them just because the other details aren't known.
self._load(sysop=sysop)
if self._isLoggedIn[index]:
return self._userName[index]
    def forceLogin(self, sysop=False):
        """Log the user in if not already logged in.

        On success, marks the account as logged in and invalidates any
        cached user data (it described the anonymous user).
        """
        if not self.loggedInAs(sysop=sysop):
            loginMan = login.LoginManager(site=self, sysop=sysop)
            #loginMan.logout()
            if loginMan.login(retry=True):
                index = self._userIndex(sysop)
                self._isLoggedIn[index] = True
                self._userName[index] = loginMan.username
                # We know nothing about the new user (but its name)
                # Old info is about the anonymous user
                self._userData[index] = False
def checkBlocks(self, sysop=False):
"""Check if the user is blocked, and raise an exception if so."""
self._load(sysop=sysop)
index = self._userIndex(sysop)
if self._isBlocked[index]:
# User blocked
raise UserBlocked('User is blocked in site %s' % self)
def isBlocked(self, sysop=False):
"""Check if the user is blocked."""
self._load(sysop=sysop)
index = self._userIndex(sysop)
if self._isBlocked[index]:
# User blocked
return True
else:
return False
    def _getBlock(self, sysop=False):
        """Get user block data from the API.

        Returns True when the userinfo query reports a 'blockedby' field,
        False when the query fails, reports an error, or the API is not
        implemented for this site.
        """
        try:
            params = {
                'action': 'query',
                'meta': 'userinfo',
                'uiprop': 'blockinfo',
            }
            data = query.GetData(params, self)
            if not data or 'error' in data:
                return False
            # MediaWiki 1.11 nests the result differently from later versions
            if self.versionnumber() == 11:  # fix for version 1.11 API.
                data = data['userinfo']
            else:
                data = data['query']['userinfo']
            return 'blockedby' in data
        except NotImplementedError:
            return False
    def isAllowed(self, right, sysop=False):
        """Check if the user has a specific right.

        Among possible rights:
        * Actions: edit, move, delete, protect, upload
        * User levels: autoconfirmed, sysop, bot, empty string (always true)
        """
        if right == '' or right is None:
            # the empty right is granted to everyone
            return True
        else:
            self._load(sysop=sysop)
            index = self._userIndex(sysop)
            # Handle obsolete editusercssjs permission:
            # newer wikis split it into editusercss / edituserjs, so fall
            # back to the combined right when the split one is absent.
            if right in ['editusercss', 'edituserjs'] \
                    and right not in self._rights[index]:
                return 'editusercssjs' in self._rights[index]
            return right in self._rights[index]
    def server_time(self):
        """Return a datetime object representing server time.

        It is currently user-clock depending (delegates to the family).
        """
        return self.family.server_time()
def messages(self, sysop=False):
"""Returns true if the user has new messages, and false otherwise."""
self._load(sysop=sysop)
index = self._userIndex(sysop)
return self._messages[index]
def cookies(self, sysop=False):
"""Return a string containing the user's current cookies."""
self._loadCookies(sysop=sysop)
index = self._userIndex(sysop)
if self._cookies[index]:
#convert cookies dictionary data to string.
outputDatas = ""
for k, v in self._cookies[index].iteritems():
if v:
outputDatas += "%s=%s; " % (k, v)
else:
# protection for value ''
outputDatas += "%s=none; " % k
return outputDatas
    def _loadCookies(self, sysop=False):
        """Retrieve session cookies for login.

        If the family defines cross projects, first look for a central
        login-data file created by this wiki or any cross-project wiki;
        local per-language cookie data, when present, is merged in
        afterwards. With no configured username the account stays
        anonymous.
        """
        index = self._userIndex(sysop)
        if self._cookies[index] is not None:
            # already loaded for this account
            return
        try:
            if sysop:
                try:
                    username = config.sysopnames[self.family.name][self.lang]
                except KeyError:
                    raise NoUsername("""\
You tried to perform an action that requires admin privileges, but you haven't
entered your sysop name in your user-config.py. Please add
sysopnames['%s']['%s']='name' to your user-config.py"""
                                     % (self.family.name, self.lang))
            else:
                username = config.usernames[self.family.name][self.lang]
        except KeyError:
            # no username configured for this wiki: stay anonymous
            self._cookies[index] = None
            self._isLoggedIn[index] = False
        else:
            # check central login data if cross_projects is available.
            localFn = '%s-%s-%s-login.data' % (self.family.name, self.lang,
                                               username)
            localPa = config.datafilepath('login-data', localFn)
            if self.family.cross_projects:
                for proj in [self.family.name] + self.family.cross_projects:
                    # find all central data in all cross_projects
                    centralFn = '%s-%s-central-login.data' % (proj, username)
                    centralPa = config.datafilepath('login-data', centralFn)
                    if os.path.exists(centralPa):
                        self._cookies[index] = self._readCookies(centralFn)
                        break
            if os.path.exists(localPa):
                # read and dump local logindata into self._cookies[index];
                # if self._cookies[index] is not available, read the local
                # data and set the dictionary.
                if type(self._cookies[index]) == dict:
                    # central cookies were loaded above: merge in local keys
                    # that are not already present
                    for k, v in self._readCookies(localFn).iteritems():
                        if k not in self._cookies[index]:
                            self._cookies[index][k] = v
                else:
                    self._cookies[index] = dict(
                        [(k, v) for k, v in
                         self._readCookies(localFn).iteritems()])
##                self._cookies[index] = query.CombineParams(
##                    self._cookies[index], self._readCookies(localFn))
            elif not os.path.exists(localPa) and not self.family.cross_projects:
                # keep anonymous mode if not login and centralauth not enable
                self._cookies[index] = None
                self._isLoggedIn[index] = False
def _readCookies(self, filename):
"""read login cookie file and return a dictionary."""
try:
f = open(config.datafilepath('login-data', filename), 'r')
ck = re.compile("(.*?)=(.*?)\r?\n")
data = dict([(x[0], x[1]) for x in ck.findall(f.read())])
## data = dict(ck.findall(f.read()))
f.close()
return data
except IOError:
return
    def _setupCookies(self, datas, sysop=False):
        """Save the cookie dictionary to file(s).

        When the family enables cross_projects, the data is split into two
        files: a central (cross-wiki) one and a local per-language one.
        """
        index = self._userIndex(sysop)
        if not self._cookies[index]:
            # adopt the incoming data as the current cookie set
            self._cookies[index] = datas
        cache = {0: "", 1: ""}  # 0 is central auth, 1 is local.
        if not self.username(sysop):
            if not self._cookies[index]:
                # nothing to save for an anonymous account
                return
            elif self.family.cross_projects_cookie_username in self._cookies[
                index]:
                # Using centralauth to cross login data, it's not necessary to
                # forceLogin, but Site() didn't know it.
                # So we need add centralauth username data into siteattribute
                self._userName[index] = self._cookies[index][
                    self.family.cross_projects_cookie_username]
        for k, v in datas.iteritems():
            # put key and values into save cache
            if self.family.cross_projects and \
               k in self.family.cross_projects_cookies:
                cache[0] += "%s=%s\n" % (k, v)
            else:
                cache[1] += "%s=%s\n" % (k, v)
        # write the data.
        if self.family.cross_projects and cache[0]:
            filename = '%s-%s-central-login.data' % (self.family.name,
                                                     self.username(sysop))
            f = open(config.datafilepath('login-data', filename), 'w')
            f.write(cache[0])
            f.close()
        filename = '%s-%s-%s-login.data' % (self.family.name, self.lang,
                                            self.username(sysop))
        f = open(config.datafilepath('login-data', filename), 'w')
        f.write(cache[1])
        f.close()
def _removeCookies(self, name):
# remove cookies.
# ToDo: remove all local datas if cross_projects enable.
#
if self.family.cross_projects:
file = config.datafilepath('login-data',
'%s-%s-central-login.data'
% (self.family.name, name))
if os.path.exists(file):
os.remove(file)
file = config.datafilepath('login-data', '%s-%s-%s-login.data'
% (self.family.name, self.lang, name))
if os.path.exists(file):
os.remove(file)
def updateCookies(self, datas, sysop=False):
"""Check and update the current cookies datas and save back to files."""
index = self._userIndex(sysop)
if not self._cookies[index]:
self._setupCookies(datas, sysop)
for k, v in datas.iteritems():
if k in self._cookies[index]:
if v != self._cookies[index][k]:
self._cookies[index][k] = v
else:
self._cookies[index][k] = v
self._setupCookies(self._cookies[index], sysop)
def urlEncode(self, query):
"""Encode a query so that it can be sent using an http POST request."""
if not query:
return
if hasattr(query, 'iteritems'):
iterator = query.iteritems()
else:
iterator = iter(query)
l = []
wpEditToken = None
for key, value in iterator:
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
key = urllib.quote(key)
value = urllib.quote(value)
if key == 'wpEditToken':
wpEditToken = value
continue
l.append(key + '=' + value)
# wpEditToken is explicitly added as last value.
# If a premature connection abort occurs while putting, the server will
# not have received an edit token and thus refuse saving the page
if wpEditToken is not None:
l.append('wpEditToken=' + wpEditToken)
return '&'.join(l)
    def solveCaptcha(self, data):
        """Ask the user to solve a captcha found in API or HTML output.

        @param data: either an API result dict or raw HTML page text.
        @return: a dict with 'id' and 'answer' keys, or None when no
            captcha was found.
        @raises CaptchaError: for ReCaptcha (unsupported) or, in HTML mode,
            when config.solve_captcha is disabled.
        """
        if type(data) == dict:  # API Mode result
            if 'edit' in data and data['edit']['result'] != u"Success":
                data = data['edit']
                if "captcha" in data:
                    data = data['captcha']
                    captype = data['type']
                    id = data['id']
                    if captype in ['simple', 'math', 'question']:
                        # textual captcha: ask on the console
                        answer = input('What is the answer to the captcha "%s" ?'
                                       % data['question'])
                    elif captype == 'image':
                        # image captcha: show the image URL to the user
                        url = '%s://%s%s' % (self.protocol(), self.hostname(),
                                             self.captcha_image_address(id))
                        answer = ui.askForCaptcha(url)
                    else:  # no captcha id result, maybe ReCaptcha.
                        raise CaptchaError(
                            'We have been prompted for a ReCaptcha, but '
                            'pywikipedia does not yet support ReCaptchas')
                    return {'id': id, 'answer': answer}
            return
        else:
            # HTML mode: scrape the captcha question/id out of the form
            captchaW = re.compile(
                '<label for="wpCaptchaWord">(?P<question>[^<]*)</label>')
            captchaR = re.compile(
                '<input type="hidden" name="wpCaptchaId" id="wpCaptchaId" '
                'value="(?P<id>\d+)" />')
            match = captchaR.search(data)
            if match:
                id = match.group('id')
                match = captchaW.search(data)
                if match:
                    answer = input('What is the answer to the captcha "%s" ?'
                                   % match.group('question'))
                else:
                    if not config.solve_captcha:
                        raise CaptchaError(id)
                    url = '%s://%s%s' % (self.protocol(), self.hostname(),
                                         self.captcha_image_address(id))
                    answer = ui.askForCaptcha(url)
                return {'id': id, 'answer': answer}
            Recaptcha = re.compile(
                '<script type="text/javascript" '
                'src="http://api\.recaptcha\.net/[^"]*"></script>')
            if Recaptcha.search(data):
                raise CaptchaError(
                    'We have been prompted for a ReCaptcha, but pywikipedia '
                    'does not yet support ReCaptchas')
    def postForm(self, address, predata, sysop=False, cookies=None):
        """Post http form data to the given address at this site.

        address - the absolute path without hostname.
        predata - a dict or any iterable that can be converted to a dict,
                  containing keys and values for the http form.
        cookies - the cookies to send with the form. If None, send self.cookies

        In simulation mode, blocked actions are not sent at all; a dummy
        response object and an empty string are returned instead.

        Return a (response, data) tuple, where response is the HTTP
        response object and data is a Unicode string containing the
        body of the response.
        """
        if ('action' in predata) and pywikibot.simulate and \
           (predata['action'] in pywikibot.config.actions_to_block) and \
           (address not in [self.export_address()]):
            output(u'\03{lightyellow}SIMULATION: %s action blocked.\03{default}'
                   % predata['action'])
            # fabricate a minimal response-like object so callers that
            # inspect .code / .msg keep working
            import StringIO
            f_dummy = StringIO.StringIO()
            f_dummy.__dict__.update({u'code': 0, u'msg': u''})
            return f_dummy, u''
        data = self.urlEncode(predata)
        try:
            if cookies:
                return self.postData(address, data, sysop=sysop,
                                     cookies=cookies)
            else:
                return self.postData(address, data, sysop=sysop,
                                     cookies=self.cookies(sysop=sysop))
        except socket.error, e:
            raise ServerError(e)
    def postData(self, address, data,
                 contentType='application/x-www-form-urlencoded',
                 sysop=False, compress=True, cookies=None):
        """Post encoded data to the given http address at this site.

        address is the absolute path without hostname.
        data is an ASCII string that has been URL-encoded.

        Retries 50x errors and network failures with exponentially growing
        idle time (capped at 30 minutes) up to config.maxretries attempts;
        401/404 raise PageNotFound immediately. Updates stored cookies from
        any Set-Cookie headers in the response.

        Returns a (response, data) tuple where response is the HTTP
        response object and data is a Unicode string containing the
        body of the response.
        """
        if address[-1] == "?":
            address = address[:-1]
        headers = {
            'User-agent': str(useragent),
            'Content-Length': str(len(data)),
            'Content-type': str(contentType),
        }
        if cookies:
            headers['Cookie'] = str(cookies)
        if compress:
            headers['Accept-encoding'] = 'gzip'
        #print '%s' % headers
        url = '%s://%s%s' % (self.protocol(), self.hostname(), address)
        # Try to retrieve the page until it was successfully loaded (just in
        # case the server is down or overloaded).
        # Wait for retry_idle_time minutes (growing!) between retries.
        retry_idle_time = 1
        retry_attempt = 0
        while True:
            try:
                request = urllib2.Request(str(url), str(data), headers)
                f = MyURLopener.open(request)
                # read & info can raise socket.error
                text = f.read()
                # reuse `headers` for the *response* headers from here on
                headers = f.info()
                break
            except KeyboardInterrupt:
                raise
            except urllib2.HTTPError, e:
                if e.code in [401, 404]:
                    debug(u"Got HTTP/%i %s: \n %r"
                          % (e.code, e.message, e.read()))
                    raise PageNotFound(u'Page %s could not be retrieved. Check '
                                       u'your family file ?' % url)
                # just check for HTTP Status 500 (Internal Server Error)?
                elif e.code in [500, 502, 504]:
                    output(u'HTTPError: %s %s' % (e.code, e.msg))
                    if config.retry_on_fail:
                        retry_attempt += 1
                        if retry_attempt > config.maxretries:
                            raise MaxTriesExceededError()
                        warning(u"Could not open '%s'.\nMaybe the server is "
                                "down. Retrying in %i minutes..."
                                % (url, retry_idle_time))
                        time.sleep(retry_idle_time * 60)
                        # Next time wait longer, but not longer than half an
                        # hour
                        retry_idle_time *= 2
                        if retry_idle_time > 30:
                            retry_idle_time = 30
                        continue
                    raise
                else:
                    output(u"Result: %s %s" % (e.code, e.msg))
                    raise
            except Exception:
                # any other failure (DNS, connection reset, ...): same
                # exponential-backoff retry policy as for 50x errors
                exception(tb=pywikibot.verbose)
                if config.retry_on_fail:
                    retry_attempt += 1
                    if retry_attempt > config.maxretries:
                        raise MaxTriesExceededError()
                    warning(u"Could not open '%s'. Maybe the server or\nyour "
                            u"connection is down. Retrying in %i minutes..."
                            % (url, retry_idle_time))
                    time.sleep(retry_idle_time * 60)
                    retry_idle_time *= 2
                    if retry_idle_time > 30:
                        retry_idle_time = 30
                    continue
                raise
        # check cookies return or not, if return, send its to update.
        if hasattr(f, 'sheaders'):
            ck = f.sheaders
        else:
            ck = f.info().getallmatchingheaders('set-cookie')
        if ck:
            Reat = re.compile(': (.*?)=(.*?);')
            tmpc = {}
            for d in ck:
                m = Reat.search(d)
                if m:
                    tmpc[m.group(1)] = m.group(2)
            if self.cookies(sysop):
                self.updateCookies(tmpc, sysop)
        resContentType = headers.get('content-type', '')
        contentEncoding = headers.get('content-encoding', '')
        # Ensure that all sent data is received.
        # In rare cases we found a double Content-Length in the header;
        # we need to split it to get a value.
        content_length = int(headers.get('content-length', '0').split(',')[0])
        if content_length != len(text) and 'content-length' in headers:
            # truncated response: retry the whole request recursively
            warning(u'len(text) does not match content-length: %s != %s'
                    % (len(text), content_length))
            return self.postData(address, data, contentType, sysop, compress,
                                 cookies)
        if compress and contentEncoding == 'gzip':
            text = decompress_gzip(text)
        R = re.compile('charset=([^\'\";]+)')
        m = R.search(resContentType)
        if m:
            charset = m.group(1)
        else:
            if verbose:
                warning(u"No character set found.")
            # UTF-8 as default
            charset = 'utf-8'
        # Check if this is the charset we expected
        self.checkCharset(charset)
        # Convert HTML to Unicode
        try:
            text = unicode(text, charset, errors='strict')
        except UnicodeDecodeError:
            if verbose:
                exception()
            error(u'Invalid characters found on %s://%s%s, replaced by \\ufffd.'
                  % (self.protocol(), self.hostname(), address))
            # We use error='replace' in case of bad encoding.
            text = unicode(text, charset, errors='replace')
        # If a wiki page, get user data
        self._getUserDataOld(text, sysop=sysop)
        return f, text
## @deprecated("pywikibot.comms.http.request") # in 'compat' not yet...
    def getUrl(self, path, retry=None, sysop=False, data=None, compress=True,
               no_hostname=False, cookie_only=False, refer=None,
               back_response=False):
        """
        Low-level routine to get a URL from the wiki. Tries to login if it is
        another wiki.

        Parameters:
            path        - The absolute path, without the hostname.
            retry       - If True, retries loading the page when a network error
                          occurs.
            sysop       - If True, the sysop account's cookie will be used.
            data        - An optional dict providing extra post request
                          parameters.
            cookie_only - Only return the cookie the server sent us back
            back_response - If True, return a (response, text) tuple instead
                          of just the text.

        Returns the HTML text of the page converted to unicode.
        """
        from pywikibot.comms import http
        f, text = http.request(self, path, retry, sysop, data, compress,
                               no_hostname, cookie_only, refer,
                               back_response=True)
        # If a wiki page, get user data
        self._getUserDataOld(text, sysop=sysop)
        if back_response:
            return f, text
        return text
    def _getUserData(self, text, sysop=False, force=True):
        """
        Get the user data from an API query dict.

        Parameters:
        * text - the userinfo data; despite the name this is a dict-like
          API result (the code indexes it by 'blockedby', 'name', etc.)
        * sysop - is the user a sysop?
        * force - re-evaluate rights/token even if user data was loaded

        Side effects: updates the per-account block status, new-message
        flag, login status, username, rights list, and edit token.
        """
        index = self._userIndex(sysop)
        # Check for blocks
        if 'blockedby' in text and not self._isBlocked[index]:
            # Write a warning if not shown earlier
            if sysop:
                account = 'Your sysop account'
            else:
                account = 'Your account'
            warning(u'\n%s on %s is blocked by %s.\nReason: %s\n'
                    u'Editing using this account will stop the run.\n'
                    % (account, self, text['blockedby'], text['blockreason']))
        self._isBlocked[index] = 'blockedby' in text
        # Check for new messages; the data must have the key 'messages'.
        if 'messages' in text:
            if not self._messages[index]:
                # User has *new* messages
                if sysop:
                    output(u'NOTE: You have new messages in your sysop account '
                           u'on %s' % self)
                else:
                    output(u'NOTE: You have new messages on %s' % self)
            self._messages[index] = True
        else:
            self._messages[index] = False
        # Don't perform other checks if the data was already loaded
        if self._userData[index] and not force:
            return
        # Get username.
        # The data in anonymous mode has the key 'anon';
        # if 'anon' exists, username is an IP address - don't collect it now
        if not 'anon' in text:
            self._isLoggedIn[index] = True
            self._userName[index] = text['name']
        else:
            self._isLoggedIn[index] = False
            self._userName[index] = None
        # Get user groups and rights
        if 'groups' in text:
            self._rights[index] = []
            for group in text['groups']:
                # Convert dictionaries to list items (bug 3311663)
                if isinstance(group, dict):
                    self._rights[index].extend(group.keys())
                else:
                    self._rights[index].append(group)
            self._rights[index].extend(text['rights'])
            # Warnings
            # Don't show warnings for not logged in users, they will just fail
            # to do any action
            if self._isLoggedIn[index]:
                if 'bot' not in self._rights[index] and \
                   config.notify_unflagged_bot:
                    # Sysop + bot flag = Sysop flag in MediaWiki < 1.7.1?
                    if sysop:
                        output(u'Note: Your sysop account on %s does not have '
                               u'a bot flag. Its edits will be visible in the '
                               u'recent changes.' % self)
                    else:
                        warning(u'Your account on %s does not have a bot flag. '
                                u'Its edits will be visible in the recent '
                                u'changes and it may get blocked.' % self)
                if sysop and 'sysop' not in self._rights[index]:
                    warning(u'Your sysop account on %s does not seem to have '
                            u'sysop rights. You may not be able to perform any '
                            u'sysop-restricted actions using it.' % self)
        else:
            # 'groups' does not exist: fall back to assumed default rights
            self._rights[index] = []
            if self._isLoggedIn[index]:
                # Logged in user
                self._rights[index].append('user')
                # Assume bot, and thus autoconfirmed
                self._rights[index].extend(['bot', 'autoconfirmed'])
                if sysop:
                    # Assume user reported as a sysop indeed has the sysop
                    # rights
                    self._rights[index].append('sysop')
        # Assume the user has the default rights if API not query back
        self._rights[index].extend(['read', 'createaccount', 'edit', 'upload',
                                    'createpage', 'createtalk', 'move',
                                    'upload'])
        # remove duplicate rights
        self._rights[index] = list(set(self._rights[index]))
        # Get token
        if 'preferencestoken' in text:
            self._token[index] = text['preferencestoken']
            if self._rights[index] is not None:
                # Token and rights are loaded - user data is now loaded
                self._userData[index] = True
        elif self.versionnumber() < 14:
            # uiprop 'preferencestoken' is start from 1.14; for 1.8-1.13
            # we need another way to get a token: request an edit token
            # for a dummy title via prop=info.
            params = {
                'action': 'query',
                'prop': 'info',
                'titles': 'Non-existing page',
                'intoken': 'edit',
            }
            data = query.GetData(params, self,
                                 sysop=sysop)['query']['pages'].values()[0]
            if 'edittoken' in data:
                self._token[index] = data['edittoken']
                self._userData[index] = True
            else:
                warning(u'Token not found on %s. You will not be able to edit '
                        u'any page.' % self)
        else:
            if not self._isBlocked[index]:
                warning(u'Token not found on %s. You will not be able to edit '
                        u'any page.' % self)
    def _getUserDataOld(self, text, sysop=False, force=True):
        """
        Get the user data from a wiki page data (screen-scraping fallback,
        used when the API userinfo query is not available).

        Parameters:
        * text  - the raw HTML of a rendered wiki page
        * sysop - is the user a sysop?
        * force - re-parse even if user data was already loaded

        Side effects: fills self._isBlocked, self._messages, self._isLoggedIn,
        self._userName, self._rights and self._token at the index selected by
        self._userIndex(sysop).
        """
        index = self._userIndex(sysop)
        if '<div id="globalWrapper">' not in text:
            # Not a wiki page
            return
        # Check for blocks - but only if version is 1.11 (userinfo is available)
        # and the user data was not yet loaded
        if self.versionnumber() >= 11 and (not self._userData[index] or force):
            blocked = self._getBlock(sysop=sysop)
            if blocked and not self._isBlocked[index]:
                # Write a warning if not shown earlier
                if sysop:
                    account = 'Your sysop account'
                else:
                    account = 'Your account'
                warning(u'%s on %s is blocked. Editing using this account will '
                        u'stop the run.' % (account, self))
            self._isBlocked[index] = blocked
        # Check for new messages
        if '<div class="usermessage">' in text:
            if not self._messages[index]:
                # User has *new* messages
                if sysop:
                    output(u'NOTE: You have new messages in your sysop account '
                           u'on %s' % self)
                else:
                    output(u'NOTE: You have new messages on %s' % self)
            self._messages[index] = True
        else:
            self._messages[index] = False
        # Don't perform other checks if the data was already loaded
        if self._userData[index] and not force:
            return
        # Search for the the user page link at the top.
        # Note that the link of anonymous users (which doesn't exist at all
        # in Wikimedia sites) has the ID pt-anonuserpage, and thus won't be
        # found here.
        userpageR = re.compile('<li id="pt-userpage".*?><a href=".+?".*?>'
                               '(?P<username>.+?)</a></li>')
        m = userpageR.search(text)
        if m:
            self._isLoggedIn[index] = True
            self._userName[index] = m.group('username')
        else:
            self._isLoggedIn[index] = False
            # No idea what is the user name, and it isn't important
            self._userName[index] = None
            if self.family.name == 'wikitravel':
                # fix for Wikitravel's user page link.
                self = self.family.user_page_link(self, index)
        # Check user groups, if possible (introduced in 1.10)
        groupsR = re.compile(r'var wgUserGroups = \[\"(.+)\"\];')
        m = groupsR.search(text)
        checkLocal = True
        if default_code in self.family.cross_allowed:
            # if current languages in cross allowed list, check global bot flag.
            globalgroupsR = re.compile(r'var wgGlobalGroups = \[\"(.+)\"\];')
            mg = globalgroupsR.search(text)
            if mg:  # the account had global permission
                globalRights = mg.group(1)
                globalRights = globalRights.split('","')
                self._rights[index] = globalRights
                if self._isLoggedIn[index]:
                    if 'Global_bot' in globalRights:
                        # This account has the global bot flag, no need to
                        # check local flags.
                        checkLocal = False
                    else:
                        output(u'Your bot account does not have global the bot '
                               u'flag, checking local flag.')
            else:
                if verbose:
                    output(u'Note: this language does not allow global bots.')
        if m and checkLocal:
            rights = m.group(1)
            rights = rights.split('", "')
            if '*' in rights:
                rights.remove('*')
            self._rights[index] = rights
            # Warnings
            # Don't show warnings for not logged in users, they will just fail
            # to do any action
            if self._isLoggedIn[index]:
                if 'bot' not in self._rights[index] and \
                   config.notify_unflagged_bot:
                    # Sysop + bot flag = Sysop flag in MediaWiki < 1.7.1?
                    if sysop:
                        output(u'Note: Your sysop account on %s does not have '
                               u'a bot flag. Its edits will be visible in the '
                               u'recent changes.' % self)
                    else:
                        warning(u'Your account on %s does not have a bot flag. '
                                u'Its edits will be visible in the recent '
                                u'changes and it may get blocked.' % self)
                if sysop and 'sysop' not in self._rights[index]:
                    warning(u'Your sysop account on %s does not seem to have '
                            u'sysop rights. You may not be able to perform any '
                            u'sysop-restricted actions using it.' % self)
        else:
            # We don't have wgUserGroups, and can't check the rights
            self._rights[index] = []
            if self._isLoggedIn[index]:
                # Logged in user
                self._rights[index].append('user')
                # Assume bot, and thus autoconfirmed
                self._rights[index].extend(['bot', 'autoconfirmed'])
                if sysop:
                    # Assume user reported as a sysop indeed has the sysop
                    # rights
                    self._rights[index].append('sysop')
            # Assume the user has the default rights
            # NOTE(review): 'upload' appears twice in this list; harmless but
            # redundant.
            self._rights[index].extend(['read', 'createaccount', 'edit', 'upload',
                                        'createpage', 'createtalk', 'move',
                                        'upload'])
        if 'bot' in self._rights[index] or 'sysop' in self._rights[index]:
            self._rights[index].append('apihighlimits')
        if 'sysop' in self._rights[index]:
            self._rights[index].extend(['delete', 'undelete', 'block',
                                        'protect', 'import', 'deletedhistory',
                                        'unwatchedpages'])
        # Search for a token
        tokenR = re.compile(
            r"\<input type='hidden' value=\"(.*?)\" name=\"wpEditToken\"")
        tokenloc = tokenR.search(text)
        if tokenloc:
            self._token[index] = tokenloc.group(1)
            if self._rights[index] is not None:
                # In this case, token and rights are loaded - user data is now
                # loaded
                self._userData[index] = True
        else:
            # Token not found
            # Possible reason for this is the user is blocked, don't show a
            # warning in this case, otherwise do show a warning
            # Another possible reason is that the page cannot be edited - ensure
            # there is a textarea and the tab "view source" is not shown
            if u'<textarea' in text and \
               u'<li id="ca-viewsource"' not in text and \
               not self._isBlocked[index]:
                # Token not found
                warning(u'Token not found on %s. You will not be able to edit '
                        u'any page.' % self)
def siteinfo(self, key='general', force=False, dump=False):
"""Get Mediawiki Site informations by API
dump - return all siteinfo datas
some siprop params is huge data for MediaWiki, they take long times to
read by testment. These params could get, but only one by one.
"""
# protection for key in other datatype
if not isinstance(key, basestring):
key = 'general'
if self._info and key in self._info and not force:
if dump:
return self._info
else:
return self._info[key]
params = {
'action': 'query',
'meta': 'siteinfo',
'siprop': ['general', 'namespaces'],
}
#ver 1.10 handle
if self.versionnumber() > 10:
params['siprop'].extend(['statistics', ])
if key in ['specialpagealiases', 'interwikimap', 'namespacealiases',
'usergroups']:
if verbose:
output(u'getting huge siprop %s...' % key)
params['siprop'] = [key]
#ver 1.13 handle
if self.versionnumber() > 13:
if key not in ['specialpagealiases', 'interwikimap',
'namespacealiases', 'usergroups']:
params['siprop'].extend(['fileextensions', 'rightsinfo'])
if key in ['magicwords', 'extensions', ]:
if verbose:
output(u'getting huge siprop %s...' % key)
params['siprop'] = [key]
try:
data = query.GetData(params, self)['query']
except KeyError, NotImplementedError:
return
if not hasattr(self, '_info'):
self._info = data
else:
if key == 'magicwords':
if self.versionnumber() <= 13:
return # Not implemented
self._info[key] = {}
for entry in data[key]:
self._info[key][entry['name']] = entry['aliases']
else:
for k, v in data.iteritems():
self._info[k] = v
#data pre-process
if dump:
return self._info
else:
return self._info.get(key)
    def mediawiki_message(self, key, forceReload=False):
        """Return the MediaWiki message text for key "key".

        On first use (or when forceReload is true) the whole message table
        is fetched, either via the API (meta=allmessages) or by scraping
        Special:Allmessages in PHP or XML serialisation, and cached in
        self._mediawiki_messages. Raises KeyError if the key does not exist
        even after a forced reload.
        """
        # Allmessages is retrieved once for all per created Site object
        if (not self._mediawiki_messages) or forceReload:
            api = self.has_api()
            if verbose:
                output(u"Retrieving mediawiki messages from "
                       u"Special:Allmessages")
            # Only MediaWiki r27393/1.12 and higher support XML output for
            # Special:Allmessages
            if self.versionnumber() < 12:
                usePHP = True
            else:
                usePHP = False
                elementtree = True
                # Find some ElementTree implementation; fall back to
                # BeautifulSoup when none is importable.
                try:
                    try:
                        from xml.etree.cElementTree import XML  # 2.5
                    except ImportError:
                        try:
                            from cElementTree import XML
                        except ImportError:
                            from elementtree.ElementTree import XML
                except ImportError:
                    if verbose:
                        output(u'Elementtree was not found, using '
                               u'BeautifulSoup instead')
                    elementtree = False
            if config.use_diskcache and not api:
                import diskcache
                # Cache the (large) message table on disk instead of in RAM.
                _dict = lambda x: diskcache.CachedReadOnlyDictI(
                    x, prefix="msg-%s-%s-" % (self.family.name, self.lang))
            else:
                _dict = dict
            retry_idle_time = 1
            retry_attempt = 0
            while True:
                if api and self.versionnumber() >= 12 or \
                   self.versionnumber() >= 16:
                    # API path: only fetch the single requested message.
                    params = {
                        'action': 'query',
                        'meta': 'allmessages',
                        'ammessages': key,
                    }
                    datas = query.GetData(params,
                                          self)['query']['allmessages'][0]
                    if "missing" in datas:
                        raise KeyError("message '%s' does not exist." % key)
                    elif datas['name'] not in self._mediawiki_messages:
                        self._mediawiki_messages[datas['name']] = datas['*']
##                    self._mediawiki_messages = _dict(
##                        [(tag['name'].lower(), tag['*'])
##                         for tag in datas if not 'missing' in tag])
                elif usePHP:
                    # Pre-1.12 path: scrape the PHP serialisation.
                    phppage = self.getUrl(self.get_address(
                        "Special:Allmessages") + "&ot=php")
                    Rphpvals = re.compile(r"(?ms)'([^']*)' => '(.*?[^\\])',")
                    # Previous regexp don't match empty messages. Fast
                    # workaround...
                    phppage = re.sub("(?m)^('.*?' =>) '',", r"\1 ' ',",
                                     phppage)
                    self._mediawiki_messages = _dict(
                        [(name.strip().lower(),
                          html2unicode(message.replace("\\'", "'")))
                         for (name, message) in Rphpvals.findall(phppage)])
                else:
                    # 1.12+ without API: scrape the XML serialisation.
                    xml = self.getUrl(self.get_address(
                        "Special:Allmessages") + "&ot=xml")
                    # xml structure is :
                    # <messages lang="fr">
                    #     <message name="about">À propos</message>
                    #     ...
                    # </messages>
                    if elementtree:
                        decode = xml.encode(self.encoding())
                        # Skip extraneous data such as PHP warning or extra
                        # whitespaces added from some MediaWiki extensions
                        xml_dcl_pos = decode.find('<?xml')
                        if xml_dcl_pos > 0:
                            decode = decode[xml_dcl_pos:]
                        tree = XML(decode)
                        self._mediawiki_messages = _dict(
                            [(tag.get('name').lower(), tag.text)
                             for tag in tree.getiterator('message')])
                    else:
                        tree = BeautifulStoneSoup(xml)
                        self._mediawiki_messages = _dict(
                            [(tag.get('name').lower(), html2unicode(tag.string))
                             for tag in tree.findAll('message') if tag.string])
                if not self._mediawiki_messages:
                    # No messages could be added.
                    # We assume that the server is down.
                    # Wait some time, then try again.
                    warning(u'No messages found in Special:Allmessages. Maybe '
                            u'the server is down. Retrying in %i minutes...'
                            % retry_idle_time)
                    time.sleep(retry_idle_time * 60)
                    # Next time wait longer, but not longer than half an hour
                    retry_attempt += 1
                    if retry_attempt > config.maxretries:
                        raise ServerError()
                    retry_idle_time *= 2
                    if retry_idle_time > 30:
                        retry_idle_time = 30
                    continue
                break
        if self.family.name == 'wikitravel':
            # fix for Wikitravel's mediawiki message setting
            self = self.family.mediawiki_message(self)
        key = key.lower()
        try:
            return self._mediawiki_messages[key]
        except KeyError:
            # Retry once with a forced reload before giving up.
            if not forceReload:
                return self.mediawiki_message(key, True)
            else:
                raise KeyError("MediaWiki key '%s' does not exist on %s"
                               % (key, self))
def has_mediawiki_message(self, key):
"""Return True if this site defines a MediaWiki message for 'key'."""
#return key in self._mediawiki_messages
try:
v = self.mediawiki_message(key)
return True
except KeyError:
return False
def has_api(self):
"""Return True if this sites family has api interface."""
try:
if config.use_api:
x = self.apipath()
del x
return True
except NotImplementedError:
pass
return False
    def _load(self, sysop=False, force=False):
        """
        Loads user data.
        This is only done if we didn't do get any page yet and the information
        is requested, otherwise we should already have this data.
        Parameters:
        * sysop - Get sysop user data?
        * force - reload even if the data was already loaded
        """
        index = self._userIndex(sysop)
        if self._userData[index] and not force:
            return
        if verbose:
            output(u'Getting information for site %s' % self)
        # Get data
        # API Userinfo is available from version 1.11
        # preferencetoken available from 1.14
        if self.has_api() and self.versionnumber() >= 11:
            # Query userinfo
            params = {
                'action': 'query',
                'meta': 'userinfo',
                'uiprop': ['blockinfo', 'groups', 'rights', 'hasmsg'],
            }
            if self.versionnumber() >= 12:
                params['uiprop'].append('ratelimits')
            if self.versionnumber() >= 14:
                params['uiprop'].append('preferencestoken')
            data = query.GetData(params, self, sysop=sysop)
            # Show the API error code instead making an index error
            if 'error' in data:
                raise RuntimeError('%s' % data['error'])
            # MediaWiki 1.11 returned userinfo at the top level; later
            # versions nest it under 'query'.
            if self.versionnumber() == 11:
                text = data['userinfo']
            else:
                text = data['query']['userinfo']
            self._getUserData(text, sysop=sysop, force=force)
        else:
            # No API: scrape an edit page of a non-existing title instead.
            url = self.edit_address('Non-existing_page')
            text = self.getUrl(url, sysop=sysop)
            self._getUserDataOld(text, sysop=sysop, force=force)
    def search(self, key, number=10, namespaces=None):
        """
        Yield search results for query.
        Use API when enabled use_api and version >= 1.11,
        or use Special:Search.

        Each yielded item is a 6-tuple; on MediaWiki >= 1.16 it is
        (page, snippet, '', size, wordcount, timestamp), otherwise the
        non-page fields are empty strings.
        """
        if self.has_api() and self.versionnumber() >= 11:
            # Yield search results (using api) for query.
            params = {
                'action': 'query',
                'list': 'search',
                'srsearch': key,
            }
            if number:
                params['srlimit'] = number
            if namespaces:
                params['srnamespace'] = namespaces
            # An explicit empty list means "all namespaces": expand it to
            # every non-negative namespace index known for this site.
            if namespaces == []:
                params['srnamespace'] = [ni for ni in
                                         (self.getNamespaceIndex(x)
                                          for x in self.namespaces())
                                         if ni >= 0]
            offset = 0
            # number == 0/None means no limit (second clause keeps looping).
            while offset < number or not number:
                params['sroffset'] = offset
                data = query.GetData(params, self)
                if 'error'in data:
                    raise NotImplementedError('%s' % data['error']['info'])
                data = data['query']
                if 'error' in data:
                    raise RuntimeError('%s' % data['error'])
                if not data['search']:
                    break
                for s in data['search']:
                    offset += 1
                    page = Page(self, s['title'])
                    if self.versionnumber() >= 16:
                        yield (page, s['snippet'], '', s['size'],
                               s['wordcount'], s['timestamp'])
                    else:
                        yield page, '', '', '', '', ''
        else:
            # Yield search results (using Special:Search page) for query.
            throttle = True
            path = self.search_address(urllib.quote_plus(key.encode('utf-8')),
                                       n=number, ns=namespaces)
            get_throttle()
            html = self.getUrl(path)
            entryR = re.compile(
                ur'<li><a href=".+?" title="(?P<title>.+?)">.+?</a>',
                re.DOTALL)
            for m in entryR.finditer(html):
                page = Page(self, m.group('title'))
                yield page, '', '', '', '', ''
# TODO: avoid code duplication for the following methods
    def logpages(self, number=50, mode='', title=None, user=None, repeat=False,
                 namespace=[], start=None, end=None, tag=None, newer=False,
                 dump=False, offset=None):
        """Yield entries from a MediaWiki log (API list=logevents).

        Parameters:
        * number    - how many entries to yield in total
        * mode      - the log type (letype), e.g. 'block', 'upload', 'move'
        * title     - only entries related to this page title
        * user      - only entries made by this user
        * repeat    - restart from the top after exhausting the log
        * namespace - list of namespace numbers to keep (empty = all)
        * start/end - timestamps bounding the enumeration
        * tag       - only entries with this change tag (MW >= 1.16)
        * newer     - enumerate oldest-first instead of newest-first
        * dump      - yield the raw API dict instead of a tuple
        * offset    - start this many hours before now (ignored when start
                      is given)

        When dump is false, yields (Page-or-ImagePage, user, timestamp,
        comment) tuples. Raises NotImplementedError for unsupported modes
        or when the API is unavailable.
        """
        if not self.has_api() or self.versionnumber() < 11 or \
           mode not in ('block', 'protect', 'rights', 'delete', 'upload',
                        'move', 'import', 'patrol', 'merge', 'suppress',
                        'review', 'stable', 'gblblock', 'renameuser',
                        'globalauth', 'gblrights', 'abusefilter',
                        'articlefeedbackv5', 'newusers'):
            raise NotImplementedError(mode)
        params = {
            'action': 'query',
            'list': 'logevents',
            'letype': mode,
            'lelimit': int(number),
            'ledir': 'older',
            'leprop': ['ids', 'title', 'type', 'user', 'timestamp',
                       'comment', 'details'],
        }
        # Cap the per-request limit; bots with apihighlimits may go to 5000.
        if number > config.special_page_limit:
            params['lelimit'] = config.special_page_limit
            if number > 5000 and self.isAllowed('apihighlimits'):
                params['lelimit'] = 5000
        if newer:
            params['ledir'] = 'newer'
        if user:
            params['leuser'] = user
        if title:
            params['letitle'] = title
        if start:
            params['lestart'] = start
            if offset and offset > 0:
                warning(u'offset parameter %s ignored,\n'
                        u' start parameter is set to %s'
                        % (offset, start))
        # offset in hours from now
        elif offset and offset > 0:
            start = Timestamp.utcnow() - datetime.timedelta(0, offset * 3600)
            params['lestart'] = str(start)
        if end:
            params['leend'] = end
        if tag and self.versionnumber() >= 16:  # tag support from mw:r58399
            params['letag'] = tag
        nbresults = 0
        while True:
            result = query.GetData(params, self)
            if 'error' in result and \
               result.get('error').get('code') == u'leparam_title':
                output('%(info)s' % result.get('error'))
                raise BadTitle
            # FIXME: Throw proper exceptions instead of "Error"
            if 'error' in result or 'warnings' in result:
                output('%s' % result)
                raise Error
            for c in result['query']['logevents']:
                # Skip entries outside the namespace filter and entries
                # whose action was revision-deleted (hidden).
                if (not namespace or c['ns'] in namespace) and \
                   not 'actionhidden' in c.keys():
                    if dump:
                        # dump result only.
                        yield c
                    else:
                        # ns 6 is the File/Image namespace.
                        if c['ns'] == 6:
                            p_ret = ImagePage(self, c['title'])
                        else:
                            p_ret = Page(self, c['title'],
                                         defaultNamespace=c['ns'])
                        yield (p_ret, c['user'],
                               parsetime2stamp(c['timestamp']),
                               c['comment'])
                    nbresults += 1
                    if nbresults >= number:
                        break
            # Follow the API continuation until enough results were yielded.
            if 'query-continue' in result and nbresults < number:
                params.update(result['query-continue']['logevents'])
            elif repeat:
                nbresults = 0
                try:
                    params.pop('lestart')
                except KeyError:
                    pass
            else:
                break
        return
    @deprecate_arg("get_redirect", None)  # 20120822
    def newpages(self, user=None, returndict=False,
                 number=10, repeat=False, namespace=0,
                 rcshow=['!bot', '!redirect']):
        """Yield new articles (as Page objects) from recent changes.
        Starts with the newest article and fetches the number of articles
        specified in the first argument. If repeat is True, it fetches
        Newpages again. If there is no new page, it blocks until there is
        one, sleeping between subsequent fetches of Newpages.
        The objects yielded are dependent on parmater returndict.
        When true, it yields a tuple composed of a Page object and a dict of
        attributes.
        When false, it yields a tuple composed of the Page object,
        timestamp (unicode), length (int), an empty unicode string, username
        or IP address (str), comment (unicode).
        """
        # TODO: in recent MW versions Special:Newpages takes a namespace
        # parameter, and defaults to 0 if not specified.
        # TODO: Detection of unregistered users is broken
        # TODO: Repeat mechanism doesn't make much sense as implemented;
        #       should use both offset and limit parameters, and have an
        #       option to fetch older rather than newer pages
        # N.B. API still provides no way to access Special:Newpages content
        # directly, so we get new pages indirectly through 'recentchanges'
        if self.has_api() and self.versionnumber() >= 10:
            gen = self.recentchanges(number=number, rcshow=rcshow, rctype='new',
                                     namespace=namespace, repeat=repeat,
                                     user=user, returndict=True)
            for newpage, pageitem in gen:
                if returndict:
                    yield (newpage, pageitem)
                else:
                    yield (newpage, pageitem['timestamp'], pageitem['newlen'],
                           u'', pageitem['user'], pageitem['comment'])
        else:
            # No API: scrape the rendered Special:Newpages HTML instead.
            seen = set()
            while True:
                path = self.newpages_address(n=number, namespace=namespace)
                # The throttling is important here, so always enabled.
                get_throttle()
                html = self.getUrl(path)
                entryR = re.compile(
                    '<li[^>]*>(?P<date>.+?) \S*?<a href=".+?"'
                    ' title="(?P<title>.+?)">.+?</a>.+?[\(\[]'
                    '(?P<length>[\d,.]+)[^\)\]]*[\)\]]'
                    ' .?<a href=".+?" title=".+?:(?P<username>.+?)">')
                for m in entryR.finditer(html):
                    date = m.group('date')
                    title = m.group('title')
                    title = title.replace('&quot;', '"')
                    # Strip thousands separators before parsing the length.
                    length = int(re.sub("[,.]", "", m.group('length')))
                    loggedIn = u''
                    username = m.group('username')
                    comment = u''
                    if title not in seen:
                        seen.add(title)
                        page = Page(self, title)
                        yield page, date, length, loggedIn, username, comment
                if not repeat:
                    break
def longpages(self, number=10, repeat=False):
"""Yield Pages from Special:Longpages.
Return values are a tuple of Page object, length(int).
"""
#TODO: should use offset and limit parameters; 'repeat' as now
# implemented is fairly useless
# this comment applies to all the XXXXpages methods following, as well
seen = set()
path = self.longpages_address(n=number)
entryR = re.compile(
ur'<li>\(<a href=".+?" title=".+?">.+?</a>\) .<a href=".+?" '
ur'title="(?P<title>.+?)">.+?</a> .\[(?P<length>[\d.,]+).*?\]</li>',
re.UNICODE)
while True:
get_throttle()
html = self.getUrl(path)
for m in entryR.finditer(html):
title = m.group('title')
length = int(re.sub('[.,]', '', m.group('length')))
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page, length
if not repeat:
break
def shortpages(self, number=10, repeat=False):
"""Yield Pages and lengths from Special:Shortpages."""
throttle = True
seen = set()
path = self.shortpages_address(n=number)
entryR = re.compile(
ur'<li>\(<a href=".+?" title=".+?">.+?</a>\) .<a href=".+?" '
ur'title="(?P<title>.+?)">.+?</a> .\[(?P<length>[\d.,]+).*?\]</li>',
re.UNICODE)
while True:
get_throttle()
html = self.getUrl(path)
for m in entryR.finditer(html):
title = m.group('title')
length = int(re.sub('[., ]', '', m.group('length')))
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page, length
if not repeat:
break
def categories(self, number=10, repeat=False):
"""Yield Category objects from Special:Categories"""
import catlib
seen = set()
while True:
path = self.categories_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a>.*?</li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = catlib.Category(self, title)
yield page
if not repeat:
break
def deadendpages(self, number=10, repeat=False):
"""Yield Page objects retrieved from Special:Deadendpages."""
seen = set()
while True:
path = self.deadendpages_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a></li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page
if not repeat:
break
def ancientpages(self, number=10, repeat=False):
"""Yield Pages, datestamps from Special:Ancientpages."""
seen = set()
while True:
path = self.ancientpages_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a> '
'(?P<date>.+?)</li>')
for m in entryR.finditer(html):
title = m.group('title')
date = m.group('date')
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page, date
if not repeat:
break
def lonelypages(self, number=10, repeat=False):
"""Yield Pages retrieved from Special:Lonelypages."""
throttle = True
seen = set()
while True:
path = self.lonelypages_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a></li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page
if not repeat:
break
def unwatchedpages(self, number=10, repeat=False):
"""Yield Pages from Special:Unwatchedpages (requires Admin privileges).
"""
seen = set()
while True:
path = self.unwatchedpages_address(n=number)
get_throttle()
html = self.getUrl(path, sysop=True)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a>.+?</li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page
if not repeat:
break
def uncategorizedcategories(self, number=10, repeat=False):
"""Yield Categories from Special:Uncategorizedcategories."""
import catlib
seen = set()
while True:
path = self.uncategorizedcategories_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a></li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = catlib.Category(self, title)
yield page
if not repeat:
break
def newimages(self, number=100, lestart=None, leend=None, leuser=None,
letitle=None, repeat=False):
"""
Yield ImagePages from APIs, call:
action=query&list=logevents&letype=upload&lelimit=500
Options directly from APIs:
---
Parameters:
Default: ids|title|type|user|timestamp|comment|details
lestart - The timestamp to start enumerating from.
leend - The timestamp to end enumerating.
ledir - In which direction to enumerate.
One value: newer, older
Default: older
leuser - Filter entries to those made by the given user.
letitle - Filter entries to those related to a page.
lelimit - How many total event entries to return.
No more than 500 (5000 for bots) allowed.
Default: 10
"""
for o, u, t, c in self.logpages(number=number, mode='upload',
title=letitle, user=leuser,
repeat=repeat, start=lestart,
end=leend):
yield o, t, u, c
return
    def recentchanges(self, number=100, rcstart=None, rcend=None, rcshow=None,
                      rcdir='older', rctype='edit|new', namespace=None,
                      includeredirects=True, repeat=False, user=None,
                      returndict=False, nobots=False, revision=False):
        """
        Yield recent changes as Page objects
        uses API call:
        action=query&list=recentchanges&rctype=edit|new&rclimit=500
        Starts with the newest change and fetches the number of changes
        specified in the first argument. If repeat is True, it fetches
        again.
        Options directly from APIs:
        ---
        Parameters:
          rcstart        - The timestamp to start enumerating from.
          rcend          - The timestamp to end enumerating.
          rcdir          - In which direction to enumerate.
                           One value: newer, older
                           Default: older
          rcnamespace    - Filter log entries to only this namespace(s)
                           Values (separate with '|'):
                           0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
          rcprop         - Include additional pieces of information
                           Values (separate with '|'):
                           user, comment, flags, timestamp, title, ids, sizes,
                           redirect, patrolled, loginfo
                           Default: title|timestamp|ids
          rcshow         - Show only items that meet this criteria.
                           For example, to see only minor edits done by
                           logged-in users, set show=minor|!anon
                           Values (separate with '|'):
                           minor, !minor, bot, !bot, anon, !anon,
                           redirect, !redirect, patrolled, !patrolled
          rclimit        - How many total changes to return.
                           No more than 500 (5000 for bots) allowed.
                           Default: 10
          rctype         - Which types of changes to show.
                           Values (separate with '|'): edit, new, log
        The objects yielded are dependent on parmater returndict.
        When true, it yields a tuple composed of a Page object and a dict of
        attributes.
        When false, it yields a tuple composed of the Page object,
        timestamp (unicode), length (int), an empty unicode string, username
        or IP address (str), comment (unicode).
        If parameter revision is true, this function returns distinct
        revisions. If false, it returns only distinct pages.
        # TODO: Detection of unregistered users is broken
        """
        if rctype is None:
            rctype = 'edit|new'
        params = {
            'action': 'query',
            'list': 'recentchanges',
            'rcdir': rcdir,
            'rctype': rctype,
            'rcprop': ['user', 'comment', 'timestamp', 'title', 'ids',
                       'loginfo',
                       'sizes'],  # , 'flags', 'redirect', 'patrolled'],
            'rcnamespace': namespace,
        }
        params['rclimit'] = int(number)
        # nobots folds an additional '!bot' filter into rcshow.
        if nobots and not rcshow:
            rcshow = "!bot"
        elif nobots and rcshow:
            rcshow = rcshow + "|!bot"
        if user:
            params['rcuser'] = user
        if rcstart:
            params['rcstart'] = rcstart
        if rcend:
            params['rcend'] = rcend
        if rcshow:
            params['rcshow'] = rcshow
        if rctype:
            params['rctype'] = rctype
        # De-duplicate either by revision id or by page id.
        if revision:
            keyseen = 'revid'
        else:
            keyseen = 'pageid'
        seen = set()
        while True:
            data = query.GetData(params, self)
            if 'error' in data:
                raise RuntimeError('%s' % data['error'])
            try:
                rcData = data['query']['recentchanges']
            except KeyError:
                raise ServerError(
                    "The APIs don't return data, the site may be down")
            for i in rcData:
                if i[keyseen] not in seen:
                    seen.add(i[keyseen])
                    page = Page(self, i['title'], defaultNamespace=i['ns'])
                    if 'comment' in i:
                        page._comment = i['comment']
                    if returndict:
                        yield page, i
                    else:
                        comment = u''
                        if 'comment' in i:
                            comment = i['comment']
                        # NOTE(review): the 4th element is the literal True
                        # here, although the docstring promises an empty
                        # unicode string - confirm before relying on it.
                        yield (page, i['timestamp'], i['newlen'], True,
                               i['user'], comment)
            if not repeat:
                break
def patrol(self, rcid, token=None):
if not self.has_api() or self.versionnumber() < 12:
raise Exception('patrol: no API: not implemented')
if not token:
token = self.getPatrolToken()
params = {
'action': 'patrol',
'rcid': rcid,
'token': token,
}
result = query.GetData(params, self)
if 'error' in result:
raise RuntimeError("%s" % result['error'])
return True
def uncategorizedimages(self, number=10, repeat=False):
"""Yield ImagePages from Special:Uncategorizedimages."""
seen = set()
ns = self.image_namespace()
entryR = re.compile(
'<a href=".+?" title="(?P<title>%s:.+?)">.+?</a>' % ns)
while True:
path = self.uncategorizedimages_address(n=number)
get_throttle()
html = self.getUrl(path)
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = ImagePage(self, title)
yield page
if not repeat:
break
def uncategorizedpages(self, number=10, repeat=False):
"""Yield Pages from Special:Uncategorizedpages."""
seen = set()
while True:
path = self.uncategorizedpages_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a></li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page
if not repeat:
break
def uncategorizedtemplates(self, number=10, repeat=False):
"""Yield Pages from Special:UncategorizedTemplates."""
seen = set()
while True:
path = self.uncategorizedtemplates_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a></li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page
if not repeat:
break
def unusedcategories(self, number=10, repeat=False):
"""Yield Category objects from Special:Unusedcategories."""
import catlib
seen = set()
while True:
path = self.unusedcategories_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a></li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = catlib.Category(self, title)
yield page
if not repeat:
break
def wantedcategories(self, number=10, repeat=False):
"""Yield Category objects from Special:wantedcategories."""
import catlib
seen = set()
while True:
path = self.wantedcategories_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" class="new" title="(?P<title>.+?) '
'\(page does not exist\)">.+?</a> .+?\)</li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = catlib.Category(self, title)
yield page
if not repeat:
break
    def unusedfiles(self, number=10, repeat=False, extension=None):
        """Yield ImagePage objects from Special:Unusedimages.

        When extension is given, only files whose name ends in it are
        yielded; note the comparison below only looks at the last three
        characters, so only 3-letter extensions (e.g. 'png', 'jpg') can
        match - 'jpeg' never will.
        """
        seen = set()
        ns = self.image_namespace()
        entryR = re.compile(
            '<a href=".+?" title="(?P<title>%s:.+?)">.+?</a>' % ns)
        while True:
            path = self.unusedfiles_address(n=number)
            get_throttle()
            html = self.getUrl(path)
            for m in entryR.finditer(html):
                fileext = None
                title = m.group('title')
                if extension:
                    # NOTE(review): grabs the last 3 chars only; a 4-char
                    # extension such as 'jpeg' cannot match - confirm this
                    # limitation is intended.
                    fileext = title[len(title) - 3:]
                # When extension is None, fileext stays None and the
                # equality test below is vacuously true.
                if title not in seen and fileext == extension:
## Check whether the media is used in a Proofread page
                    # code disabled because it slows this method down, and
                    # because it is unclear what it's supposed to do.
                    #basename = title[6:]
                    #page = Page(self, 'Page:' + basename)
                    #if not page.exists():
                    seen.add(title)
                    image = ImagePage(self, title)
                    yield image
            if not repeat:
                break
def withoutinterwiki(self, number=10, repeat=False):
"""Yield Pages without language links from Special:Withoutinterwiki."""
seen = set()
while True:
path = self.withoutinterwiki_address(n=number)
get_throttle()
html = self.getUrl(path)
entryR = re.compile(
'<li><a href=".+?" title="(?P<title>.+?)">.+?</a></li>')
for m in entryR.finditer(html):
title = m.group('title')
if title not in seen:
seen.add(title)
page = Page(self, title)
yield page
if not repeat:
break
    def randompage(self, redirect=False):
        """Return a random Page of this site.

        Uses the API (list=random) on MediaWiki >= 1.12; otherwise scrapes
        Special:Random / Special:RandomRedirect. With redirect=True a random
        redirect page is returned instead of a random article.

        NOTE(review): the scraping fallback returns None when the
        wgPageName marker is not found in the HTML - callers should be
        prepared for that.
        """
        if self.has_api() and self.versionnumber() >= 12:
            params = {
                'action': 'query',
                'list': 'random',
                #'rnnamespace': '0',
                'rnlimit': '1',
                #'': '',
            }
            if redirect:
                params['rnredirect'] = 1
            data = query.GetData(params, self)
            return Page(self, data['query']['random'][0]['title'])
        else:
            if redirect:
                """Yield random redirect page via Special:RandomRedirect."""
                html = self.getUrl(self.randomredirect_address())
            else:
                """Yield random page via Special:Random"""
                html = self.getUrl(self.random_address())
            m = re.search('var wgPageName = "(?P<title>.+?)";', html)
            if m is not None:
                return Page(self, m.group('title'))
    def randomredirectpage(self):
        """Return a random redirect page (delegates to randompage)."""
        return self.randompage(redirect=True)
    def allpages(self, start='!', namespace=None, includeredirects=True,
                 throttle=True):
        """
        Yield all Pages in alphabetical order.

        Parameters:
        start     Start at this page. By default, it starts at '!', and
                  yields all pages.
        namespace Yield all pages in this namespace; defaults to the
                  namespace of 'start'.  MediaWiki software will only
                  return pages in one namespace at a time.

        If includeredirects is False, redirects will not be found.
        If includeredirects equals the string 'only', only redirects
        are yielded.

        It is advised not to use this directly, but to use the
        AllpagesPageGenerator from pagegenerators.py instead.
        """
        if namespace is None:
            # Derive the namespace (and strip its prefix) from 'start'.
            page = Page(self, start)
            namespace = page.namespace()
            start = page.title(withNamespace=False)
        if not self.has_api():
            # Old wikis without the API: screen-scrape Special:Allpages.
            for page in self._allpagesOld(start, namespace, includeredirects,
                                          throttle):
                yield page
            return
        params = {
            'action': 'query',
            'list': 'allpages',
            'aplimit': config.special_page_limit,
            'apnamespace': namespace,
            'apfrom': start
        }
        if not includeredirects:
            params['apfilterredir'] = 'nonredirects'
        elif includeredirects == 'only':
            params['apfilterredir'] = 'redirects'
        while True:
            if throttle:
                get_throttle()
            data = query.GetData(params, self)
            if verbose:
                debug(u'allpages>>> data.keys() %s' % data.keys())
            if 'warnings' in data:
                warning = data['warnings']['allpages']['*']
                raise RuntimeError("API query warning: %s" % warning)
            if 'error' in data:
                raise RuntimeError("API query error: %s" % data)
            if not 'allpages' in data['query']:
                raise RuntimeError("API query error, no pages found: %s" % data)
            count = 0
            for p in data['query']['allpages']:
                count += 1
                yield Page(self, p['title'])
                if count >= config.special_page_limit:
                    break
            # Follow the API continuation unless the requested limit was
            # already reached in this batch.
            if 'query-continue' in data and count < params['aplimit']:
                params.update(data['query-continue']['allpages'])
            else:
                break
def _allpagesOld(self, start='!', namespace=0, includeredirects=True,
throttle=True):
"""
Yield all Pages from Special:Allpages.
This method doesn't work with MediaWiki 1.14 because of a change to
Special:Allpages. It is only left here for compatibility with older
MediaWiki versions, which don't support the API.
Parameters:
start Start at this page. By default, it starts at '!', and yields
all pages.
namespace Yield all pages in this namespace; defaults to 0.
MediaWiki software will only return pages in one namespace
at a time.
If includeredirects is False, redirects will not be found.
If includeredirects equals the string 'only', only redirects
will be found. Note that this has not been tested on older
versions of the MediaWiki code.
It is advised not to use this directly, but to use the
AllpagesPageGenerator from pagegenerators.py instead.
"""
monobook_error = True
if start == '':
start = '!'
while True:
# encode Non-ASCII characters in hexadecimal format (e.g. %F6)
start = start.encode(self.encoding())
start = urllib.quote(start)
# load a list which contains a series of article names (always 480)
path = self.allpages_address(start, namespace)
output(
u'Retrieving Allpages special page for %s from %s, namespace %i'
% (repr(self), start, namespace))
returned_html = self.getUrl(path)
# Try to find begin and end markers
try:
# In 1.4, another table was added above the navigational links
if self.versionnumber() >= 4:
begin_s = '</table><hr /><table'
end_s = '</table'
else:
begin_s = '<table'
end_s = '</table'
ibegin = returned_html.index(begin_s)
iend = returned_html.index(end_s, ibegin + 3)
except ValueError:
if monobook_error:
raise ServerError("Couldn't extract allpages special page. "
"Make sure you're using MonoBook skin.")
else:
# No list of wikilinks
break
monobook_error = False
# remove the irrelevant sections
returned_html = returned_html[ibegin:iend]
if self.versionnumber() == 2:
R = re.compile('/wiki/(.*?)\" *class=[\'\"]printable')
elif self.versionnumber() < 5:
# Apparently the special code for redirects was added in 1.5
R = re.compile('title ?=\"(.*?)\"')
elif not includeredirects:
R = re.compile('\<td(?: width="33%")?\>\<a href=\"\S*\" +title '
'?="(.*?)"')
elif includeredirects == 'only':
R = re.compile(
'\<td(?: width="33%")?>\<[^\<\>]*allpagesredirect\"\>'
'\<a href=\"\S*\" +title ?="(.*?)"')
else:
R = re.compile('title ?=\"(.*?)\"')
# Count the number of useful links on this page
n = 0
for hit in R.findall(returned_html):
# count how many articles we found on the current page
n = n + 1
if self.versionnumber() == 2:
yield Page(self, url2link(hit, site=self, insite=self))
else:
yield Page(self, hit)
# save the last hit, so that we know where to continue when we
# finished all articles on the current page. Append a '!' so
# that we don't yield a page twice.
start = Page(self, hit).title(withNamespace=False) + '!'
# A small shortcut: if there are less than 100 pages listed on this
# page, there is certainly no next. Probably 480 would do as well,
# but better be safe than sorry.
if n < 100:
if (not includeredirects) or includeredirects == 'only':
# Maybe there were only so few because the rest is or is
# not a redirect
R = re.compile('title ?=\"(.*?)\"')
allLinks = R.findall(returned_html)
if len(allLinks) < 100:
break
elif n == 0:
# In this special case, no pages of the requested type
# were found, and "start" will remain and be
# double-encoded.
# Use the last page as the start of the next page.
start = Page(self, allLinks[-1]).title(
withNamespace=False) + '!'
else:
break
else:
# Don't send a new request if "Next page (pagename)" isn't
# present
Rnonext = re.compile(
r'title="(Special|%s):.+?">%s</a></td></tr></table>'
% (self.mediawiki_message('nstab-special'),
re.escape(self.mediawiki_message(
'nextpage')).replace('\$1', '.*?')))
if not Rnonext.search(full_returned_html):
break
def prefixindex(self, prefix, namespace=0, includeredirects=True):
"""Yield all pages with a given prefix.
Parameters:
prefix The prefix of the pages.
namespace Namespace number; defaults to 0.
MediaWiki software will only return pages in one namespace
at a time.
If includeredirects is False, redirects will not be found.
If includeredirects equals the string 'only', only redirects
will be found. Note that this has not been tested on older
versions of the MediaWiki code.
It is advised not to use this directly, but to use the
PrefixingPageGenerator from pagegenerators.py instead.
"""
for page in self.allpages(start=prefix, namespace=namespace,
includeredirects=includeredirects):
if page.title(withNamespace=False).startswith(prefix):
yield page
else:
break
def protectedpages(self, namespace=None, type='edit', lvl=0):
""" Yield all the protected pages, using Special:ProtectedPages
* namespace is a namespace number
* type can be 'edit' or 'move
* lvl : protection level, can be 0, 'autoconfirmed', or 'sysop'
"""
# Avoid problems of encoding and stuff like that, let it divided please
url = self.protectedpages_address()
url += '&type=%s&level=%s' % (type, lvl)
# /!\ if namespace seems simpler, but returns false when ns=0
if namespace is not None:
url += '&namespace=%s' % namespace
parser_text = self.getUrl(url)
while True:
m = re.findall(
r'<li><a href=".*?" title=".*?">(.*?)</a>.*?<small>\((.*?)\)'
r'</small>.*?\((.*?)\)</li>', parser_text)
for data in m:
title = data[0]
size = data[1]
status = data[2]
yield Page(self, title)
nextpage = re.findall(
r'<.ul>\(.*?\).*?\(.*?\).*?\(<a href="(.*?)".*?</a>\) '
r'+?\(<a href=', parser_text)
if nextpage != []:
parser_text = self.getUrl(nextpage[0].replace('&', '&'))
continue
else:
break
    def linksearch(self, siteurl, limit=500, euprotocol=None):
        """Yield Pages from results of Special:Linksearch for 'siteurl'.

        Also queries the wildcard form '*.siteurl' unless 'siteurl'
        already starts with '*.'.  Uses the API 'exturlusage' list on
        MW >= 1.11; otherwise screen-scrapes Special:Linksearch.

        @param limit: maximum number of results to fetch per URL form.
        @param euprotocol: optional protocol filter for the API query.
        """
        cache = []
        R = re.compile('title ?=\"([^<>]*?)\">[^<>]*</a></li>')
        urlsToRetrieve = [siteurl]
        if not siteurl.startswith('*.'):
            urlsToRetrieve.append('*.' + siteurl)
        if self.has_api() and self.versionnumber() >= 11:
            output(u'Querying API exturlusage...')
            for url in urlsToRetrieve:
                params = {
                    'action': 'query',
                    'list': 'exturlusage',
                    'eulimit': limit,
                    'euquery': url,
                }
                if euprotocol:
                    params['euprotocol'] = euprotocol
                count = 0
                while True:
                    data = query.GetData(params, self)
                    if data['query']['exturlusage'] == []:
                        break
                    for pages in data['query']['exturlusage']:
                        count += 1
                        if not siteurl in pages['title']:
                            # the links themselves have similar form
                            if pages['pageid'] not in cache:
                                cache.append(pages['pageid'])
                                yield Page(self, pages['title'],
                                           defaultNamespace=pages['ns'])
                        if count >= limit:
                            break
                    if 'query-continue' in data and count < limit:
                        params.update(data[
                            u'query-continue'][u'exturlusage'])
                    else:
                        break
        else:
            output(u'Querying [[Special:Linksearch]]...')
            for url in urlsToRetrieve:
                offset = 0
                while True:
                    path = self.linksearch_address(url, limit=limit,
                                                   offset=offset)
                    get_throttle()
                    html = self.getUrl(path)
                    # restricting the HTML source:
                    # when in the source, this div marks the beginning of the
                    # input
                    loc = html.find('<div class="mw-spcontent">')
                    if loc > -1:
                        html = html[loc:]
                    # when in the source, marks the end of the linklist
                    loc = html.find('<div class="printfooter">')
                    if loc > -1:
                        html = html[:loc]
                    # our regex fetches internal page links and the link they
                    # contain
                    links = R.findall(html)
                    if not links:
                        # no more page to be fetched for that link
                        break
                    for title in links:
                        if not siteurl in title:
                            # the links themselves have similar form
                            if title in cache:
                                continue
                            else:
                                cache.append(title)
                                yield Page(self, title)
                    offset += limit
def linkto(self, title, othersite=None):
"""Return unicode string in the form of a wikilink to 'title'
Use optional Site argument 'othersite' to generate an interwiki link
from the other site to the current site.
"""
if othersite and othersite.lang != self.lang:
return u'[[%s:%s]]' % (self.lang, title)
else:
return u'[[%s]]' % title
    def isInterwikiLink(self, s):
        """Return True if s is in the form of an interwiki link.

        Interwiki links have the form "foo:bar" or ":foo:bar" where foo is a
        known language code or family. Called recursively if the first part
        of the link refers to this site's own family and/or language.
        """
        s = s.replace("_", " ").strip(" ").lstrip(":")
        if not ':' in s:
            return False
        first, rest = s.split(':', 1)
        # interwiki codes are case-insensitive
        first = first.lower().strip(" ")
        # commons: forwards interlanguage links to wikipedia:, etc.
        if self.family.interwiki_forward:
            interlangTargetFamily = Family(self.family.interwiki_forward)
        else:
            interlangTargetFamily = self.family
        # A local namespace prefix (e.g. "Talk:") is never an interwiki link.
        if self.getNamespaceIndex(first):
            return False
        if first in interlangTargetFamily.langs:
            if first == self.lang:
                # e.g. "en:en:Foo": strip our own code and re-check the rest.
                return self.isInterwikiLink(rest)
            else:
                return True
        if first in self.family.get_known_families(site=self):
            if first == self.family.name:
                # Same for a prefix naming our own family.
                return self.isInterwikiLink(rest)
            else:
                return True
        return False
def getmagicwords(self, word):
"""Return list of localized "word" magic words for the site."""
if self.versionnumber() <= 13:
raise NotImplementedError
return self.siteinfo('magicwords').get(word)
    def redirectRegex(self):
        """Return a compiled regular expression matching on redirect pages.

        Group 1 in the regex match object will be the target title.
        """
        #NOTE: this is needed, since the API can give false positives!
        default = 'REDIRECT'
        # getmagicwords() raises NotImplementedError on MW <= 1.13, so it
        # is only consulted for newer versions; 'keywords' is then either
        # the localized list or False.
        keywords = self.versionnumber() > 13 and self.getmagicwords('redirect')
        if keywords:
            # NOTE(review): the localized keywords are joined into the
            # pattern without re.escape() -- confirm they never contain
            # regex metacharacters.
            pattern = r'(?:' + '|'.join(keywords) + ')'
        else:
            # no localized keyword for redirects
            pattern = r'#%s' % default
        if self.versionnumber() > 12:
            # in MW 1.13 (at least) a redirect directive can follow whitespace
            prefix = r'\s*'
        else:
            prefix = r'[\r\n]*'
        # A redirect starts with hash (#), followed by a keyword, then
        # arbitrary stuff, then a wikilink. The wikilink may contain
        # a label, although this is not useful.
        return re.compile(prefix + pattern
                          + '\s*:?\s*\[\[(.+?)(?:\|.*?)?\]\]',
                          re.IGNORECASE | re.UNICODE | re.DOTALL)
    def pagenamecodes(self, default=True):
        """Return list of localized PAGENAME tags for the site.

        Falls back to the plain string u'PAGENAME' on MW <= 1.13 or when
        no localized words are defined.  NOTE(review): the 'default'
        parameter is unused.
        """
        return self.versionnumber() > 13 and self.getmagicwords(
            'pagename') or u'PAGENAME'
    def pagename2codes(self, default=True):
        """Return list of localized PAGENAMEE tags for the site.

        Falls back to the plain string u'PAGENAMEE' on MW <= 1.13 or when
        no localized words are defined.  NOTE(review): the 'default'
        parameter is unused.
        """
        return self.versionnumber() > 13 and self.getmagicwords(
            'pagenamee') or u'PAGENAMEE'
def resolvemagicwords(self, wikitext):
"""Replace the {{ns:xx}} marks in a wikitext with the namespace names"""
defaults = []
for namespace in self.family.namespaces.itervalues():
value = namespace.get('_default', None)
if value:
if isinstance(value, list):
defaults.append(value[0])
else:
defaults.append(value)
named = re.compile(u'{{ns:(' + '|'.join(defaults) + ')}}', re.I)
def replacenamed(match):
return self.normalizeNamespace(match.group(1))
wikitext = named.sub(replacenamed, wikitext)
numbered = re.compile('{{ns:(-?\d{1,2})}}', re.I)
def replacenumbered(match):
return self.namespace(int(match.group(1)))
return numbered.sub(replacenumbered, wikitext)
    # The following methods are for convenience, so that you can access
    # methods of the Family class easier: each is a thin delegation to
    # self.family for this site's language.
    def encoding(self):
        """Return the current encoding for this site."""
        return self.family.code2encoding(self.lang)
    def encodings(self):
        """Return a list of all historical encodings for this site."""
        return self.family.code2encodings(self.lang)
    def category_namespace(self):
        """Return the canonical name of the Category namespace on this site."""
        # equivalent to self.namespace(14)?
        return self.family.category_namespace(self.lang)
    def category_namespaces(self):
        """Return a list of all valid names for the Category namespace."""
        return self.family.category_namespaces(self.lang)
    def category_redirects(self):
        """Return the Family's category redirects for this site's language."""
        return self.family.category_redirects(self.lang)
    def image_namespace(self, fallback='_default'):
        """Return the canonical name of the Image namespace on this site."""
        # equivalent to self.namespace(6)?
        return self.family.image_namespace(self.lang, fallback)
    def template_namespace(self, fallback='_default'):
        """Return the canonical name of the Template namespace on this site."""
        # equivalent to self.namespace(10)?
        return self.family.template_namespace(self.lang, fallback)
    def export_address(self):
        """Return URL path for Special:Export."""
        return self.family.export_address(self.lang)
    def query_address(self):
        """Return URL path + '?' for query.php (if enabled on this Site)."""
        return self.family.query_address(self.lang)
    def api_address(self):
        """Return URL path + '?' for api.php (if enabled on this Site)."""
        return self.family.api_address(self.lang)
    def apipath(self):
        """Return URL path for api.php (if enabled on this Site)."""
        return self.family.apipath(self.lang)
    def scriptpath(self):
        """Return URL prefix for scripts on this site ({{SCRIPTPATH}} value)"""
        return self.family.scriptpath(self.lang)
    def protocol(self):
        """Return protocol ('http' or 'https') for access to this site."""
        return self.family.protocol(self.lang)
    def hostname(self):
        """Return host portion of site URL."""
        return self.family.hostname(self.lang)
    def path(self):
        """Return URL path for index.php on this Site."""
        return self.family.path(self.lang)
    def dbName(self):
        """Return MySQL database name."""
        return self.family.dbName(self.lang)
    def move_address(self):
        """Return URL path for Special:Movepage."""
        return self.family.move_address(self.lang)
    def delete_address(self, s):
        """Return URL path to delete title 's'."""
        return self.family.delete_address(self.lang, s)
    def undelete_view_address(self, s, ts=''):
        """Return URL path to view Special:Undelete for title 's'.

        Optional argument 'ts' returns path to view specific deleted version.
        """
        return self.family.undelete_view_address(self.lang, s, ts)
    def undelete_address(self):
        """Return URL path to Special:Undelete."""
        return self.family.undelete_address(self.lang)
    def protect_address(self, s):
        """Return URL path to protect title 's'."""
        return self.family.protect_address(self.lang, s)
    def unprotect_address(self, s):
        """Return URL path to unprotect title 's'."""
        return self.family.unprotect_address(self.lang, s)
    def put_address(self, s):
        """Return URL path to submit revision to page titled 's'."""
        return self.family.put_address(self.lang, s)
    def get_address(self, s):
        """Return URL path to retrieve page titled 's'."""
        # Page titles use underscores instead of spaces in URLs.
        title = s.replace(' ', '_')
        return self.family.get_address(self.lang, title)
    def nice_get_address(self, s):
        """Return shorter URL path to retrieve page titled 's'."""
        return self.family.nice_get_address(self.lang, s)
    def edit_address(self, s):
        """Return URL path for edit form for page titled 's'."""
        return self.family.edit_address(self.lang, s)
    def watch_address(self, s):
        """Return URL path for watching the titled 's'."""
        return self.family.watch_address(self.lang, s)
    def unwatch_address(self, s):
        """Return URL path for unwatching the titled 's'."""
        return self.family.unwatch_address(self.lang, s)
    def purge_address(self, s):
        """Return URL path to purge cache and retrieve page 's'."""
        return self.family.purge_address(self.lang, s)
    def block_address(self):
        """Return path to block an IP address."""
        return self.family.block_address(self.lang)
    def unblock_address(self):
        """Return path to unblock an IP address."""
        return self.family.unblock_address(self.lang)
    def blocksearch_address(self, s, usertype):
        """Return path to search for blocks on IP address 's'.

        NOTE(review): on API-capable wikis (MW >= 1.12) this does not
        return a path but the raw response of an API 'blocks' query --
        callers must handle both shapes.
        """
        if not self.has_api() or self.versionnumber() < 12:
            return self.family.blocksearch_address(self.lang, s)
        params = {
            'bk%s' % usertype: s,
            'action': 'query',
            'list': 'blocks',
        }
        return query.GetData(params, self, back_response=True)
    def linksearch_address(self, s, limit=500, offset=0):
        """Return path to Special:Linksearch for target 's'."""
        return self.family.linksearch_address(self.lang, s, limit=limit,
                                              offset=offset)
    def search_address(self, q, n=50, ns=0):
        """Return path to Special:Search for query 'q'."""
        return self.family.search_address(self.lang, q, n, ns)
    def allpages_address(self, s, ns=0):
        """Return path to Special:Allpages."""
        return self.family.allpages_address(self.lang, start=s, namespace=ns)
    def log_address(self, n=50, mode='', user=''):
        """Return path to Special:Log."""
        return self.family.log_address(self.lang, n, mode, user)
    def newpages_address(self, n=50, namespace=0):
        """Return path to Special:Newpages."""
        return self.family.newpages_address(self.lang, n, namespace)
    def longpages_address(self, n=500):
        """Return path to Special:Longpages."""
        return self.family.longpages_address(self.lang, n)
    def shortpages_address(self, n=500):
        """Return path to Special:Shortpages."""
        return self.family.shortpages_address(self.lang, n)
    def unusedfiles_address(self, n=500):
        """Return path to Special:Unusedimages."""
        return self.family.unusedfiles_address(self.lang, n)
    def categories_address(self, n=500):
        """Return path to Special:Categories."""
        return self.family.categories_address(self.lang, n)
    def deadendpages_address(self, n=500):
        """Return path to Special:Deadendpages."""
        return self.family.deadendpages_address(self.lang, n)
    def ancientpages_address(self, n=500):
        """Return path to Special:Ancientpages."""
        return self.family.ancientpages_address(self.lang, n)
    def lonelypages_address(self, n=500):
        """Return path to Special:Lonelypages."""
        return self.family.lonelypages_address(self.lang, n)
    def protectedpages_address(self, n=500):
        """Return path to Special:ProtectedPages."""
        return self.family.protectedpages_address(self.lang, n)
    def unwatchedpages_address(self, n=500):
        """Return path to Special:Unwatchedpages."""
        return self.family.unwatchedpages_address(self.lang, n)
    def uncategorizedcategories_address(self, n=500):
        """Return path to Special:Uncategorizedcategories."""
        return self.family.uncategorizedcategories_address(self.lang, n)
    def uncategorizedimages_address(self, n=500):
        """Return path to Special:Uncategorizedimages."""
        return self.family.uncategorizedimages_address(self.lang, n)
    def uncategorizedpages_address(self, n=500):
        """Return path to Special:Uncategorizedpages."""
        return self.family.uncategorizedpages_address(self.lang, n)
    def uncategorizedtemplates_address(self, n=500):
        """Return path to Special:Uncategorizedtemplates."""
        return self.family.uncategorizedtemplates_address(self.lang, n)
    def unusedcategories_address(self, n=500):
        """Return path to Special:Unusedcategories."""
        return self.family.unusedcategories_address(self.lang, n)
    def wantedcategories_address(self, n=500):
        """Return path to Special:Wantedcategories."""
        return self.family.wantedcategories_address(self.lang, n)
    def withoutinterwiki_address(self, n=500):
        """Return path to Special:Withoutinterwiki."""
        return self.family.withoutinterwiki_address(self.lang, n)
    def references_address(self, s):
        """Return path to Special:Whatlinkshere for page 's'."""
        return self.family.references_address(self.lang, s)
    def allmessages_address(self):
        """Return path to Special:Allmessages."""
        return self.family.allmessages_address(self.lang)
    def upload_address(self):
        """Return path to Special:Upload."""
        return self.family.upload_address(self.lang)
    def double_redirects_address(self, default_limit=True):
        """Return path to Special:Doubleredirects."""
        return self.family.double_redirects_address(self.lang, default_limit)
    def broken_redirects_address(self, default_limit=True):
        """Return path to Special:Brokenredirects."""
        return self.family.broken_redirects_address(self.lang, default_limit)
    def random_address(self):
        """Return path to Special:Random."""
        return self.family.random_address(self.lang)
    def randomredirect_address(self):
        """Return path to Special:RandomRedirect."""
        return self.family.randomredirect_address(self.lang)
    def login_address(self):
        """Return path to Special:Userlogin."""
        return self.family.login_address(self.lang)
    def captcha_image_address(self, id):
        """Return path to Special:Captcha for image 'id'."""
        return self.family.captcha_image_address(self.lang, id)
    def watchlist_address(self):
        """Return path to Special:Watchlist editor."""
        return self.family.watchlist_address(self.lang)
    def contribs_address(self, target, limit=500, offset=''):
        """Return path to Special:Contributions for user 'target'."""
        return self.family.contribs_address(self.lang, target, limit, offset)
    def globalusers_address(self, target='', limit=500, offset='', group=''):
        """Return path to Special:GlobalUsers for user 'target' and/or
        group 'group'.
        """
        return self.family.globalusers_address(self.lang, target, limit,
                                               offset, group)
    def version(self):
        """Return MediaWiki version number as a string."""
        return self.family.version(self.lang)
    def versionnumber(self):
        """Return an int identifying MediaWiki version.

        Currently this is implemented as returning the minor version
        number; i.e., 'X' in version '1.X.Y'
        """
        return self.family.versionnumber(self.lang)
def live_version(self):
"""Return the 'real' version number found on [[Special:Version]]
Return value is a tuple (int, int, str) of the major and minor
version numbers and any other text contained in the version.
"""
global htmldata
if not hasattr(self, "_mw_version"):
PATTERN = r"^(?:: )?([0-9]+)\.([0-9]+)(.*)$"
versionpage = self.getUrl(self.get_address("Special:Version"))
htmldata = BeautifulSoup(versionpage, convertEntities="html")
# try to find the live version
versionlist = []
# 1st try is for mw < 1.17wmf1
versionlist.append(lambda: htmldata.findAll(
text="MediaWiki")[1].parent.nextSibling)
# 2nd try is for mw >=1.17wmf1
versionlist.append(lambda: htmldata.body.table.findAll(
'td')[1].contents[0])
# 3rd try is for mw >=1.21
versionlist.append(lambda: htmldata.body.table.findAll(
'td')[1].contents[0].contents[0])
# 4th uses family file which is not live
versionlist.append(lambda: self.family.version(self.lang))
for versionfunc in versionlist:
try:
versionstring = versionfunc()
except:
continue
m = re.match(PATTERN, str(versionstring).strip())
if m:
break
else:
raise Error(u'Cannot find any live version!')
self._mw_version = (int(m.group(1)), int(m.group(2)), m.group(3))
return self._mw_version
def checkCharset(self, charset):
"""Warn if charset returned by wiki doesn't match family file."""
fromFamily = self.encoding()
assert fromFamily.lower() == charset.lower(), \
"charset for %s changed from %s to %s" \
% (repr(self), fromFamily, charset)
if fromFamily.lower() != charset.lower():
raise ValueError("code2encodings has wrong charset for %s. It "
"should be %s, but is %s"
% (repr(self), charset, self.encoding()))
    def shared_image_repository(self):
        """Return a tuple of image repositories used by this site."""
        return self.family.shared_image_repository(self.lang)
    def shared_data_repository(self, transcluded=False):
        """Return a tuple of data repositories used by this site."""
        return self.family.shared_data_repository(self.lang, transcluded)
    @property
    def has_image_repository(self):
        """Return True if site has a shared image repository like commons"""
        code, fam = self.shared_image_repository()
        return bool(code or fam)
    @property
    def has_data_repository(self):
        """Return True if site has a shared data repository like wikidata"""
        code, fam = self.shared_data_repository()
        return bool(code or fam)
    @property
    def has_transcluded_data(self):
        """Return True if site transcludes data from a shared repository"""
        code, fam = self.shared_data_repository(True)
        return bool(code or fam)
    def image_repository(self):
        """Return Site object for image repository e.g. commons.

        Returns None when the family defines no shared image repository.
        """
        code, fam = self.shared_image_repository()
        if bool(code or fam):
            return pywikibot.getSite(code, fam, self.username())
    def data_repository(self):
        """Return Site object for data repository e.g. wikidata.

        Returns None when the family defines no shared data repository.
        """
        code, fam = self.shared_data_repository()
        if bool(code or fam):
            return pywikibot.getSite(code, fam, self.username())
    def is_image_repository(self):
        """Return True if Site object is the image repository.

        NOTE(review): compares by identity; relies on getSite returning
        a cached instance for the same (code, family, user) -- confirm.
        """
        return self is self.image_repository()
    def is_data_repository(self):
        """Return True if Site object is the data repository."""
        return self is self.data_repository()
    def category_on_one_line(self):
        """Return True if this site wants all category links on one line."""
        return self.lang in self.family.category_on_one_line
    def interwiki_putfirst(self):
        """Return list of language codes for ordering of interwiki links.

        Returns None when the family defines no ordering for this language.
        """
        return self.family.interwiki_putfirst.get(self.lang, None)
def interwiki_putfirst_doubled(self, list_of_links):
# TODO: is this even needed? No family in the framework has this
# dictionary defined!
if self.lang in self.family.interwiki_putfirst_doubled:
if len(list_of_links) >= self.family.interwiki_putfirst_doubled[
self.lang][0]:
list_of_links2 = []
for lang in list_of_links:
list_of_links2.append(lang.language())
list = []
for lang in self.family.interwiki_putfirst_doubled[
self.lang][1]:
try:
list.append(list_of_links[list_of_links2.index(lang)])
except ValueError:
pass
return list
else:
return False
else:
return False
    def getSite(self, code):
        """Return Site object for language 'code' in this Family.

        Delegates to the module-level getSite(), which caches instances.
        """
        return getSite(code=code, fam=self.family, user=self.user)
    def namespace(self, num, all=False):
        """Return string containing local name of namespace 'num'.

        If optional argument 'all' is true, return a tuple of all recognized
        values for this namespace.
        """
        return self.family.namespace(self.lang, num, all=all)
    def normalizeNamespace(self, value):
        """Return canonical name for namespace 'value' in this Site's language.

        'Value' should be a string or unicode.
        If no match, return 'value' unmodified.
        """
        # NOTE(review): assumes 'value' is non-empty; '' would raise
        # IndexError below.
        if not self.nocapitalize:
            # make sure first letter gets normalized; there is at least
            # one case ("İ") in which s.lower().upper() != s
            value = value[0].lower().upper() + value[1:]
        return self.family.normalizeNamespace(self.lang, value)
    def getNamespaceIndex(self, namespace):
        """Given a namespace name, return its int index, or None if invalid."""
        return self.family.getNamespaceIndex(self.lang, namespace)
    def language(self):
        """Return Site's language code."""
        return self.lang
    def fam(self):
        """Return Family object for this Site."""
        return self.family
    def disambcategory(self):
        """Return Category in which disambig pages are listed.

        Raises NoPage when the family file defines no disambiguation
        category name for this language.
        """
        import catlib
        try:
            return catlib.Category(self,
                                   self.namespace(14) + ':' +
                                   self.family.disambcatname[self.lang])
        except KeyError:
            raise NoPage
def getToken(self, getalways=True, getagain=False, sysop=False):
index = self._userIndex(sysop)
if getagain or (getalways and self._token[index] is None):
output(u'Getting a token.')
self._load(sysop=sysop, force=True)
if self._token[index] is not None:
return self._token[index]
else:
return False
    def getPatrolToken(self, sysop=False):
        """Return (and cache) a patrol token for this user.

        On first use, fetches the token from the most recent unpatrolled
        new change via the API; later calls return the cached value.

        Raises RuntimeError on an API error or warning, and ServerError
        when the API returns no recentchanges data.
        """
        index = self._userIndex(sysop)
        if self._patrolToken[index] is None:
            output(u'Getting a patrol token.')
            params = {
                'action': 'query',
                'list': 'recentchanges',
                'rctype': 'new',
                'rcshow': '!patrolled',
                'rctoken': 'patrol',
                'rclimit': 1,
            }
            data = query.GetData(params, self)
            if 'error' in data:
                raise RuntimeError('%s' % data['error'])
            elif 'warnings' in data:
                raise RuntimeError('%s' % data['warnings'])
            try:
                rcData = data['query']['recentchanges']
            except KeyError:
                raise ServerError(
                    "The APIs don't return data, the site may be down")
            # NOTE(review): rcData[0] raises IndexError when there are no
            # unpatrolled new changes at all -- confirm that is intended.
            self._patrolToken[index] = rcData[0]['patroltoken']
        return self._patrolToken[index]
def getFilesFromAnHash(self, hash_found=None):
""" Function that uses APIs to give the images that has the same hash.
Useful to find duplicates or nowcommons.
NOTE: it returns also the image itself, if you don't want it, just
filter the list returned.
NOTE 2: it returns the image WITHOUT the image namespace.
"""
if self.versionnumber() < 12:
return
# If the hash is none return None and not continue
if hash_found is None:
return
# Now get all the images with the same hash
#action=query&format=xml&list=allimages&aisha1=%s
image_namespace = "%s:" % self.image_namespace() # Image:
params = {
'action': 'query',
'list': 'allimages',
# Workaround for bug pywikipediabot-Bugs-3596617 / bugzilla:43177
'aisha1': hash_found.lower(),
}
allimages = query.GetData(params, self)['query']['allimages']
files = list()
for imagedata in allimages:
image = imagedata[u'name']
files.append(image)
return files
    def getParsedString(self, string, keeptags=[u'*']):
        """Parses the string with API and returns html content.

        @param string: String that should be parsed.
        @type string: string
        @param keeptags: Defines which tags (wiki, HTML) should NOT be
            removed.  The default [u'*'] keeps everything.
        @type keeptags: list

        Returns the string given, parsed through the wiki parser.
        """
        # NOTE(review): the mutable default for 'keeptags' is never
        # mutated here, but a None sentinel would be safer.
        if not self.has_api():
            raise Exception('parse: no API: not implemented')
        # call the wiki to get info
        params = {
            u'action': u'parse',
            u'text': string,
        }
        pywikibot.get_throttle()
        pywikibot.output(u"Parsing string through the wiki parser via API.")
        result = query.GetData(params, self)
        r = result[u'parse'][u'text'][u'*']
        # disable/remove comments
        r = pywikibot.removeDisabledParts(r, tags=['comments']).strip()
        # disable/remove ALL tags
        # NOTE(review): removeHTMLParts is referenced without a module
        # prefix, unlike the pywikibot.* calls above -- confirm it is
        # defined or imported in this module.
        if not (keeptags == [u'*']):
            r = removeHTMLParts(r, keeptags=keeptags).strip()
        return r
def getExpandedString(self, string):
"""Expands the string with API and returns wiki content.
@param string: String that should be expanded.
@type string: string
Returns the string given, expanded through the wiki parser.
"""
if not self.has_api():
raise Exception('expandtemplates: no API: not implemented')
# call the wiki to get info
params = {
u'action': u'expandtemplates',
u'text': string,
}
pywikibot.get_throttle()
pywikibot.output(u"Expanding string through the wiki parser via API.")
result = query.GetData(params, self)
r = result[u'expandtemplates'][u'*']
return r
# Caches to provide faster access
# Maps "family:code:user" keys to Site instances (see getSite below).
_sites = {}
# NOTE(review): populated elsewhere in this module; appears to cache
# per-site namespace lookups -- confirm before relying on its shape.
_namespaceCache = {}
def getSite(code=None, fam=None, user=None, noLogin=False):
    """Return a Site instance for the given language code and family.

    Defaults to the globally-configured default_code / default_family,
    and to the username configured for that site.  Instances are cached
    in _sites, so repeated calls with the same (family, code, user)
    return the same object.

    @param noLogin: if True, do not force a login on non-public wikis.
    """
    if code is None:
        code = default_code
    if fam is None:
        fam = default_family
    if user is None:
        try:
            user = config.usernames[fam][code]
        except KeyError:
            user = None
    key = '%s:%s:%s' % (fam, code, user)
    if key not in _sites:
        _sites[key] = Site(code=code, fam=fam, user=user)
    ret = _sites[key]
    # Non-public wikis require credentials before anything can be read.
    if not ret.family.isPublic(code) and not noLogin:
        ret.forceLogin()
    return ret
def setSite(site):
    """Make 'site' the default Site for subsequent getSite() calls."""
    global default_code, default_family
    default_code = site.language()
    default_family = site.family
# Command line parsing and help
def calledModuleName():
    """Return the name of the module calling this function.

    This is required because the -help option loads the module's docstring
    and because the module name will be used for the filename of the log.
    """
    # Take the basename FIRST: matching '.py' against the full path could
    # otherwise clip inside a directory name that happens to contain
    # '.py' (e.g. '/opt/py.tools/runner').
    called = os.path.basename(sys.argv[0].strip())
    if ".py" in called:  # could end with .pyc, .pyw, etc. on some platforms
        # clip off the '.py?' filename extension
        called = called[:called.rindex('.py')]
    return called
def _decodeArg(arg):
    """Decode a single command line argument to unicode.

    Arguments may already be unicode when main() is invoked from another
    script; otherwise they are decoded with a platform-appropriate codec.
    """
    if isinstance(arg, unicode):
        # Already decoded, e.g. passed through from another script.
        return arg
    if sys.platform != 'win32':
        # Linux uses the console encoding for parameters as well.
        # I don't know how non-Western Windows versions behave.
        return unicode(arg, config.console_encoding)
    # Some Windows console codepages lie about the parameter encoding.
    if config.console_encoding in ('cp437', 'cp850'):
        # Western Windows versions give parameters encoded as windows-1252
        # even though the console encoding is cp850 or cp437.
        return unicode(arg, 'windows-1252')
    if config.console_encoding == 'cp852':
        # Central/Eastern European Windows versions give parameters encoded
        # as windows-1250 even though the console encoding is cp852.
        return unicode(arg, 'windows-1250')
    return unicode(arg, config.console_encoding)
def handleArgs(*args):
    """Handle standard command line arguments, return the rest as a list.

    Takes the commandline arguments, converts them to Unicode, processes all
    global parameters such as -lang or -log. Returns a list of all arguments
    that are not global. This makes sure that global arguments are applied
    first, regardless of the order in which the arguments were given.

    args may be passed as an argument, thereby overriding sys.argv
    """
    global default_code, default_family, verbose, simulate
    # get commandline arguments if necessary
    if not args:
        args = sys.argv[1:]
    # get the name of the module calling this function. This is
    # required because the -help option loads the module's docstring and because
    # the module name will be used for the filename of the log.
    # NOTE(review): moduleName is computed but never used below - confirm
    # whether showHelp() was meant to receive it.
    moduleName = calledModuleName()
    nonGlobalArgs = []
    username = None
    do_help = False
    for arg in args:
        arg = _decodeArg(arg)
        if arg == '-help':
            do_help = True
        elif arg.startswith('-family:'):
            default_family = arg[len("-family:"):]
        elif arg.startswith('-lang:'):
            default_code = arg[len("-lang:"):]
        elif arg.startswith("-user:"):
            username = arg[len("-user:"):]
        elif arg.startswith('-putthrottle:'):
            config.put_throttle = int(arg[len("-putthrottle:"):])
            put_throttle.setDelay()
        elif arg.startswith('-pt:'):
            # short form of -putthrottle:
            config.put_throttle = int(arg[len("-pt:"):])
            put_throttle.setDelay()
        elif arg.startswith("-maxlag:"):
            config.maxlag = int(arg[len("-maxlag:"):])
        elif arg == '-log':
            setLogfileStatus(True)
        elif arg.startswith('-log:'):
            # enable logging with an explicit filename
            setLogfileStatus(True, arg[len("-log:"):])
        elif arg.startswith('-loghandler:'):
            config.loghandler = arg[len('-loghandler:'):]
        elif arg == '-nolog':
            setLogfileStatus(False)
        elif arg in ('-cosmeticchanges', '-cc'):
            # toggle (not set) the configured value
            config.cosmetic_changes = not config.cosmetic_changes
            output(u'NOTE: option cosmetic_changes is %s\n'
                   % config.cosmetic_changes)
        elif arg == '-simulate':
            simulate = True
        elif arg == '-dry':
            output(u"Usage of -dry is deprecated; use -simulate instead.")
            simulate = True
        elif arg == '-debug':
            if not logger:
                init_handlers()
            logger.setLevel(DEBUG)
            config.special_page_limit = 500
        elif arg in ('-verbose', '-v'):
            # -v may be repeated to increase verbosity
            verbose += 1
        elif arg == '-daemonize':
            import daemonize
            daemonize.daemonize()
        elif arg.startswith('-daemonize:'):
            import daemonize
            daemonize.daemonize(redirect_std=arg[len('-daemonize:'):])
        else:
            # argument not global -> specific bot script will take care
            nonGlobalArgs.append(arg)
    # -user: must be applied after -family:/-lang: regardless of order
    if username:
        config.usernames[default_family][default_code] = username
    #init_handlers()
    # TEST for bug #3081100
    if unicode_error:
        output("""
================================================================================
\03{lightyellow}WARNING:\03{lightred} your python version might trigger issue #3081100\03{default}
More information: See https://sourceforge.net/support/tracker.php?aid=3081100
\03{lightyellow}Please update python to 2.7.2+ if you are running on wikimedia sites!\03{default}
================================================================================
""")
    if verbose:
        output(u'Pywikibot %s' % (version.getversion()))
        output(u'Python %s' % sys.version)
    if do_help:
        showHelp()
        sys.exit(0)
    return nonGlobalArgs
def showHelp(moduleName=None):
    """Output the global option help text plus the calling module's docstring.

    @param moduleName: deprecated; the calling module is detected
        automatically and this parameter should be left out.
    """
    moduleName = moduleName or calledModuleName()
    try:
        # Strip a Windows-style path prefix, if any.
        moduleName = moduleName[moduleName.rindex("\\") + 1:]
    except ValueError:
        # There was no \ in the module name, so presumably no problem
        pass
    globalHelp = u'''
Global arguments available for all bots:
-dir:PATH Read the bot's configuration data from directory given by
PATH, instead of from the default directory.
-lang:xx Set the language of the wiki you want to work on, overriding
the configuration in user-config.py. xx should be the
language code.
-family:xyz Set the family of the wiki you want to work on, e.g.
wikipedia, wiktionary, wikitravel, ...
This will override the configuration in user-config.py.
-user:xyz Log in as user 'xyz' instead of the default username.
-daemonize:xyz Immediately return control to the terminal and redirect
stdout and stderr to xyz (only use for bots that require
no input from stdin).
-help Show this help text.
-log Enable the logfile, using the default filename
"%s.log"
Logs will be stored in the logs subdirectory.
-log:xyz Enable the logfile, using 'xyz' as the filename.
-nolog Disable the logfile (if it is enabled by default).
-loghandler:xyz Choose a value for 'xyz' from 'TRFH' (TimedRotatingFile-
Handler) or 'RFH' (RotatingFileHandler). Has to be defined
before '-log' on command line.
-maxlag Sets a new maxlag parameter to a number of seconds. Defer bot
edits during periods of database server lag. Default is set by
config.py
-putthrottle:n Set the minimum time (in seconds) the bot will wait between
-pt:n saving pages.
-verbose Have the bot provide additional output that may be
-v useful in debugging.
-cosmeticchanges Toggles the cosmetic_changes setting made in config.py or
-cc user_config.py to its inverse and overrules it. All other
settings and restrictions are untouched.
-simulate Disables writing to the server. Useful for testing and
debugging of new code (if given, doesn't do any real
changes, but only shows what would have been changed).
''' % moduleName
    output(globalHelp, toStdout=True)
    try:
        # Import the calling module to show its own docstring as help.
        exec('import %s as module' % moduleName)
        helpText = module.__doc__.decode('utf-8')
        if hasattr(module, 'docuReplacements'):
            for key, value in module.docuReplacements.iteritems():
                helpText = helpText.replace(key, value.strip('\n\r'))
        output(helpText, toStdout=True)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt; -help still works whatever goes wrong.
        output(u'Sorry, no help available for %s' % moduleName)
#########################
# Interpret configuration
#########################

# search for user interface module in the 'userinterfaces' subdirectory
sys.path.append(config.datafilepath('userinterfaces'))
# Py2 exec statement: load the UI implementation chosen in the config.
exec "import %s_interface as uiModule" % config.userinterface
ui = uiModule.UI()
verbose = 0        # incremented by each -v/-verbose option
simulate = False   # True disables all writes to the server

# TEST for bug #3081100: NFC normalization of Devanagari must round-trip.
unicode_error = __import__('unicodedata').normalize(
    'NFC',
    u'\u092e\u093e\u0930\u094d\u0915 \u091c\u093c\u0941\u0915\u0947\u0930\u092c\u0930\u094d\u0917') != \
    u'\u092e\u093e\u0930\u094d\u0915 \u091c\u093c\u0941\u0915\u0947\u0930\u092c\u0930\u094d\u0917'
if unicode_error:
    print u'unicode test: triggers problem #3081100'

default_family = config.family
default_code = config.mylang
# Check
# if the default family+wiki is a non-public one,
# getSite will try login in. We don't want that, the module
# is not yet loaded.
getSite(noLogin=True)

# Set socket timeout
socket.setdefaulttimeout(config.socket_timeout)
def writeToCommandLogFile():
    """
    Save the name of the called module along with all parameters to
    logs/commands.log so that the user can look it up later to track errors
    or report bugs.
    """
    modname = os.path.basename(sys.argv[0])
    # put quotation marks around all parameters
    args = [_decodeArg(modname)] + [_decodeArg('"%s"' % s)
                                    for s in sys.argv[1:]]
    commandLogFilename = config.datafilepath('logs', 'commands.log')
    try:
        commandLogFile = codecs.open(commandLogFilename, 'a', 'utf-8')
    except IOError:
        # appending failed; try creating the file from scratch instead
        commandLogFile = codecs.open(commandLogFilename, 'w', 'utf-8')
    # add a timestamp in ISO 8601 formulation
    isoDate = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    commandLogFile.write("%s r%s Python %s "
                         % (isoDate, version.getversiondict()['rev'],
                            sys.version.split()[0]))
    s = u' '.join(args)
    commandLogFile.write(s + os.linesep)
    commandLogFile.close()
# Initialize the handlers and formatters for the logging system.
#
# ( Please confer branches/rewrite/pywikibot/bot.py for further info )

# The 'pywiki' Logger; created lazily by init_handlers().
logger = None
#_handlers_initialized = False
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record.

    Local stand-in for logging.NullHandler, which only exists from
    Python 2.7 on; kept for backward compatibility with Python 2.6.
    """

    def handle(self, record):
        # Swallow the record; nothing is filtered or emitted.
        pass

    def emit(self, record):
        # Never produces any output.
        pass

    def createLock(self):
        # No I/O ever happens, so no lock is required.
        self.lock = None
def setLogfileStatus(enabled, logname=None, header=False):
    # NOTE-1: disable 'fh.setFormatter(formatter)' below in order to get "old"
    # logging format (without additional info)
    # NOTE-2: enable 'logger.addHandler(ch)' below in order output to console
    # also (e.g. for simplifying 'pywikibot.output')
    # NOTE(review): the 'enabled' parameter is only referenced by the
    # commented-out propagate line at the bottom - confirm that is intended.
    global logger  # _handlers_initialized
    if not logger:
        init_handlers()
    logger = logging.getLogger('pywiki')
    # three handlers allowed (Null, one for pwb.py, one for script)
    if len(logger.handlers) < 3:
        moduleName = calledModuleName()
        if not moduleName:
            moduleName = "terminal-interface"
        if not logname:
            logname = '%s.log' % moduleName
            if pywikibot.throttle.pid > 1:
                # several processes running: one logfile per process id
                logname = '%s.%s.log' % (moduleName, pywikibot.throttle.pid)
        logfn = config.datafilepath('logs', logname)
        # create file handler which logs even debug messages
        if config.loghandler.upper() == 'RFH':
            fh = logging.handlers.RotatingFileHandler(
                filename=logfn, maxBytes=1024 * config.logfilesize,
                backupCount=config.logfilecount, encoding="utf-8")
        else:
            # e.g. 0206 for Python 2.6 - used for version-specific patches
            ver = int('%02i%02i' % tuple(sys.version_info)[:2])
            kwargs = {'when': 'midnight',
                      ## 'encoding': 'bz2-codec'
                      'encoding': "utf-8"}
            if ver > int('0205'):
                # For Python > 2.5 (added in version 2.6)
                kwargs['utc'] = True
            fh = logging.handlers.TimedRotatingFileHandler(logfn, **kwargs)
            # patch for "Issue 8117: TimedRotatingFileHandler doesn't rotate log
            # file at startup."
            # applies to python2.6 only, solution filched from python2.7 source:
            # http://hg.python.org/cpython-fullhistory/diff/a566e53f106d/Lib/logging/handlers.py
            if os.path.exists(logfn) and (ver == int('0206')):
                t = os.stat(logfn).st_mtime
                fh.rolloverAt = fh.computeRollover(t)
        ## fh.setLevel(DEBUG)
        # create console handler with a higher log level
        ## ch = logging.StreamHandler()
        ## ch.setLevel(DEBUG)
        # create formatter and add it to the handlers
        # (using LogRecord attributes)
        formatter = logging.Formatter(fmt="%(asctime)s %(caller_file)18s, "
                                          "%(caller_line)4s in %(caller_name)18s: "
                                          "%(levelname)-8s %(message)s",
                                      datefmt="%Y-%m-%d %H:%M:%S")
        fh.setFormatter(formatter)
        #ch.setFormatter(formatter)
        # add the handlers to logger
        logger.addHandler(fh)  # output to logfile
        ## logger.addHandler(ch) # output to terminal/shell console
        if header:
            writelogheader()
    #logger.propagate = enabled
def init_handlers(strm=None):  # , logname=None, header=False):
    """Initialize logging system for terminal-based bots.

    This function must be called before using pywikibot.output(); and must
    be called again if the destination stream is changed.

    @param strm: Output stream. If None, re-uses the last stream if one
        was defined, otherwise uses sys.stderr

    Note: this function is called by handleArgs(), so it should normally
    not need to be called explicitly

    All user output is routed through the logging module.
    Each type of output is handled by an appropriate handler object.
    This structure is used to permit eventual development of other
    user interfaces (GUIs) without modifying the core bot code.
    The following output levels are defined:
       DEBUG - only for file logging; debugging messages
       STDOUT - output that must be sent to sys.stdout (for bots that may
                have their output redirected to a file or other destination)
       VERBOSE - optional progress information for display to user
       INFO - normal (non-optional) progress information for display to user
       INPUT - prompts requiring user response
       WARN - user warning messages
       ERROR - user error messages
       CRITICAL - fatal error messages

    Accordingly, do ''not'' use print statements in bot code; instead,
    use pywikibot.output function.
    """
    # currently only the logger is initialized here
    # the handlers are initialized in setLogfileStatus
    global logger  # _handlers_initialized
    if not logger:
        logging.addLevelName(VERBOSE, "VERBOSE")
        # for messages to be displayed on terminal at "verbose" setting
        # use INFO for messages to be displayed even on non-verbose setting
        logging.addLevelName(STDOUT, "STDOUT")
        # for messages to be displayed to stdout
        logging.addLevelName(INPUT, "INPUT")
        # for prompts requiring user response
        logger = logging.getLogger()  # root logger
        nh = NullHandler()
        logger.addHandler(nh)
        logger.setLevel(DEBUG + 1)
        # BUG FIX: captureWarnings() is a module-level function of the
        # logging package (introduced in Python 2.7), not a Logger method,
        # so the old check hasattr(logger, 'captureWarnings') never
        # succeeded and the call was dead code.
        if hasattr(logging, 'captureWarnings'):
            logging.captureWarnings(True)  # introduced in Python >= 2.7
        logger = logging.getLogger('pywiki')
        logger.addHandler(nh)
        logger.setLevel(DEBUG + 1)
        logger.propagate = True
def writelogheader():
    """
    Save additional version, system and status info to the logfile in use,
    so that the user can look it up later to track errors or report bugs.
    """
    output(u'=== Pywikipediabot framework v1.0 -- Logging header ===')
    # script call
    output(u'COMMAND: %s' % unicode(sys.argv))
    # new framework release/revision? (handleArgs needs to be called first)
    site = getSite()
    try:
        output(u'VERSION: %s' % unicode((version.getversion().strip(),
                                         version.getversion_onlinerepo(),
                                         site.live_version())))
    except version.ParseError:
        # version could not be parsed; log the traceback and carry on
        exception()
    # system
    if hasattr(os, 'uname'):
        output(u'SYSTEM: %s' % unicode(os.uname()))
    # imported modules
    output(u'MODULES:')
    for item in sys.modules.keys():
        ver = version.getfileversion('%s.py' % item.replace('.', '/'))
        if ver:
            output(u' %s' % ver)
    # messages on bot discussion page?
    output(u'MESSAGES: %s' % ('unanswered' if site.messages() else 'none'))
    output(u'=== ' * 14)
init_handlers()  # guarantee 'logger' to be defined
writeToCommandLogFile()

# matches \03{colorname} color-markup sequences in output text
colorTagR = re.compile('\03{.*?}', re.UNICODE)
output_lock = threading.Lock()  # serializes writes to the user interface
input_lock = threading.Lock()   # held while a prompt waits for user input
output_cache = []               # output buffered while input_lock is held
# User output/logging functions
# Six output functions are defined. Each requires a unicode or string
# argument. All of these functions generate a message to the log file if
# logging is enabled ("-log" or "-debug" command line arguments).
# The functions output(), stdout(), warning(), and error() all display a
# message to the user through the logger object; the only difference is the
# priority level, which can be used by the application layer to alter the
# display. The stdout() function should be used only for data that is
# the "result" of a script, as opposed to information messages to the
# user.
# The function log() by default does not display a message to the user, but
# this can be altered by using the "-verbose" command line option.
# The function debug() only logs its messages, they are never displayed on
# the user console. debug() takes a required second argument, which is a
# string indicating the debugging layer.
# ( Please confer branches/rewrite/pywikibot/bot.py for further info )
# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        raise Exception
    except:
        # go back two levels, one for logoutput and one for whatever called it
        # NOTE: sys.exc_traceback is Python 2 only
        return sys.exc_traceback.tb_frame.f_back.f_back

if hasattr(sys, '_getframe'):
    # less portable but more efficient
    currentframe = lambda: sys._getframe(3)
    # frame0 is this lambda, frame1 is logoutput() in this module,
    # frame2 is the convenience function (output(), etc.)
    # so frame3 is whatever called the convenience function
# done filching
def logoutput(text, decoder=None, newline=True, _level=INFO, _logger="",
              **kwargs):
    """Format output and send to the logging module.

    Backend function used by all the user-output convenience functions.
    """
    if _logger:
        log = logging.getLogger("pywiki." + _logger)
    else:
        log = logging.getLogger("pywiki")
    # make sure logging system has been initialized
    if not logger:
        init_handlers()
    # frame of the convenience function's caller, for file/line/name info
    frame = currentframe()
    module = os.path.basename(frame.f_code.co_filename)
    context = {'caller_name': frame.f_code.co_name,
               'caller_file': module,
               'caller_line': frame.f_lineno,
               'newline': ("\n" if newline else "")}
    if decoder:
        text = unicode(text, decoder)
    elif not isinstance(text, unicode):
        if not isinstance(text, str):
            # looks like text is a non-text object.
            # Maybe it has a __unicode__ builtin ?
            # (allows to print Page, Site...)
            text = unicode(text)
        else:
            # byte string without decoder: try utf-8, fall back to latin-1
            try:
                text = unicode(text, 'utf-8')
            except UnicodeDecodeError:
                text = unicode(text, 'iso8859-1')
    log.log(_level, text, extra=context, **kwargs)
    # instead of logging handler for output to console (StreamHandler)
    if _level != INFO:
        text = u'%s: %s' % (logging.getLevelName(_level), text)
    if log.isEnabledFor(_level):
        _outputOld(text, decoder, newline, (_level == STDOUT), **kwargs)
def _outputOld(text, decoder=None, newline=True, toStdout=False, **kwargs):
"""Output a message to the user via the userinterface.
Works like print, but uses the encoding used by the user's console
(console_encoding in the configuration file) instead of ASCII.
If decoder is None, text should be a unicode string. Otherwise it
should be encoded in the given encoding.
If newline is True, a linebreak will be added after printing the text.
If toStdout is True, the text will be sent to standard output,
so that it can be piped to another process. All other text will
be sent to stderr. See: http://en.wikipedia.org/wiki/Pipeline_%28Unix%29
text can contain special sequences to create colored output. These
consist of the escape character \03 and the color name in curly braces,
e. g. \03{lightpurple}. \03{default} resets the color.
"""
output_lock.acquire()
try:
if decoder:
text = unicode(text, decoder)
elif type(text) is not unicode:
if verbose and sys.platform != 'win32':
print "DEBUG: > BUG: Non-unicode (%s) passed to " \
"wikipedia.output without decoder!" % type(text)
print traceback.print_stack()
print "DEBUG: > Attempting to recover, but please report " \
"this problem"
try:
text = unicode(text, 'utf-8')
except UnicodeDecodeError:
text = unicode(text, 'iso8859-1')
if newline:
text += u'\n'
if input_lock.locked():
cache_output(text, toStdout=toStdout)
else:
ui.output(text, toStdout=toStdout)
finally:
output_lock.release()
def cache_output(*args, **kwargs):
    """Buffer an output call made while user input is in progress."""
    output_cache.append((args, kwargs))


def flush_output_cache():
    """Replay and clear all output buffered during user input."""
    while(output_cache):
        (args, kwargs) = output_cache.pop(0)
        ui.output(*args, **kwargs)
def output(text, decoder=None, newline=True, toStdout=False, **kwargs):
    """Output a message to the user via the userinterface.

    Works like print, but uses the encoding used by the user's console
    (console_encoding in the configuration file) instead of ASCII.
    If decoder is None, text should be a unicode string. Otherwise it
    should be encoded in the given encoding.

    If newline is True, a linebreak will be added after printing the text.

    If toStdout is True, the text will be sent to standard output,
    so that it can be piped to another process. All other text will
    be sent to stderr. See: http://en.wikipedia.org/wiki/Pipeline_%28Unix%29

    text can contain special sequences to create colored output. These
    consist of the escape character \03 and the color name in curly braces,
    e. g. \03{lightpurple}. \03{default} resets the color.

    Other keyword arguments are passed unchanged to the logger; so far, the
    only argument that is useful is "exc_info=True", which causes the
    log message to include an exception traceback.
    """
    if toStdout:  # maintained for backwards-compatibity only
        logoutput(text, decoder, newline, STDOUT, **kwargs)
    else:
        logoutput(text, decoder, newline, INFO, **kwargs)
def stdout(text, decoder=None, newline=True, **kwargs):
    """Output script results to the user via the userinterface."""
    logoutput(text, decoder, newline, STDOUT, **kwargs)


def warning(text, decoder=None, newline=True, **kwargs):
    """Output a warning message to the user via the userinterface."""
    logoutput(text, decoder, newline, WARNING, **kwargs)


def error(text, decoder=None, newline=True, **kwargs):
    """Output an error message to the user via the userinterface."""
    logoutput(text, decoder, newline, ERROR, **kwargs)


def log(text, decoder=None, newline=True, **kwargs):
    """Output a record to the log file."""
    logoutput(text, decoder, newline, VERBOSE, **kwargs)


def critical(text, decoder=None, newline=True, **kwargs):
    """Output a critical error message to the log file."""
    logoutput(text, decoder, newline, CRITICAL, **kwargs)


def debug(text, layer="", decoder=None, newline=True, **kwargs):
    """Output a debug record to the log file.

    @param layer: debugging layer; routed to the 'pywiki.<layer>' logger.
    """
    logoutput(text, decoder, newline, DEBUG, layer, **kwargs)
def debugDump(name, site, error, data, **kwargs):
    """Output a very long debug/error message to own log file.

    Writes a '<name>_<site>__<timestamp>.dump' file containing the reported
    error followed by a pretty-printed dump of *data*, then logs an ERROR
    record pointing at the dump file.
    """
    name = unicode(name)
    site = repr(site)
    data = pprint.pformat(data)
    if isinstance(error, BaseException):
        error = traceback.format_exception_only(type(error), error)[-1]
    else:
        error = unicode(error)
    # build a filesystem-safe filename from name, site and timestamp
    filename = '%s_%s__%s.dump' % (name, site, time.asctime())
    filename = filename.replace(' ', '_').replace(':', '-')
    f = file(filename, 'wb')  # trying to write it in binary
    ## f = codecs.open(filename, 'w', 'utf-8')
    f.write(u'Error reported: %s\n\n' % error)
    try:
        f.write(data.encode("utf8"))
    except UnicodeDecodeError:
        # Py2: encoding a byte string first decodes it as ascii, which can
        # raise UnicodeDecodeError; fall back to writing the raw bytes.
        f.write(data)
    f.close()
    logoutput(u'%s caused error %s. Dump %s created.' % (name, error, filename),
              decoder=None, newline=True, _level=ERROR, **kwargs)
    ## decoder=None, newline=True, _level=DEBUG, **kwargs)
def exception(msg=None, decoder=None, newline=True, tb=False, **kwargs):
    """Output an error traceback to the user via the userinterface.

    @param tb: Set to True in order to output traceback also.

    Use directly after an 'except' statement:
        ...
        except:
            pywikibot.exception()
        ...
    or alternatively:
        ...
        except Exception, e:
            pywikibot.exception(e)
        ...
    """
    if isinstance(msg, BaseException):
        # NOTE(review): exc_info is set to the integer 1 here (a truthy
        # flag), so kwargs['exc_info'] below is not a real exc_info tuple
        # in this branch - confirm the logging backend accepts that.
        exc_info = 1
    else:
        exc_info = sys.exc_info()
        msg = traceback.format_exception_only(exc_info[0],
                                              exc_info[1])[-1].strip()
    if tb:
        kwargs['exc_info'] = exc_info
        _outputOld(traceback.format_exc().strip())  # (temporary work-a-round)
    logoutput(msg, decoder, newline, ERROR, **kwargs)
# User input functions
def input(question, password=False):
    """Ask the user a question, return the user's answer.

    Parameters:
    * question - a unicode string that will be shown to the user. Don't add a
                 space after the question mark/colon, this method will do this
                 for you.
    * password - if True, hides the user's input (for password entry).

    Returns a unicode string.
    """
    # Holding input_lock makes other threads buffer their output
    # (see _outputOld) instead of garbling the prompt.
    input_lock.acquire()
    try:
        data = ui.input(question, password)
    finally:
        # replay whatever other threads printed while the prompt was open
        flush_output_cache()
        input_lock.release()
    return data
def inputChoice(question, answers, hotkeys, default=None):
    """Ask the user a question with several options, return the user's choice.

    The user's input will be case-insensitive, so the hotkeys should be
    distinctive case-insensitively.

    Parameters:
    * question - a unicode string that will be shown to the user. Don't add a
                 space after the question mark, this method will do this
                 for you.
    * answers  - a list of strings that represent the options.
    * hotkeys  - a list of one-letter strings, one for each answer.
    * default  - an element of hotkeys, or None. The default choice that will
                 be returned when the user just presses Enter.

    Returns a one-letter string in lowercase.
    """
    # Holding input_lock makes other threads buffer their output
    # (see _outputOld) instead of garbling the prompt.
    input_lock.acquire()
    try:
        data = ui.inputChoice(question, answers, hotkeys, default).lower()
    finally:
        # replay whatever other threads printed while the prompt was open
        flush_output_cache()
        input_lock.release()
    return data
# Queue of pending page saves, consumed by the async_put daemon thread.
page_put_queue = Queue.Queue(config.max_queue_size)
def async_put():
    """Daemon; take pages from the queue and try to save them on the wiki."""
    while True:
        (page, newtext, comment, watchArticle, minorEdit, force, callback
         ) = page_put_queue.get()
        if page is None:
            # an explicit end-of-Queue marker is needed for compatibility
            # with Python 2.4; in 2.5, we could use the Queue's task_done()
            # and join() methods
            return
        try:
            page.put(newtext, comment, watchArticle, minorEdit, force)
            error = None
        except Exception, error:
            # error keeps the exception instance for reporting below
            pass
        if callback is not None:
            callback(page, error)
            # if callback is provided, it is responsible for exception handling
            continue
        # no callback: report the failure ourselves, by error type
        if isinstance(error, SpamfilterError):
            output(u"Saving page %s prevented by spam filter: %s"
                   % (page, error.url))
        elif isinstance(error, PageNotSaved):
            output(u"Saving page %s failed: %s" % (page, error))
        elif isinstance(error, LockedPage):
            output(u"Page %s is locked; not saved." % page)
        elif isinstance(error, NoUsername):
            output(u"Page %s not saved; sysop privileges required." % page)
        elif error is not None:
            exception(error, tb=True)
            output(u"Saving page %s failed!" % page)
# Background thread that performs the queued page saves.
_putthread = threading.Thread(target=async_put)
# identification for debugging purposes
_putthread.setName('Put-Thread')
_putthread.setDaemon(True)
## Don't start the queue if it is not necessary.
#_putthread.start()
def stopme():
    """This should be run when a bot does not interact with the Wiki, or
    when it has stopped doing so. After a bot has run stopme() it will
    not slow down other bots any more.
    """
    # dropping the throttle entry frees our slot for other processes
    get_throttle.drop()
    #logging.shutdown()
def _flush():
    """Wait for the page-putter to flush its queue.

    Called automatically upon exiting from Python.
    """
    def remaining():
        # number of queued pages and the estimated time to save them
        remainingPages = page_put_queue.qsize() - 1
        # -1 because we added a None element to stop the queue
        remainingSeconds = datetime.timedelta(
            seconds=(remainingPages * put_throttle.getDelay(True)))
        return (remainingPages, remainingSeconds)

    # enqueue the end-of-queue marker understood by async_put()
    page_put_queue.put((None, ) * 7)
    if page_put_queue.qsize() > 1:
        num, sec = remaining()
        format_values = dict(num=num, sec=sec)
        output(u'\03{lightblue}'
               u'Waiting for %(num)i pages to be put. '
               u'Estimated time remaining: %(sec)s'
               '\03{default}' % format_values)
    while(_putthread.isAlive()):
        try:
            _putthread.join(1)
        except KeyboardInterrupt:
            # allow the user to abandon the remaining queued saves
            answer = inputChoice(u"""\
There are %i pages remaining in the queue. Estimated time remaining: %s
Really exit?"""
                                 % remaining(),
                                 ['yes', 'no'], ['y', 'N'], 'N')
            if answer == 'y':
                return
    try:
        get_throttle.drop()
    except NameError:
        # get_throttle may not exist if module setup failed early
        pass
    if config.use_diskcache and not config.use_api:
        # clean up the on-disk mediawiki-messages caches
        for site in _sites.itervalues():
            if site._mediawiki_messages:
                try:
                    site._mediawiki_messages.delete()
                except OSError:
                    pass
import atexit
# flush the page-save queue automatically when the interpreter exits
atexit.register(_flush)

# Throttles shared by all bot threads (read vs. write rate limiting).
get_throttle = Throttle()
put_throttle = Throttle(write=True)
def decompress_gzip(data):
    """Return *data* decompressed as gzip; empty input is passed through.

    Uses cStringIO when available for speed.
    """
    # TODO: rewrite gzip.py such that it supports unseekable fileobjects.
    if data:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        import gzip
        # GzipFile needs a seekable file object, hence the StringIO wrapper.
        # (The old 'try/except IOError: raise' around this call was a no-op
        # re-raise and has been removed.)
        data = gzip.GzipFile(fileobj=StringIO(data)).read()
    return data
def parsetime2stamp(tz):
    """Convert an ISO 8601 'Z' timestamp to a compact YYYYMMDDHHMMSS integer."""
    parsed = time.strptime(tz, "%Y-%m-%dT%H:%M:%SZ")
    # first six struct_time fields are year, month, day, hour, minute, second
    return int('%04d%02d%02d%02d%02d%02d' % tuple(parsed)[:6])
#Redirect Handler for urllib2
class U2RedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that sanitizes redirected requests and keeps the
    redirect status code plus any Set-Cookie headers on the result."""

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        newreq = urllib2.HTTPRedirectHandler.redirect_request(self, req, fp,
                                                              code, msg,
                                                              headers, newurl)
        # A redirect that becomes a GET must not carry over the original
        # request's Content-Length header (in any capitalisation).
        if (newreq.get_method() == "GET"):
            for cl in ("Content-Length", "Content-length", "content-length",
                       "CONTENT-LENGTH"):
                if newreq.has_header(cl):
                    del newreq.headers[cl]
        return newreq

    def http_error_301(self, req, fp, code, msg, headers):
        result = urllib2.HTTPRedirectHandler.http_error_301(
            self, req, fp, code, msg, headers)
        # preserve the status code and Set-Cookie headers for the caller
        result.code = code
        result.sheaders = [v for v in headers.__str__().split('\n')
                           if v.startswith('Set-Cookie:')]
        return result

    def http_error_302(self, req, fp, code, msg, headers):
        result = urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
        # preserve the status code and Set-Cookie headers for the caller
        result.code = code
        result.sheaders = [v for v in headers.__str__().split('\n')
                           if v.startswith('Set-Cookie:')]
        return result
# Site Cookies handler
COOKIEFILE = config.datafilepath('login-data', 'cookies.lwp')
cj = cookielib.LWPCookieJar()
if os.path.isfile(COOKIEFILE):
    cj.load(COOKIEFILE)
cookieProcessor = urllib2.HTTPCookieProcessor(cj)

# shared opener used for all HTTP traffic of this module
MyURLopener = urllib2.build_opener(U2RedirectHandler)

# optional HTTP(S) proxy, with optional basic authentication
if config.proxy['host']:
    proxyHandler = urllib2.ProxyHandler({'http': 'http://%s/' % config.proxy['host'],
                                         'https': 'https://%s' % config.proxy['host']})
    MyURLopener.add_handler(proxyHandler)
    if config.proxy['auth']:
        proxyAuth = urllib2.HTTPPasswordMgrWithDefaultRealm()
        proxyAuth.add_password(None, config.proxy['host'],
                               config.proxy['auth'][0], config.proxy['auth'][1])
        proxyAuthHandler = urllib2.ProxyBasicAuthHandler(proxyAuth)
        MyURLopener.add_handler(proxyAuthHandler)

# optional HTTP basic authentication for the configured sites
if config.authenticate:
    passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
    for site in config.authenticate:
        passman.add_password(None, site, config.authenticate[site][0],
                             config.authenticate[site][1])
    authhandler = urllib2.HTTPBasicAuthHandler(passman)
    MyURLopener.add_handler(authhandler)

MyURLopener.addheaders = [('User-agent', useragent)]
# The following will monkey-patch the pywikibot module to contain the same
# functions and variables as wikipedia itself. This means we no longer have
# to import wikipedia as pywikibot - instead, we can just import pywikibot
#
# Alas, it ain't beautiful, but it does work.
import pywikibot
pywikibot.__dict__.update(locals())

# honour the config.log setting ('*' enables logging for every script)
if '*' in config.log or calledModuleName() in config.log:
    setLogfileStatus(True)

if __name__ == '__main__':
    import doctest
    print 'Pywikibot %s' % version.getversion()
    print 'Python %s' % sys.version
    doctest.testmod()
| gpl-3.0 |
jwhonce/origin | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py | 365 | 1084 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    Runs '<bin_name> --version' and extracts the first three numbers
    from its output.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)

    """
    cmd = '{} --version'.format(bin_name).split()
    version_string = subprocess.check_output(cmd).decode('utf-8')
    # keep only the first three numeric groups: major, minor, patch
    return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
| apache-2.0 |
jonashagstedt/tornado | tornado/util.py | 1 | 12529 | """Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import inspect
import os
import sys
import zlib
try:
xrange # py2
except NameError:
xrange = range # py3
class ObjectDict(dict):
    """Dictionary subclass that exposes its keys as attributes.

    Reading a missing attribute raises AttributeError (not KeyError),
    matching normal object semantics.
    """

    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # attribute assignment simply stores a dictionary entry
        self[name] = value
class GzipDecompressor(object):
    """Streaming gzip decompressor.

    The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
    """
    def __init__(self):
        # Magic parameter makes zlib module understand gzip header
        # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
        # This works on cpython and pypy, but not jython.
        self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def decompress(self, value, max_length=None):
        """Decompress a chunk, returning newly-available data.

        Some data may be buffered for later processing; `flush` must
        be called when there is no more input data to ensure that
        all data was processed.

        If ``max_length`` is given, some input data may be left over
        in ``unconsumed_tail``; you must retrieve this value and pass
        it back to a future call to `decompress` if it is not empty.
        """
        # BUG FIX: zlib.Decompress.decompress requires an integer
        # max_length; passing the default None raised TypeError whenever
        # the argument was omitted.
        if max_length is None:
            return self.decompressobj.decompress(value)
        return self.decompressobj.decompress(value, max_length)

    @property
    def unconsumed_tail(self):
        """Returns the unconsumed portion left over
        """
        return self.decompressobj.unconsumed_tail

    def flush(self):
        """Return any remaining buffered data not yet returned by decompress.

        Also checks for errors such as truncated input.
        No other methods may be called on this object after `flush`.
        """
        return self.decompressobj.flush()
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if not isinstance(b'', type('')):
    # Python 3: bytes and str are distinct; str is already unicode.
    def u(s):
        return s
    unicode_type = str
    basestring_type = str
else:
    # Python 2: decode the \u escapes at runtime to get a unicode object.
    def u(s):
        return s.decode('unicode_escape')
    # These names don't exist in py3, so use noqa comments to disable
    # warnings in flake8.
    unicode_type = unicode  # noqa
    basestring_type = basestring  # noqa
def import_object(name):
    """Imports an object by name.

    import_object('x') is equivalent to 'import x'.
    import_object('x.y.z') is equivalent to 'from x.y import z'.

    >>> import tornado.escape
    >>> import_object('tornado.escape') is tornado.escape
    True
    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
    True
    >>> import_object('tornado') is tornado
    True
    >>> import_object('tornado.missing_module')
    Traceback (most recent call last):
        ...
    ImportError: No module named missing_module
    """
    if isinstance(name, unicode_type) and str is not unicode_type:
        # On python 2 a byte string is required.
        name = name.encode('utf-8')
    if '.' not in name:
        # A bare module name: behave like a plain import.
        return __import__(name, None, None)
    module_name, _, attr_name = name.rpartition('.')
    module = __import__(module_name, None, None, [attr_name], 0)
    try:
        return getattr(module, attr_name)
    except AttributeError:
        raise ImportError("No module named %s" % attr_name)
# Deprecated alias that was used before we dropped py25 support.
# Left here in case anyone outside Tornado is using it.
bytes_type = bytes

# raise_exc_info and exec_in need syntax that is only valid on one major
# Python version, so each variant is defined via exec() of a string; a
# plain def would be a SyntaxError on the other version.
if sys.version_info > (3,):
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
    if isinstance(code, str):
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec(code, glob, loc)
""")
else:
    # Python 2: three-argument raise and statement-form exec.
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
    if isinstance(code, basestring):
        # exec(string) inherits the caller's future imports; compile
        # the string first to prevent that.
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec code in glob, loc
""")
def errno_from_exception(e):
    """Provides the errno from an Exception object.

    There are cases that the errno attribute was not set so we pull
    the errno out of the args but if someone instantiates an Exception
    without any args you will get a tuple error. So this function
    abstracts all that behavior to give you a safe way to get the
    errno.
    """
    try:
        # An explicitly-set errno attribute wins, even if it is None.
        return e.errno
    except AttributeError:
        pass
    return e.args[0] if e.args else None
class Configurable(object):
    """Base class for configurable interfaces.

    A configurable interface is an (abstract) class whose constructor
    acts as a factory function for one of its implementation subclasses.
    The implementation subclass as well as optional keyword arguments to
    its initializer can be set globally at runtime with `configure`.

    By using the constructor as the factory method, the interface
    looks like a normal class, `isinstance` works as usual, etc. This
    pattern is most useful when the choice of implementation is likely
    to be a global decision (e.g. when `~select.epoll` is available,
    always use it instead of `~select.select`), or when a
    previously-monolithic class has been split into specialized
    subclasses.

    Configurable subclasses must define the class methods
    `configurable_base` and `configurable_default`, and use the instance
    method `initialize` instead of ``__init__``.
    """
    # Name-mangled (to _Configurable__impl_*) so assignments via ``base``
    # always target this class's attributes regardless of subclassing.
    __impl_class = None
    __impl_kwargs = None

    def __new__(cls, **kwargs):
        base = cls.configurable_base()
        args = {}
        if cls is base:
            # Instantiating the base class: dispatch to the configured
            # (or default) implementation, merging any configured kwargs.
            impl = cls.configured_class()
            if base.__impl_kwargs:
                args.update(base.__impl_kwargs)
        else:
            # Instantiating a concrete subclass directly: no dispatch.
            impl = cls
        args.update(kwargs)
        instance = super(Configurable, cls).__new__(impl)
        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
        # singleton magic. If we get rid of that we can switch to __init__
        # here too.
        instance.initialize(**args)
        return instance

    @classmethod
    def configurable_base(cls):
        """Returns the base class of a configurable hierarchy.

        This will normally return the class in which it is defined.
        (which is *not* necessarily the same as the cls classmethod parameter).
        """
        raise NotImplementedError()

    @classmethod
    def configurable_default(cls):
        """Returns the implementation class to be used if none is configured."""
        raise NotImplementedError()

    def initialize(self):
        """Initialize a `Configurable` subclass instance.

        Configurable classes should use `initialize` instead of ``__init__``.
        """

    @classmethod
    def configure(cls, impl, **kwargs):
        """Sets the class to use when the base class is instantiated.

        Keyword arguments will be saved and added to the arguments passed
        to the constructor. This can be used to set global defaults for
        some parameters.
        """
        base = cls.configurable_base()
        if isinstance(impl, (unicode_type, bytes)):
            # Accept a dotted-path string naming the implementation.
            impl = import_object(impl)
        if impl is not None and not issubclass(impl, cls):
            raise ValueError("Invalid subclass of %s" % cls)
        base.__impl_class = impl
        base.__impl_kwargs = kwargs

    @classmethod
    def configured_class(cls):
        """Returns the currently configured class."""
        base = cls.configurable_base()
        if cls.__impl_class is None:
            # Cache the default implementation on the base class.
            base.__impl_class = cls.configurable_default()
        return base.__impl_class

    @classmethod
    def _save_configuration(cls):
        # NOTE(review): appears intended for snapshotting global
        # configuration (paired with _restore_configuration) — confirm
        # with callers (e.g. test utilities).
        base = cls.configurable_base()
        return (base.__impl_class, base.__impl_kwargs)

    @classmethod
    def _restore_configuration(cls, saved):
        # Inverse of _save_configuration.
        base = cls.configurable_base()
        base.__impl_class = saved[0]
        base.__impl_kwargs = saved[1]
class ArgReplacer(object):
    """Replaces one value in an ``args, kwargs`` pair.

    Inspects the function signature to find an argument by name
    whether it is passed by position or keyword. For use in decorators
    and similar wrappers.
    """
    def __init__(self, func, name):
        self.name = name
        try:
            self.arg_pos = self._positional_args(func).index(name)
        except ValueError:
            # Not a positional parameter
            self.arg_pos = None

    @staticmethod
    def _positional_args(func):
        """Return the list of positional argument names of ``func``.

        ``inspect.getargspec`` was deprecated in Python 3 and removed in
        3.11; ``getfullargspec`` provides the same ``args`` list of
        positional parameter names.
        """
        if hasattr(inspect, 'getfullargspec'):
            return inspect.getfullargspec(func).args
        return inspect.getargspec(func).args

    def get_old_value(self, args, kwargs, default=None):
        """Returns the old value of the named argument without replacing it.

        Returns ``default`` if the argument is not present.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            return args[self.arg_pos]
        else:
            return kwargs.get(self.name, default)

    def replace(self, new_value, args, kwargs):
        """Replace the named argument in ``args, kwargs`` with ``new_value``.

        Returns ``(old_value, args, kwargs)``. The returned ``args`` and
        ``kwargs`` objects may not be the same as the input objects, or
        the input objects may be mutated.

        If the named argument was not found, ``new_value`` will be added
        to ``kwargs`` and None will be returned as ``old_value``.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            # The arg to replace is passed positionally
            old_value = args[self.arg_pos]
            args = list(args)  # *args is normally a tuple
            args[self.arg_pos] = new_value
        else:
            # The arg to replace is either omitted or passed by keyword.
            old_value = kwargs.get(self.name)
            kwargs[self.name] = new_value
        return old_value, args, kwargs
def timedelta_to_seconds(td):
    """Equivalent to td.total_seconds() (introduced in python 2.7)."""
    # Convert everything to microseconds first, then back to float seconds.
    total_microseconds = ((td.days * 24 * 3600 + td.seconds) * 10 ** 6
                          + td.microseconds)
    return total_microseconds / float(10 ** 6)
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask = array.array("B", mask)
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ mask[i % 4]
if hasattr(unmasked, 'tobytes'):
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked.tobytes()
else:
return unmasked.tostring()
# Select the websocket masking implementation: the pure-python version
# when the extension is disabled, otherwise prefer the C speedup.
if (os.environ.get('TORNADO_NO_EXTENSION') or
        os.environ.get('TORNADO_EXTENSION') == '0'):
    # These environment variables exist to make it easier to do performance
    # comparisons; they are not guaranteed to remain supported in the future.
    _websocket_mask = _websocket_mask_python
else:
    try:
        from tornado.speedups import websocket_mask as _websocket_mask
    except ImportError:
        if os.environ.get('TORNADO_EXTENSION') == '1':
            # The extension was explicitly required; fail loudly.
            raise
        _websocket_mask = _websocket_mask_python
def doctests():
    # Return a unittest suite containing this module's doctests.
    import doctest
    return doctest.DocTestSuite()
| apache-2.0 |
ensemblr/llvm-project-boilerplate | include/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py | 17 | 3201 | # Test normal conditional branches in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffd8 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 8 bytes if it uses a short branch
# and 10 if it uses a long one. The ones before "main:" have to take the branch
# length into account -- which is 4 bytes for short branches -- so the final
# (0x28 - 4) / 8 == 4 blocks can use short branches. The ones after "main:"
# do not, so the first 0x28 / 8 == 5 can use short branches. However,
# the conservative algorithm we use makes one branch unnecessarily long
# on each side.
#
# CHECK: c %r4, 0(%r3)
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 4(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 8(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 12(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 16(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 20(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 24(%r3)
# CHECK: j{{g?}}e [[LABEL]]
# CHECK: c %r4, 28(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 32(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 36(%r3)
# CHECK: je [[LABEL]]
# ...main goes here...
# CHECK: c %r4, 100(%r3)
# CHECK: je [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 104(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 108(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 112(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 116(%r3)
# CHECK: j{{g?}}e [[LABEL]]
# CHECK: c %r4, 120(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 124(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 128(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 132(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 136(%r3)
# CHECK: jge [[LABEL]]
# Number of conditional-branch blocks emitted before and after "main".
branch_blocks = 10
# Total byte size of the MVIY filler in "main" (see header comment).
main_size = 0xffd8

print '@global = global i32 0'
print 'define void @f1(i8 *%base, i32 *%stop, i32 %limit) {'
print 'entry:'
print '  br label %before0'
print ''

# Blocks before "main": each compares %limit with the next stop slot and
# branches to %after0 on equality, otherwise falls through to the next block.
for i in xrange(branch_blocks):
    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
    print 'before%d:' % i
    print '  %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i)
    print '  %%bcur%d = load i32 , i32 *%%bstop%d' % (i, i)
    print '  %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i)
    print '  br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
    print ''

print '%s:' % next

# Body of "main": volatile byte stores whose offsets/values follow a
# Fibonacci-style recurrence so consecutive stores differ.
a, b = 1, 1
for i in xrange(0, main_size, 6):
    a, b = b, a + b
    offset = 4096 + b % 500000
    value = a % 256
    print '  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
    print '  store volatile i8 %d, i8 *%%ptr%d' % (value, i)

# Blocks after "main": same comparisons, branching back to %main.
for i in xrange(branch_blocks):
    print '  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i + 25)
    print '  %%acur%d = load i32 , i32 *%%astop%d' % (i, i)
    print '  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
    print '  br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
    print ''
    print 'after%d:' % i

print '  %dummy = load volatile i32, i32 *@global'
print '  ret void'
print '}'
| mit |
kytos/python-openflow | tests/unit/v0x01/test_controller2switch/test_packet_out.py | 1 | 2854 | """Packet out message tests."""
from pyof.foundation.exceptions import ValidationError
from pyof.v0x01.common.action import ActionOutput
from pyof.v0x01.common.phy_port import Port
from pyof.v0x01.controller2switch.packet_out import PacketOut
from tests.unit.test_struct import TestStruct
class TestPacketOut(TestStruct):
    """Packet out message tests (also those in :class:`.TestDump`).

    Attributes:
        message (PacketOut): The message configured in :meth:`setUpClass`.
    """

    @classmethod
    def setUpClass(cls):
        """Configure raw file and its object in parent class (TestDump)."""
        super().setUpClass()
        super().set_raw_dump_file('v0x01', 'ofpt_packet_out')
        super().set_raw_dump_object(PacketOut, xid=8, buffer_id=4294967295,
                                    in_port=Port.OFPP_NONE, data=_get_data(),
                                    actions=_get_actions())
        super().set_minimum_size(16)

    def setUp(self):
        """Run before every test."""
        self.message = self.get_raw_object()

    def test_valid_virtual_in_ports(self):
        """Valid virtual ports as defined in 1.0.1 spec."""
        valid = (Port.OFPP_LOCAL, Port.OFPP_CONTROLLER, Port.OFPP_NONE)
        for in_port in valid:
            self.message.in_port = in_port
            self.assertTrue(self.message.is_valid())

    def test_invalid_virtual_in_ports(self):
        """Invalid virtual ports as defined in 1.0.1 spec."""
        # These virtual ports are not acceptable as a packet-out in_port.
        invalid = (Port.OFPP_IN_PORT, Port.OFPP_TABLE, Port.OFPP_NORMAL,
                   Port.OFPP_FLOOD, Port.OFPP_ALL)
        for in_port in invalid:
            self.message.in_port = in_port
            self.assertFalse(self.message.is_valid())
            self.assertRaises(ValidationError, self.message.validate)

    def test_valid_physical_in_ports(self):
        """Physical port limits from 1.0.0 spec."""
        # Valid physical ports are 1..OFPP_MAX inclusive.
        max_valid = int(Port.OFPP_MAX.value)
        for in_port in (1, max_valid):
            self.message.in_port = in_port
            self.assertTrue(self.message.is_valid())

    def test_invalid_physical_in_port(self):
        """Physical port limits from 1.0.0 spec."""
        # Below 1 or above OFPP_MAX (and not a virtual port) is invalid.
        max_valid = int(Port.OFPP_MAX.value)
        for in_port in (-1, 0, max_valid + 1, max_valid + 2):
            self.message.in_port = in_port
            self.assertFalse(self.message.is_valid())
            self.assertRaises(ValidationError, self.message.validate)
def _get_actions():
    """Function used to return a list of actions used by packetout instance."""
    # A single output action: forward to port 1, no buffer limit.
    return [ActionOutput(port=1, max_length=0)]
def _get_data():
"""Function used to return a BinaryData used by packetout instance."""
data = b'\x01# \x00\x00\x01\xd2A\xc6.*@\x88\xcc\x02\x07\x07dpi'
data += b'd:1\x04\x02\x021\x06\x02\x00x\x0c\x06dpid:1\x00\x00'
return data
| mit |
alexbrasetvik/Piped | contrib/manhole/piped_manhole/providers.py | 1 | 8642 | # Copyright (c) 2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
from twisted.application import internet, service, strports
from twisted.conch import manhole, manhole_ssh, error as conch_error
from twisted.conch.insults import insults
from twisted.conch.ssh import keys
from twisted.cred import error, portal
from twisted.internet import defer
from twisted.python import reflect
from zope import interface
from piped import resource
class ManholeProvider(object, service.MultiService):
    """ Embeds manholes in Piped services.

    Configuration example::

        manholes:
            my_manhole:
                enabled: true # defaults to true
                port: 10022 # defaults to 10022
                # or instead of port, you may use a strport
                # listen: 10222
                keys:
                    public_key_file: path # or public_key: str
                    private_key_file: path # or private_key: str
                checkers: # multiple checkers are allowed
                    inmemory:
                        checker: twisted.cred.checkers.InMemoryUsernamePasswordDatabaseDontUse
                        arguments:
                            username: password
    """
    # NOTE(review): inherits from both object and MultiService — presumably
    # to make the (old-style on py2) twisted class new-style; confirm.
    interface.classProvides(resource.IResourceProvider)

    def __init__(self):
        service.MultiService.__init__(self)

    def configure(self, runtime_environment):
        # Register as the 'manhole' service, then build one child
        # ManholeService per enabled entry in the configuration.
        self.setName('manhole')
        self.setServiceParent(runtime_environment.application)
        self.manholes = runtime_environment.get_configuration_value('manholes', dict())
        for manhole_name, manhole_configuration in self.manholes.items():
            if not manhole_configuration.get('enabled', True):
                continue # this manhole has been disabled, so don't create it
            manholeservice = ManholeService(manhole_configuration)
            manholeservice.setName(manhole_name)
            manholeservice.setServiceParent(self)
            manholeservice.configure(runtime_environment)
class PipedManhole(manhole.ColoredManhole):
    """ A colored manhole that handles a few extra key combinations. """

    def connectionMade(self):
        r = manhole.ColoredManhole.connectionMade(self)
        # add a keyhandler for what my macbook sends when Im hitting backspace
        self.keyHandlers['\x08'] = self.handle_BACKSPACE
        self.keyHandlers['\x01'] = self.handle_HOME # CTRL-A
        self.keyHandlers['\x05'] = self.handle_END # CTRL-E
        self.keyHandlers['\x15'] = self.handle_BACKSPACE_LINE # CTRL-U
        self.keyHandlers['\x17'] = self.handle_BACKSPACE_WORD # CTRL-W
        return r

    def handle_BACKSPACE_LINE(self):
        # Erase everything to the left of the cursor (CTRL-U).
        while self.lineBufferIndex > 0:
            self.handle_BACKSPACE()

    # Characters treated as word boundaries by CTRL-W.
    WORD_DELIMITERS = """ .;:({['\""""

    def handle_BACKSPACE_WORD(self):
        # Erase one word to the left of the cursor (CTRL-W): always delete
        # at least one character, then keep deleting until a delimiter.
        self.handle_BACKSPACE()
        while self.lineBufferIndex > 0 and self.lineBuffer[self.lineBufferIndex-1] not in self.WORD_DELIMITERS:
            self.handle_BACKSPACE()
class PipedConchFactory(manhole_ssh.ConchFactory):
    """ A conch factory that can be initialized with an explicit pair of
    public_key, private_key to use.

    When a key is not given, the superclass's key handling is left
    untouched.
    """

    def __init__(self, portal, private_key=None, public_key=None, **kw):
        manhole_ssh.ConchFactory.__init__(self, portal)
        if private_key:
            self.privateKeys = {
                'ssh-rsa' : keys.Key.fromString(private_key)
            }
        if public_key:
            self.publicKeys = {
                'ssh-rsa' : keys.Key.fromString(public_key)
            }
class ManholeService(service.MultiService):
    """ A configurable manhole service.

    See ManholeProvider for a configuration example.
    """
    # Factories used to build the protocol and SSH factory; may be
    # overridden by subclasses.
    protocolFactory = PipedManhole
    conchFactory = PipedConchFactory

    def __init__(self, manhole_configuration):
        service.MultiService.__init__(self)
        self.manhole_configuration = manhole_configuration

    def configure(self, runtime_environment):
        self.runtime_environment = runtime_environment
        self.key_config = self._normalize_key_config(self.manhole_configuration.get('keys', dict()))
        factory = self._make_factory()
        # 'listen' (a strports description) takes precedence over 'port';
        # the default port is 10022.
        listen = str(self.manhole_configuration.get('listen', self.manhole_configuration.get('port', 10022)))
        tcpservice = strports.service(listen, factory)
        tcpservice.setName(self.name)
        tcpservice.setServiceParent(self)
        self._configure_dependencies(self.manhole_configuration.get('dependencies', dict()))

    def _configure_dependencies(self, dependency_map):
        # A bare string entry is shorthand for dict(provider=<string>).
        for dependency_key, dependency_configuration in dependency_map.items():
            if isinstance(dependency_configuration, basestring):
                dependency_configuration = dependency_map[dependency_key] = dict(provider=dependency_configuration)
        self.dependencies = self.runtime_environment.create_dependency_map(self, **dependency_map)

    def _normalize_key_config(self, key_config):
        # Replace *_file references with the files' contents; accepts both
        # plain path strings and objects exposing a .path attribute.
        private_key_file = key_config.pop('private_key_file', None)
        public_key_file = key_config.pop('public_key_file', None)
        if private_key_file:
            private_key_file = getattr(private_key_file, 'path', private_key_file)
            key_config['private_key'] = open(private_key_file).read()
        if public_key_file:
            public_key_file = getattr(public_key_file, 'path', public_key_file)
            key_config['public_key'] = open(public_key_file).read()
        return key_config

    def _make_factory(self):
        # Wire realm + portal + chained insults protocol into an SSH factory.
        checkers = self._make_checkers()
        realm = PipedTerminalRealm()
        portal_ = MultipleCheckersPortal(realm, checkers)

        def chainProtocolFactory():
            return insults.ServerProtocol(self.protocolFactory, namespace=self._get_namespace())

        realm.chainedProtocolFactory = chainProtocolFactory
        factory = self.conchFactory(portal_, **self.key_config)
        return factory

    def _make_checkers(self):
        # Instantiate every configured credentials checker by dotted name.
        cs = list()
        for checker_config in self.manhole_configuration['checkers'].values():
            checker_name = checker_config.pop('checker')
            checker_factory = reflect.namedAny(checker_name)
            checker = checker_factory(**checker_config.get('arguments', dict()))
            cs.append(checker)
        return cs

    def _get_namespace(self):
        # Default interpreter namespace plus configured names resolved by
        # dotted path.
        namespace = dict(runtime_environment=self.runtime_environment, dependencies=self.dependencies)
        for key, value in self.manhole_configuration.get('namespace', dict()).items():
            namespace[key] = reflect.namedAny(value)
        return namespace
class PipedTerminalSessionTransport(manhole_ssh.TerminalSessionTransport):
    def __init__(self, proto, chainedProtocol, avatar, width, height):
        # Expose the avatar on the protocol before the superclass wires
        # everything together.
        chainedProtocol.avatar = avatar
        manhole_ssh.TerminalSessionTransport.__init__(self, proto, chainedProtocol, avatar, width, height)
class PipedTerminalRealm(manhole_ssh.TerminalRealm):
    # Use the transport subclass that attaches the avatar to the protocol.
    transportFactory = PipedTerminalSessionTransport

    def _getAvatar(self, avatarId):
        # Tag the avatar with its id so sessions can identify the user.
        avatar = manhole_ssh.TerminalRealm._getAvatar(self, avatarId)
        avatar.avatarId = avatarId
        return avatar
class MultipleCheckersPortal(portal.Portal):
    """ A Portal subclass that authenticates against multiple checkers. """

    def registerChecker(self, checker, *credentialInterfaces):
        # Unlike the base Portal, keep a *list* of checkers per credential
        # interface instead of a single checker.
        if not credentialInterfaces:
            credentialInterfaces = checker.credentialInterfaces
        for credentialInterface in credentialInterfaces:
            self.checkers.setdefault(credentialInterface, list()).append(checker)

    @defer.inlineCallbacks
    def login(self, credentials, mind, *interfaces):
        # Try every checker registered for the credentials' interface; the
        # first checker that authenticates wins.
        for i in self.checkers:
            if i.providedBy(credentials):
                for checker in self.checkers[i]:
                    try:
                        avatar_id = yield checker.requestAvatarId(credentials)
                        avatar = yield self.realm.requestAvatar(avatar_id, mind, *interfaces)
                        defer.returnValue(avatar)
                    except conch_error.ValidPublicKey:
                        # This is how SSHPublicKeyDatabase says "Your public key is okay, now prove you have
                        # the private key to continue".
                        raise
                    except error.UnauthorizedLogin:
                        continue
                raise error.UnauthorizedLogin()
        ifac = interface.providedBy(credentials)
        raise error.UnhandledCredentials("No checker for %s" % ', '.join(map(reflect.qual, ifac)))
| mit |
aaltay/beam | learning/katas/python/Core Transforms/Flatten/Flatten/tests.py | 7 | 1218 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_helper import failed, passed, get_file_output, test_is_not_empty
def test_output():
    """Check that the flattened output contains every expected word."""
    output = get_file_output()
    expected_words = ['ball', 'book', 'bow', 'apple', 'ant', 'arrow']
    missing = [word for word in expected_words if word not in output]
    if not missing:
        passed()
    else:
        failed("Incorrect output. Flatten both PCollection into one.")
if __name__ == '__main__':
    # First verify the solution produced any output at all, then check
    # the output contents.
    test_is_not_empty()
    test_output()
| apache-2.0 |
jonasschnelli/bitcoin | contrib/testgen/base58.py | 8 | 3124 | # Copyright (c) 2012-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin namespace mirroring the old PyCrypto-style interface:
    # SHA256.new(data) -> hashlib hash object.
    new = hashlib.sha256

if str != bytes:
    # Python 3.x
    def ord(c):
        # Iterating bytes already yields ints on py3; make ord a no-op.
        return c
    def chr(n):
        # Build a single byte instead of a unicode character.
        return bytes( (n,) )

# Base58 alphabet: digits/letters excluding 0, O, I and l to avoid
# visually ambiguous characters.
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """ encode v, which is a string of bytes, to base58.
    """
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    base = len(alphabet)
    # Interpret the byte string as one big-endian integer.
    acc = 0
    for (i, c) in enumerate(v[::-1]):
        if isinstance(c, str):  # py2 iteration yields 1-char strings
            c = ord(c)
        acc += c << (8 * i)
    # Convert the integer to base58, most significant digit first.
    encoded = ''
    while acc >= base:
        acc, mod = divmod(acc, base)
        encoded = alphabet[mod] + encoded
    encoded = alphabet[acc] + encoded
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    pad = 0
    for c in v:
        if c == 0:
            pad += 1
        else:
            break
    return (alphabet[0] * pad) + encoded
def b58decode(v, length = None):
    """ decode v into a string of len bytes
    """
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    base = len(alphabet)
    # Single-byte builder matching the module's py2/py3 chr shim.
    if str is bytes:
        to_byte = chr  # py2: bytes is str, builtin chr suffices
    else:
        to_byte = lambda n: bytes((n,))
    # Accumulate the base58 digits into one big integer.
    acc = 0
    for i, c in enumerate(v[::-1]):
        digit = alphabet.find(c)
        assert digit != -1
        acc += digit * (base ** i)
    # Emit the integer as big-endian bytes.
    decoded = bytes()
    while acc >= 256:
        acc, mod = divmod(acc, 256)
        decoded = to_byte(mod) + decoded
    decoded = to_byte(acc) + decoded
    # Each leading '1' in the input represents a leading zero byte.
    pad = 0
    for c in v:
        if c == alphabet[0]:
            pad += 1
            continue
        break
    decoded = bytes(pad) + decoded
    if length is not None and len(decoded) != length:
        return None
    return decoded
def checksum(v):
    """Return 32-bit checksum based on SHA256"""
    # Double SHA-256, truncated to the first four bytes.
    inner = hashlib.sha256(v).digest()
    return hashlib.sha256(inner).digest()[:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum"""
    decoded = b58decode(v)
    if decoded is None:
        return None
    payload, check = decoded[:-4], decoded[-4:]
    # Only return the payload when the trailing checksum matches.
    if check == checksum(payload):
        return payload
    return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    addr = b58decode_chk(strAddress)
    # A valid address decodes to exactly 21 bytes: 1 version + 20 hash.
    if addr is None or len(addr) != 21:
        return None
    return ord(addr[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    # Round-trip encode/decode sanity checks.
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
Mixser/django | django/template/__init__.py | 198 | 2022 | """
Django's support for templates.
The django.template namespace contains two independent subsystems:
1. Multiple Template Engines: support for pluggable template backends,
built-in backends and backend-independent APIs
2. Django Template Language: Django's own template engine, including its
built-in loaders, context processors, tags and filters.
Ideally these subsystems would be implemented in distinct packages. However
keeping them together made the implementation of Multiple Template Engines
less disruptive .
Here's a breakdown of which modules belong to which subsystem.
Multiple Template Engines:
- django.template.backends.*
- django.template.loader
- django.template.response
Django Template Language:
- django.template.base
- django.template.context
- django.template.context_processors
- django.template.loaders.*
- django.template.debug
- django.template.defaultfilters
- django.template.defaulttags
- django.template.engine
- django.template.loader_tags
- django.template.smartif
Shared:
- django.template.utils
"""
# Multiple Template Engines
from .engine import Engine
from .utils import EngineHandler
engines = EngineHandler()
__all__ = ('Engine', 'engines')
# Django Template Language
# Public exceptions
from .base import VariableDoesNotExist # NOQA isort:skip
from .context import ContextPopException # NOQA isort:skip
from .exceptions import TemplateDoesNotExist, TemplateSyntaxError # NOQA isort:skip
# Template parts
from .base import ( # NOQA isort:skip
Context, Node, NodeList, Origin, RequestContext, Template, Variable,
)
# Deprecated in Django 1.8, will be removed in Django 1.10.
from .base import resolve_variable # NOQA isort:skip
# Library management
from .library import Library # NOQA isort:skip
__all__ += ('Template', 'Context', 'RequestContext')
| bsd-3-clause |
kudrom/genesis2 | genesis2/plugins/install_plugins/provider.py | 1 | 8220 |
class RepositoryManager:
"""
Manages official Genesis plugin repository. ``cfg`` is :class:`genesis.config.Config`
- ``available`` - list(:class:`PluginInfo`), plugins available in the repository
- ``installed`` - list(:class:`PluginInfo`), plugins that are locally installed
- ``upgradable`` - list(:class:`PluginInfo`), plugins that are locally installed
and have other version in the repository
"""
def __init__(self, cfg):
self.config = cfg
self.server = cfg.get('genesis', 'update_server')
self.refresh()
def list_available(self):
d = {}
for x in self.available:
d[x.id] = x
return d
def check_conflict(self, id, op):
"""
Check if an operation can be performed due to dependency conflict
"""
pdata = PluginLoader.list_plugins()
if op == 'remove':
for i in pdata:
for dep in pdata[i].deps:
if dep[0] == 'plugin' and dep[1] == id and dep[1] in [x.id for x in self.installed]:
raise ImSorryDave(pdata[dep[1]].name, pdata[i].name, op)
elif op == 'install':
t = self.list_available()
try:
for i in eval(t[id].deps):
for dep in i[1]:
if dep[0] == 'plugin' and dep[1] not in [x.id for x in self.installed]:
raise ImSorryDave(t[id].name, t[dep[1]].name, op)
except KeyError:
raise Exception('There was a problem in checking dependencies. '
'Please try again after refreshing the plugin list. '
'If this problem persists, please contact Genesis maintainers.')
def refresh(self):
"""
Re-reads saved repository information and rebuilds installed/available lists
"""
self.available = []
self.installed = []
self.update_installed()
self.update_available()
self.update_upgradable()
def update_available(self):
"""
Re-reads saved list of available plugins
"""
try:
data = eval(open('/var/lib/genesis/plugins.list').read())
except:
return
self.available = []
for item in data:
inst = False
for i in self.installed:
if i.id == item['id'] and i.version == item['version']:
inst = True
break
if inst:
continue
i = PluginInfo()
for k, v in item.items():
setattr(i, k, v)
i.installed = False
i.problem = None
self.available.append(i)
def update_installed(self):
"""
Rebuilds list of installed plugins
"""
self.installed = sorted(PluginLoader.list_plugins().values(), key=lambda x: x.name)
def update_upgradable(self):
"""
Rebuilds list of upgradable plugins
"""
upg = []
for p in self.available:
u = False
g = None
for g in self.installed:
if g.id == p.id and g.version != p.version:
u = True
break
if u:
g.upgradable = p.upgradable = True
upg += [g]
self.upgradable = upg
def update_list(self, crit=False):
"""
Downloads fresh list of plugins and rebuilds installed/available lists
"""
if not os.path.exists('/var/lib/genesis'):
os.mkdir('/var/lib/genesis')
try:
data = download('http://%s/genesis/list/%s' % (self.server, PluginLoader.platform), crit=crit)
except urllib2.HTTPError, e:
raise Exception('Application list retrieval failed with HTTP Error %s' % str(e.code))
except urllib2.URLError, e:
raise Exception('Application list retrieval failed - Server not found or URL malformed. '
'Please check your Internet settings.')
open('/var/lib/genesis/plugins.list', 'w').write(data)
self.update_installed()
self.update_available()
self.update_upgradable()
def remove(self, id, cat=''):
"""
Uninstalls given plugin
:param id: Plugin id
:type id: str
"""
try:
self.purge = self.config.get('genesis', 'purge')
except:
self.purge = '1'
exclude = ['openssl', 'nginx']
if cat:
cat.put_statusmsg('Removing plugin...')
dir = self.config.get('genesis', 'plugins')
shell('rm -r %s/%s' % (dir, id))
if id in PluginLoader.list_plugins():
depends = []
try:
pdata = PluginLoader.list_plugins()
thisplugin = pdata[id].deps
for thing in thisplugin:
if 'app' in thing[0]:
depends.append((thing, 0))
for plugin in pdata:
for item in enumerate(depends):
if item[1][0] in pdata[plugin].deps:
depends[item[0]] = (depends[item[0]][0], depends[item[0]][1]+1)
for thing in depends:
if thing[1] <= 1 and not thing[0][1] in exclude:
if cat:
cat.put_statusmsg('Removing dependency %s...' % thing[0][1])
shell('systemctl stop ' + thing[0][2])
shell('systemctl disable ' + thing[0][2])
shell('pacman -%s --noconfirm ' % ('Rn' if self.purge is '1' else 'R') + thing[0][1])
except KeyError:
pass
PluginLoader.unload(id)
self.update_installed()
self.update_available()
if cat:
cat.put_message('info', 'Plugin removed. Refresh page for changes to take effect.')
def install(self, id, load=True, cat=''):
    """
    Installs a plugin from the configured repository server.

    :param id: Plugin id
    :type id: str
    :param load: True if you want Genesis to load the plugin immediately
    :type load: bool
    :param cat: optional status-message sink
    """
    plugin_dir = self.config.get('genesis', 'plugins')
    if cat:
        cat.put_statusmsg('Downloading plugin package...')
    download('http://%s/genesis/plugin/%s' % (self.server, id),
             file='%s/plugin.tar.gz' % plugin_dir, crit=True)
    # Drop any existing copy before unpacking the fresh package.
    self.remove(id)
    self.install_tar(load=load, cat=cat)
def install_stream(self, stream):
    """
    Installs a plugin from a stream containing the package

    :param stream: package bytes (already-read data, despite the name)
    :type stream: str
    """
    dir = self.config.get('genesis', 'plugins')
    # BUG FIX: open(...).write(...) leaked the file handle; close it
    # deterministically so the archive is fully flushed before untarring.
    f = open('%s/plugin.tar.gz' % dir, 'w')
    try:
        f.write(stream)
    finally:
        f.close()
    self.install_tar()
def install_tar(self, load=True, cat=''):
    """
    Unpacks and installs a ``plugin.tar.gz`` file located in the plugins directory.

    :param load: True if you want Genesis to load the plugin immediately
    :type load: bool
    :param cat: optional status-message sink
    """
    plugin_dir = self.config.get('genesis', 'plugins')
    if cat:
        cat.put_statusmsg('Extracting plugin package...')
    # The first entry of the archive listing is the plugin's directory name.
    listing = shell('tar tzf %s/plugin.tar.gz' % plugin_dir)
    id = listing.split('\n')[0].strip('/')
    shell('cd %s; tar xf plugin.tar.gz' % plugin_dir)
    shell('rm %s/plugin.tar.gz' % plugin_dir)
    if load:
        PluginLoader.load(id, cat=cat)
    self.update_installed()
    self.update_available()
    self.update_upgradable()
class LiveInstall(BackgroundWorker):
    """Background task that installs a plugin and refreshes Genesis state."""
    def run(self, rm, id, load, cat):
        # rm: the repository/plugin manager object exposing install().
        rm.install(id, load=load, cat=cat)
        cat.put_message('info', 'Plugin installed. Refresh page for changes to take effect.')
        # Rescan components and config handlers so the new plugin is
        # picked up without a full restart.
        ComponentManager.get().rescan()
        ConfManager.get().rescan()
        cat._reloadfw = True
        cat.clr_statusmsg()
class LiveRemove(BackgroundWorker):
    """Background task that removes a plugin and flags a firewall reload."""
    def run(self, rm, id, cat):
        rm.remove(id, cat)
        cat._reloadfw = True
        cat.clr_statusmsg()
| gpl-3.0 |
ToxicFrog/lancow | madcow/include/chardet/big5prober.py | 236 | 1683 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import Big5DistributionAnalysis
from mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    """Charset prober for the Traditional Chinese Big5 encoding."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # Pair the Big5 byte-distribution analyser with its state machine.
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self.reset()

    def get_charset_name(self):
        """Return the canonical name of the charset this prober detects."""
        return "Big5"
| gpl-3.0 |
crdoconnor/olympia | apps/files/decorators.py | 14 | 3785 | from datetime import datetime
import functools
import commonware.log
from cache_nuggets.lib import Token
from django import http
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.shortcuts import get_object_or_404
from django.utils.http import http_date
import amo
from access import acl
from addons.decorators import owner_or_unlisted_reviewer
from files.helpers import DiffHelper, FileViewer
from files.models import File
log = commonware.log.getLogger('z.addons')
def allowed(request, file):
    """Return True if *request* may view *file*'s sources; raise otherwise.

    Raises Http404 when the addon is gone (or must stay hidden) and
    PermissionDenied when it exists but the requester lacks access.
    """
    try:
        addon = file.version.addon
    except ObjectDoesNotExist:
        raise http.Http404
    if not addon.is_listed:
        # Unlisted addon: only an owner or an "unlisted" admin may look.
        if owner_or_unlisted_reviewer(request, addon):
            return True
        raise http.Http404  # Hide its existence entirely.
    # Listed addon: public sources (if opted in and reviewed), reviewers,
    # or the addon's own authors/viewers.
    publicly_viewable = (addon.view_source and
                         addon.status in amo.REVIEWED_STATUSES)
    if (publicly_viewable or
            acl.check_addons_reviewer(request) or
            acl.check_addon_ownership(request, addon, viewer=True, dev=True)):
        return True
    raise PermissionDenied  # Listed but not allowed.
def _get_value(obj, key, value, cast=None):
obj = getattr(obj, 'left', obj)
key = obj.get_default(key)
obj.select(key)
if obj.selected:
value = obj.selected.get(value)
return cast(value) if cast else value
def last_modified(request, obj, key=None, **kw):
    """Condition callback: modification time of the selected file."""
    stamp = _get_value(obj, key, 'modified', datetime.fromtimestamp)
    return stamp
def etag(request, obj, key=None, **kw):
    """Condition callback: md5 digest of the selected file."""
    digest = _get_value(obj, key, 'md5')
    return digest
def file_view(func, **kwargs):
    """Decorator for views taking a file id: resolves the ``File``,
    enforces ``allowed()`` access rules, wraps it in a ``FileViewer``
    and stamps ETag/Last-Modified headers from the current selection."""
    @functools.wraps(func)
    def wrapper(request, file_id, *args, **kw):
        file_ = get_object_or_404(File, pk=file_id)
        # allowed() raises on denial; anything other than True is returned
        # to the client as-is (defensive -- currently it only returns True).
        result = allowed(request, file_)
        if result is not True:
            return result
        try:
            obj = FileViewer(file_,)
        except ObjectDoesNotExist:
            raise http.Http404
        response = func(request, obj, *args, **kw)
        if obj.selected:
            # HTTP cache validators for the file the viewer has selected.
            response['ETag'] = '"%s"' % obj.selected.get('md5')
            response['Last-Modified'] = http_date(obj.selected.get('modified'))
        return response
    return wrapper
def compare_file_view(func, **kwargs):
    """Decorator for diff views taking two file ids: checks access to
    both files, wraps them in a ``DiffHelper`` and stamps cache headers
    from the left-hand selection."""
    @functools.wraps(func)
    def wrapper(request, one_id, two_id, *args, **kw):
        one = get_object_or_404(File, pk=one_id)
        two = get_object_or_404(File, pk=two_id)
        # The requester must be allowed to see both sides of the diff.
        for obj in [one, two]:
            result = allowed(request, obj)
            if result is not True:
                return result
        try:
            obj = DiffHelper(one, two)
        except ObjectDoesNotExist:
            raise http.Http404
        response = func(request, obj, *args, **kw)
        if obj.left.selected:
            # Cache validators are taken from the left-hand file selection.
            response['ETag'] = '"%s"' % obj.left.selected.get('md5')
            response['Last-Modified'] = http_date(obj.left.selected
                                                  .get('modified'))
        return response
    return wrapper
def file_view_token(func, **kwargs):
    """Decorator for token-authenticated file views: access requires a
    one-time ``token`` query parameter valid for this file id and the
    requested path *key* (no session-based checks are performed)."""
    @functools.wraps(func)
    def wrapper(request, file_id, key, *args, **kw):
        viewer = FileViewer(get_object_or_404(File, pk=file_id))
        token = request.GET.get('token')
        if not token:
            log.error('Denying access to %s, no token.' % viewer.file.id)
            raise PermissionDenied
        # Token must have been minted for exactly this (file, key) pair.
        if not Token.valid(token, [viewer.file.id, key]):
            log.error('Denying access to %s, token invalid.' % viewer.file.id)
            raise PermissionDenied
        return func(request, viewer, key, *args, **kw)
    return wrapper
| bsd-3-clause |
KiChjang/servo | tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/treewalkers/base.py | 156 | 7476 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
           "TreeWalker", "NonRecursiveTreeWalker"]

# Node-type codes, mirroring the DOM nodeType constants.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"  # sentinel for node types with no DOM equivalent

# Collapse the imported iterable of space characters into one str so the
# str.lstrip()/rstrip() calls in TreeWalker.text() can use it directly.
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
    """Walks a tree yielding tokens

    Tokens are dicts that all have a ``type`` field specifying the type of
    the token.
    """
    def __init__(self, tree):
        """Creates a TreeWalker

        :arg tree: the tree to walk
        """
        self.tree = tree

    def __iter__(self):
        raise NotImplementedError

    def error(self, msg):
        """Generates an error token with the given message

        :arg msg: the error message

        :returns: SerializeError token
        """
        return {"type": "SerializeError", "data": msg}

    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Generates an EmptyTag token

        :arg namespace: the namespace of the token--can be ``None``
        :arg name: the name of the element
        :arg attrs: the attributes of the element as a dict
        :arg hasChildren: whether or not to yield a SerializationError because
            this tag shouldn't have children

        :returns: EmptyTag token
        """
        yield {"type": "EmptyTag", "name": name,
               "namespace": namespace,
               "data": attrs}
        if hasChildren:
            yield self.error("Void element has children")

    def startTag(self, namespace, name, attrs):
        """Generates a StartTag token

        :arg namespace: the namespace of the token--can be ``None``
        :arg name: the name of the element
        :arg attrs: the attributes of the element as a dict

        :returns: StartTag token
        """
        return {"type": "StartTag",
                "name": name,
                "namespace": namespace,
                "data": attrs}

    def endTag(self, namespace, name):
        """Generates an EndTag token

        :arg namespace: the namespace of the token--can be ``None``
        :arg name: the name of the element

        :returns: EndTag token
        """
        return {"type": "EndTag",
                "name": name,
                "namespace": namespace}

    def text(self, data):
        """Generates SpaceCharacters and Characters tokens

        Depending on what's in the data, this generates one or more
        ``SpaceCharacters`` and ``Characters`` tokens.

        For example:

            >>> from html5lib.treewalkers.base import TreeWalker
            >>> # Give it an empty tree just so it instantiates
            >>> walker = TreeWalker([])
            >>> list(walker.text(''))
            []
            >>> list(walker.text(' '))
            [{u'data': ' ', u'type': u'SpaceCharacters'}]
            >>> list(walker.text(' abc '))  # doctest: +NORMALIZE_WHITESPACE
            [{u'data': ' ', u'type': u'SpaceCharacters'},
            {u'data': u'abc', u'type': u'Characters'},
            {u'data': u' ', u'type': u'SpaceCharacters'}]

        :arg data: the text data

        :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens
        """
        # Dead statement `data = data` removed (no-op self-assignment).
        # Split data into leading space / middle / trailing space so the
        # serializer can treat inter-element whitespace specially.
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        data = middle
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}

    def comment(self, data):
        """Generates a Comment token

        :arg data: the comment

        :returns: Comment token
        """
        return {"type": "Comment", "data": data}

    def doctype(self, name, publicId=None, systemId=None):
        """Generates a Doctype token

        :arg name:
        :arg publicId:
        :arg systemId:

        :returns: the Doctype token
        """
        return {"type": "Doctype",
                "name": name,
                "publicId": publicId,
                "systemId": systemId}

    def entity(self, name):
        """Generates an Entity token

        :arg name: the entity name

        :returns: an Entity token
        """
        return {"type": "Entity", "name": name}

    def unknown(self, nodeType):
        """Handles unknown node types"""
        return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker base for trees navigated by explicit pointer moves.

    Subclasses implement the four primitives below; ``__iter__`` then
    performs an iterative (stack-free) depth-first traversal.
    """
    def getNodeDetails(self, node):
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False
            if type == DOCTYPE:
                yield self.doctype(*details)
            elif type == TEXT:
                for token in self.text(*details):
                    yield token
            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                # HTML void elements become EmptyTag tokens; any children
                # they claim are reported as errors and never descended into.
                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)
            elif type == COMMENT:
                yield self.comment(details[0])
            elif type == ENTITY:
                yield self.entity(details[0])
            elif type == DOCUMENT:
                hasChildren = True
            else:
                yield self.unknown(details[0])
            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None
            if firstChild is not None:
                # Descend into the subtree.
                currentNode = firstChild
            else:
                # No children: emit pending EndTags while climbing back up
                # until a next sibling (or the traversal root) is reached.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        # Void HTML elements got no StartTag, so no EndTag.
                        if (namespace and namespace != namespaces["html"]) or name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
| mpl-2.0 |
dropseedlabs/configyaml | configyaml/config/nodes.py | 1 | 3286 | import re
from .base import AbstractNode
from .dict import DictNode
class WildcardDictNode(DictNode):
    """A dictionary node where nearly any key is valid

    The only invalid keys are:

    - an actual asterisk '*'
    - a key that begins with '_'
    """
    def _validate_value(self):
        # Validate every entry against the single '*' field spec declared
        # by the subclass in self._dict_fields.
        for key, value in self._value.items():
            # where key name doesn't matter (ex. groups)
            key_valid, explanation = self._key_name_is_valid(key)
            if key_valid:
                field_class = self._dict_fields['*']['class']
                field = field_class(
                    value=value,
                    value_node=self._find_node_for_key_value(key),
                    context=self._context,
                    variables=self._variables,
                    key=key,
                    parent=self
                )
                # don't set __dict__ if they can use any key
                self._children[key] = field
            else:
                self._add_error(
                    node=self._find_node_for_key(key),
                    title='Invalid field name',
                    description=explanation
                )

    def _as_dict(self, redact=False):
        # Serialize children (plus any errors and injected extras) into a
        # plain dict, optionally redacting sensitive values.
        if redact and self._should_redact():
            return self._as_redacted_dict()
        d = {}
        for group_name in self._children.keys():
            d[group_name] = self[group_name]._as_dict(redact=redact)
        if self._errors:
            d['errors'] = [x.as_dict() for x in self._errors]
        d.update(self._as_dict_to_inject(redact=redact))
        return d

    def _key_name_is_valid(self, key):
        # Returns (is_valid, human-readable explanation).
        if key == '*':
            return False, 'Field name cannot be "*"'
        if key.startswith('_'):
            return False, 'Cannot start field name with a "_"'
        return True, 'Valid'
class BoolNode(AbstractNode):
    """A node that must validate as a bool"""
    def __init__(self, *args, **kwargs):
        # _type must be set before the base __init__, which runs validation.
        self._type = bool
        super(BoolNode, self).__init__(*args, **kwargs)
class StringNode(AbstractNode):
    """A node that must validate as a string"""
    def __init__(self, *args, **kwargs):
        # _type must be set before the base __init__, which runs validation.
        self._type = str
        super(StringNode, self).__init__(*args, **kwargs)
class RegexNode(StringNode):
    """A node that must validate as a regular expression"""
    def __init__(self, *args, **kwargs):
        # Compiled pattern (set by _validate_value); None until validated.
        self.regex = None
        super(RegexNode, self).__init__(*args, **kwargs)

    def _validate_value(self):
        try:
            self.regex = re.compile(self._value)
        except re.error as e:
            # Surface the re module's message verbatim in the error report.
            self._add_error(title='Invalid regex', description=str(e))
class IntegerNode(AbstractNode):
    """A node that must validate as an integer"""
    def __init__(self, *args, **kwargs):
        # _type must be set before the base __init__, which runs validation.
        self._type = int
        super(IntegerNode, self).__init__(*args, **kwargs)
class PositiveIntegerNode(IntegerNode):
    """A node that must validate as a positive integer"""
    def _validate_value(self):
        # NOTE(review): 0 is accepted here despite the "positive" name --
        # the check is really "non-negative"; confirm before tightening.
        if self._value < 0:
            self._add_error(title="Invalid Value", description="Must be a positive integer")
class TypelessNode(AbstractNode):
    """A node that does not have to validate as any specific type"""
    def _validate_type(self):
        # Deliberately accept any YAML type.
        pass
| mit |
korex/korex-shell | gyp/pylib/gyp/sun_tool.py | 314 | 1569 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-sun-tool when using the Makefile
generator."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
    """Entry point: dispatch the argument list to a SunTool command."""
    SunTool().Dispatch(args)
class SunTool(object):
    """This class performs all the SunOS tooling steps. The methods can either
    be executed directly, or dispatched from an argument list."""

    def Dispatch(self, args):
        """Dispatches a string command to a method."""
        if len(args) < 1:
            raise Exception("Not enough arguments")
        method = "Exec%s" % self._CommandifyName(args[0])
        getattr(self, method)(*args[1:])

    def _CommandifyName(self, name_string):
        """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
        return name_string.title().replace('-', '')

    def ExecFlock(self, lockfile, *cmd_list):
        """Emulates the most basic behavior of Linux's flock(1)."""
        # Rely on exception handling to report errors.
        # Note that the stock python on SunOS has a bug
        # where fcntl.flock(fd, LOCK_EX) always fails
        # with EBADF, that's why we use this F_SETLK
        # hack instead.
        # BUG FIX: `0666` is a Python-2-only octal literal (a SyntaxError on
        # Python 3); `0o666` parses identically on Python 2.6+ and 3.x.
        fd = os.open(lockfile, os.O_WRONLY | os.O_NOCTTY | os.O_CREAT, 0o666)
        op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
        fcntl.fcntl(fd, fcntl.F_SETLK, op)
        return subprocess.call(cmd_list)
if __name__ == '__main__':
    # Invoked by gyp as a helper tool; propagate the exit status.
    sys.exit(main(sys.argv[1:]))
| mit |
sunlightlabs/openstates | scrapers/ar/bills.py | 2 | 9314 | import re
import csv
import urllib
import datetime
import pytz
from openstates.scrape import Scraper, Bill, VoteEvent
import lxml.html
from .common import get_slug_for_session
TIMEZONE = pytz.timezone("US/Central")
def get_utf_16_ftp_content(url):
    """Fetch *url* and return its UTF-16 payload as cleaned-up text."""
    # Rough to do this within Scrapelib, as it doesn't allow custom decoding.
    text = urllib.request.urlopen(url).read().decode("utf-16")
    # The legislature pads empty cells with NUL bytes; strip those along
    # with carriage returns.
    for junk in ("\x00", "\r"):
        text = text.replace(junk, "")
    return text
class ARBillScraper(Scraper):
def scrape(self, chamber=None, session=None):
    """Yield Bill objects for *session*, optionally limited to one chamber.

    Bills are collected first; actions are then merged in from a single
    statewide dump before anything is yielded.
    """
    if not session:
        session = self.latest_session()
        self.info("no session specified, using %s", session)
    # Arkansas keys its FTP files on a per-session slug.
    self.slug = get_slug_for_session(session)
    chambers = [chamber] if chamber else ["upper", "lower"]
    self.bills = {}
    for Chamber in chambers:
        # scrape_bill populates self.bills and yields vote events.
        yield from self.scrape_bill(Chamber, session)
    # Actions live in one statewide file, so attach them after all bills
    # for the requested chambers are known.
    self.scrape_actions()
    for bill_id, bill in self.bills.items():
        yield bill
def scrape_bill(self, chamber, session):
    """Build Bill objects for *chamber* into self.bills from the statewide
    LegislativeMeasures dump; yields vote events found on bill pages."""
    url = "ftp://www.arkleg.state.ar.us/SessionInformation/LegislativeMeasures.txt"
    # Pipe-delimited UTF-16 file covering every chamber and session.
    page = csv.reader(get_utf_16_ftp_content(url).splitlines(), delimiter="|")
    for row in page:
        bill_chamber = {"H": "lower", "S": "upper"}[row[0]]
        if bill_chamber != chamber:
            continue
        bill_id = "%s%s %s" % (row[0], row[1], row[2])
        # Letters after the chamber prefix encode the measure type.
        type_spec = re.match(r"(H|S)([A-Z]+)\s", bill_id).group(2)
        bill_type = {
            "B": "bill",
            "R": "resolution",
            "JR": "joint resolution",
            "CR": "concurrent resolution",
            "MR": "memorial",
            "CMR": "concurrent memorial",
        }[type_spec]
        # Last column is the session slug; skip rows from other sessions.
        if row[-1] != self.slug:
            continue
        bill = Bill(
            bill_id,
            legislative_session=session,
            chamber=chamber,
            title=row[3],
            classification=bill_type,
        )
        bill.add_source(url)
        # The primary sponsor may appear in either of two columns.
        primary = row[11]
        if not primary:
            primary = row[12]
        if primary:
            bill.add_sponsorship(
                primary,
                classification="primary",
                entity_type="person",
                primary=True,
            )
        version_url = (
            "ftp://www.arkleg.state.ar.us/Bills/"
            "%s/Public/Searchable/%s.pdf" % (self.slug, bill_id.replace(" ", ""))
        )
        bill.add_version_link(bill_id, version_url, media_type="application/pdf")
        # The HTML detail page carries amendments, cosponsors and votes.
        yield from self.scrape_bill_page(bill)
        self.bills[bill_id] = bill
def scrape_actions(self):
    """Attach actions from the statewide ChamberActions dump to the
    bills already collected in self.bills."""
    url = "ftp://www.arkleg.state.ar.us/SessionInformation/ChamberActions.txt"
    page = csv.reader(get_utf_16_ftp_content(url).splitlines(), delimiter="|")
    for row in page:
        bill_id = "%s%s %s" % (row[1], row[2], row[3])
        if bill_id not in self.bills:
            continue
        # different term
        if row[10] != self.slug:
            continue
        actor = {"H": "lower", "S": "upper"}[row[7].upper()]
        date = TIMEZONE.localize(
            datetime.datetime.strptime(row[5], "%Y-%m-%d %H:%M:%S.%f")
        )
        date = "{:%Y-%m-%d}".format(date)
        action = row[6]
        # Classify the free-text action into openstates categories.
        action_type = []
        if action.startswith("Filed"):
            action_type.append("introduction")
        elif action.startswith("Read first time") or action.startswith(
            "Read the first time"
        ):
            action_type.append("reading-1")
            # A combined first-and-second reading in one action line.
            if re.match("Read the first time, .*, read the second time", action):
                action_type.append("reading-2")
        elif action.startswith("Read the third time and passed"):
            action_type.append("passage")
            action_type.append("reading-3")
        elif action.startswith("Read the third time"):
            action_type.append("reading-3")
        elif action.startswith("DELIVERED TO GOVERNOR"):
            action_type.append("executive-receipt")
        elif action.startswith("Notification"):
            action_type.append("executive-signature")
        if "referred to" in action:
            action_type.append("referral-committee")
        if "Returned by the Committee" in action:
            if "recommendation that it Do Pass" in action:
                action_type.append("committee-passage-favorable")
            else:
                action_type.append("committee-passage")
        if re.match(r"Amendment No\. \d+ read and adopted", action):
            action_type.append("amendment-introduction")
            action_type.append("amendment-passage")
        if not action:
            action = "[No text provided]"
        self.bills[bill_id].add_action(
            action, date, chamber=actor, classification=action_type
        )
def scrape_bill_page(self, bill):
    """Scrape one bill's HTML page for amendments, cosponsors and votes."""
    # We need to scrape each bill page in order to grab associated votes.
    # It's still more efficient to get the rest of the data we're
    # interested in from the CSVs, though, because their site splits
    # other info (e.g. actions) across many pages
    session_year = int(self.slug[:4])
    # Site URLs are keyed on the odd year of the biennium.
    odd_year = session_year if session_year % 2 else session_year - 1
    measureno = bill.identifier.replace(" ", "")
    url = (
        "http://www.arkleg.state.ar.us/assembly/%s/%s/"
        "Pages/BillInformation.aspx?measureno=%s" % (odd_year, self.slug, measureno)
    )
    page = self.get(url).text
    bill.add_source(url)
    page = lxml.html.fromstring(page)
    page.make_links_absolute(url)
    for link in page.xpath("//a[contains(@href, 'Amendments')]"):
        # Amendment number sits two table cells up from the link.
        num = link.xpath("string(../../td[2])")
        name = "Amendment %s" % num
        bill.add_version_link(
            name, link.attrib["href"], media_type="application/pdf"
        )
    try:
        cosponsor_link = page.xpath("//a[contains(@href, 'CoSponsors')]")[0]
        self.scrape_cosponsors(bill, cosponsor_link.attrib["href"])
    except IndexError:
        # No cosponsor link is OK
        pass
    for link in page.xpath("//a[contains(@href, 'votes.aspx')]"):
        date = link.xpath("string(../../td[2])")
        date = TIMEZONE.localize(
            datetime.datetime.strptime(date, "%m/%d/%Y %I:%M:%S %p")
        )
        motion = link.xpath("string(../../td[3])")
        yield from self.scrape_vote(bill, date, motion, link.attrib["href"])
def scrape_vote(self, bill, date, motion, url):
    """Yield a VoteEvent parsed from one roll-call page, if it is live."""
    page = self.get(url).text
    if "not yet official" in page:
        # Sometimes they link to vote pages before they go live
        pass
    else:
        page = lxml.html.fromstring(page)
        if url.endswith("Senate"):
            actor = "upper"
        else:
            actor = "lower"
        votevals = ["yes", "no", "not voting", "other"]
        count_path = "string(//td[@align = 'center' and contains(., '%s: ')])"
        yes_count = int(page.xpath(count_path % "Yeas").split()[-1])
        no_count = int(page.xpath(count_path % "Nays").split()[-1])
        not_voting_count = int(page.xpath(count_path % "Non Voting").split()[-1])
        other_count = int(page.xpath(count_path % "Present").split()[-1])
        # Passage requires more yeas than everyone else combined.
        passed = yes_count > no_count + not_voting_count + other_count
        vote = VoteEvent(
            start_date=date,
            motion_text=motion,
            result="pass" if passed else "fail",
            classification="passage",
            chamber=actor,
            bill=bill,
        )
        try:
            # "Excused" only appears on some pages.
            excused_count = int(page.xpath(count_path % "Excused").split()[-1])
            vote.set_count("excused", excused_count)
            votevals.append("excused")
        except (ValueError, IndexError):
            pass
        vote.set_count("yes", yes_count)
        vote.set_count("no", no_count)
        vote.set_count("not voting", not_voting_count)
        vote.set_count("other", other_count)
        vote.add_source(url)
        xpath = (
            '//*[contains(@class, "ms-standardheader")]/' "following-sibling::table"
        )
        divs = page.xpath(xpath)
        # NOTE(review): assumes the voter tables appear on the page in the
        # same order as votevals (yes, no, not voting, other[, excused]) --
        # confirm against a live roll-call page.
        for (voteval, div) in zip(votevals, divs):
            for a in div.xpath(".//a"):
                name = a.text_content().strip()
                if not name:
                    continue
                else:
                    vote.vote(voteval, name)
        yield vote
def scrape_cosponsors(self, bill, url):
page = self.get(url).text
page = lxml.html.fromstring(page)
| gpl-3.0 |
leorochael/odoo | addons/l10n_be_intrastat/l10n_be_intrastat.py | 258 | 7828 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_invoice(osv.osv):
    """Extend invoices with the fields needed for Belgian Intrastat reports."""
    _inherit = "account.invoice"
    _columns = {
        'incoterm_id': fields.many2one(
            'stock.incoterms', 'Incoterm',
            help="International Commercial Terms are a series of predefined commercial terms "
                 "used in international transactions."),
        'intrastat_transaction_id': fields.many2one(
            'l10n_be_intrastat.transaction', 'Intrastat Transaction Type',
            help="Intrastat nature of transaction"),
        'transport_mode_id': fields.many2one(
            'l10n_be_intrastat.transport_mode', 'Intrastat Transport Mode'),
        # Delivery country for sales, origin country for purchases; limited
        # to countries flagged as intrastat members.
        'intrastat_country_id': fields.many2one(
            'res.country', 'Intrastat Country',
            help='Intrastat country, delivery for sales, origin for purchases',
            domain=[('intrastat', '=', True)]),
    }
class intrastat_region(osv.osv):
    """Intrastat reporting region (e.g. a Belgian region code)."""
    _name = 'l10n_be_intrastat.region'
    _columns = {
        'code': fields.char('Code', required=True),
        'country_id': fields.many2one('res.country', 'Country'),
        'name': fields.char('Name', translate=True),
        'description': fields.char('Description'),
    }
    _sql_constraints = [
        ('l10n_be_intrastat_regioncodeunique', 'UNIQUE (code)', 'Code must be unique.'),
    ]
class intrastat_transaction(osv.osv):
    """Nature-of-transaction code used on Intrastat declarations."""
    _name = 'l10n_be_intrastat.transaction'
    # Records are displayed by their code rather than a name field.
    _rec_name = 'code'
    _columns = {
        'code': fields.char('Code', required=True, readonly=True),
        'description': fields.text('Description', readonly=True),
    }
    _sql_constraints = [
        ('l10n_be_intrastat_trcodeunique', 'UNIQUE (code)', 'Code must be unique.'),
    ]
class intrastat_transport_mode(osv.osv):
    """Mode-of-transport code used on Intrastat declarations."""
    _name = 'l10n_be_intrastat.transport_mode'
    _columns = {
        'code': fields.char('Code', required=True, readonly=True),
        'name': fields.char('Description', readonly=True),
    }
    _sql_constraints = [
        ('l10n_be_intrastat_trmodecodeunique', 'UNIQUE (code)', 'Code must be unique.'),
    ]
class product_category(osv.osv):
    """Product category, extended with an Intrastat commodity code."""
    _name = "product.category"
    _inherit = "product.category"
    _columns = {
        'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat Code'),
    }

    def get_intrastat_recursively(self, cr, uid, category, context=None):
        """Walk up the category tree until an intrastat code is found.

        :param category: Browse record of a category
        :return: intrastat code id, or None if no ancestor defines one
        """
        if category.intrastat_id:
            return category.intrastat_id.id
        if category.parent_id:
            return self.get_intrastat_recursively(cr, uid, category.parent_id, context=context)
        return None
class product_product(osv.osv):
    """Product variant, resolving Intrastat codes through its category."""
    _name = "product.product"
    _inherit = "product.product"

    def get_intrastat_recursively(self, cr, uid, id, context=None):
        """Intrastat code of the product itself, else of its category tree."""
        product = self.browse(cr, uid, id, context=context)
        if product.intrastat_id:
            return product.intrastat_id.id
        if product.categ_id:
            # Delegate the ancestor walk to the category model.
            return self.pool['product.category'].get_intrastat_recursively(
                cr, uid, product.categ_id, context=context)
        return None
class purchase_order(osv.osv):
    """Propagate Intrastat data from purchase orders onto their invoices."""
    _inherit = "purchase.order"

    def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
        """
        copy incoterm from purchase order to invoice
        """
        invoice = super(purchase_order, self)._prepare_invoice(
            cr, uid, order, line_ids, context=context)
        if order.incoterm_id:
            invoice['incoterm_id'] = order.incoterm_id.id
        # Try to determine products origin
        if order.partner_id.country_id:
            # It comes from supplier
            invoice['intrastat_country_id'] = order.partner_id.country_id.id
        return invoice
class report_intrastat_code(osv.osv):
    """Add a translatable description to intrastat commodity codes."""
    _inherit = "report.intrastat.code"
    _columns = {
        'description': fields.text('Description', translate=True),
    }
class res_company(osv.osv):
    """Company-level defaults used when filling Intrastat declarations."""
    _inherit = "res.company"
    _columns = {
        'region_id': fields.many2one('l10n_be_intrastat.region', 'Intrastat region'),
        'transport_mode_id': fields.many2one('l10n_be_intrastat.transport_mode',
                                             'Default transport mode'),
        'incoterm_id': fields.many2one('stock.incoterms', 'Default incoterm for Intrastat',
                                       help="International Commercial Terms are a series of "
                                            "predefined commercial terms used in international "
                                            "transactions."),
    }
class sale_order(osv.osv):
    """Propagate Intrastat data from sale orders onto their invoices."""
    _inherit = "sale.order"

    def _prepare_invoice(self, cr, uid, saleorder, lines, context=None):
        """
        copy incoterm from sale order to invoice
        """
        invoice = super(sale_order, self)._prepare_invoice(
            cr, uid, saleorder, lines, context=context)
        if saleorder.incoterm:
            invoice['incoterm_id'] = saleorder.incoterm.id
        # Guess products destination: prefer the shipping address country,
        # then the customer's, then the invoicing address country.
        if saleorder.partner_shipping_id.country_id:
            invoice['intrastat_country_id'] = saleorder.partner_shipping_id.country_id.id
        elif saleorder.partner_id.country_id:
            invoice['intrastat_country_id'] = saleorder.partner_id.country_id.id
        elif saleorder.partner_invoice_id.country_id:
            invoice['intrastat_country_id'] = saleorder.partner_invoice_id.country_id.id
        return invoice
class stock_warehouse(osv.osv):
    """Attach an Intrastat region to warehouses and resolve it by location."""
    _inherit = "stock.warehouse"
    _columns = {
        'region_id': fields.many2one('l10n_be_intrastat.region', 'Intrastat region'),
    }

    def get_regionid_from_locationid(self, cr, uid, location_id, context=None):
        """Region id of the first warehouse whose stock location contains
        *location_id*, or None."""
        location_model = self.pool['stock.location']
        location = location_model.browse(cr, uid, location_id, context=context)
        # Nested-set query: locations whose (parent_left, parent_right)
        # interval encloses this location, i.e. the location and its
        # ancestors -- presumably to match a warehouse root; confirm.
        location_ids = location_model.search(cr, uid,
                                             [('parent_left', '<=', location.parent_left),
                                              ('parent_right', '>=', location.parent_right)],
                                             context=context)
        warehouse_ids = self.search(cr, uid,
                                    [('lot_stock_id', 'in', location_ids),
                                     ('region_id', '!=', False)],
                                    context=context)
        warehouses = self.browse(cr, uid, warehouse_ids, context=context)
        if warehouses and warehouses[0]:
            return warehouses[0].region_id.id
        return None
| agpl-3.0 |
ARG-TLQ/Red-DiscordBot | redbot/vendored/discord/ext/menus/__init__.py | 4 | 40800 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import discord
import itertools
import inspect
import bisect
import re
from collections import OrderedDict, namedtuple
# Needed for the setup.py script
__version__ = '1.0.0-a'
class MenuError(Exception):
    """Base exception raised by the menus extension."""
    pass
class CannotEmbedLinks(MenuError):
    """Raised when the bot lacks the Embed Links permission."""
    def __init__(self):
        super().__init__('Bot does not have embed links permission in this channel.')
class CannotSendMessages(MenuError):
    """Raised when the bot cannot send messages in the channel."""
    def __init__(self):
        super().__init__('Bot cannot send messages in this channel.')
class CannotAddReactions(MenuError):
    """Raised when the bot lacks the Add Reactions permission."""
    def __init__(self):
        super().__init__('Bot cannot add reactions in this channel.')
class CannotReadMessageHistory(MenuError):
    """Raised when the bot lacks the Read Message History permission."""
    def __init__(self):
        super().__init__('Bot does not have Read Message History permissions in this channel.')
class Position:
    """A sortable (bucket, number) pair: buckets dominate, numbers break
    ties within a bucket. Used to order menu buttons."""

    __slots__ = ('number', 'bucket')

    def __init__(self, number, *, bucket=1):
        self.bucket = bucket
        self.number = number

    def _sort_key(self):
        # Bucket first, then number.
        return (self.bucket, self.number)

    def __lt__(self, other):
        if isinstance(self, Position) and isinstance(other, Position):
            return self._sort_key() < other._sort_key()
        return NotImplemented

    def __eq__(self, other):
        if not isinstance(other, Position):
            return False
        return self._sort_key() == other._sort_key()

    def __le__(self, other):
        # a <= b  <=>  not (b < a); propagate NotImplemented unchanged.
        flipped = Position.__lt__(other, self)
        if flipped is NotImplemented:
            return NotImplemented
        return not flipped

    def __gt__(self, other):
        return Position.__lt__(other, self)

    def __ge__(self, other):
        ordered = Position.__lt__(self, other)
        if ordered is NotImplemented:
            return NotImplemented
        return not ordered

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.number)
class Last(Position):
__slots__ = ()
def __init__(self, number=0):
super().__init__(number, bucket=2)
class First(Position):
__slots__ = ()
def __init__(self, number=0):
super().__init__(number, bucket=0)
_custom_emoji = re.compile(r'<?(?P<animated>a)?:?(?P<name>[A-Za-z0-9\_]+):(?P<id>[0-9]{13,21})>?')


def _cast_emoji(obj, *, _custom_emoji=_custom_emoji):
    """Coerce *obj* (str or PartialEmoji) into a :class:`discord.PartialEmoji`."""
    if isinstance(obj, discord.PartialEmoji):
        return obj

    text = str(obj)
    match = _custom_emoji.match(text)
    if match is None:
        # Not a custom emoji mention: treat the text as a unicode emoji name.
        return discord.PartialEmoji(name=text, id=None, animated=False)

    return discord.PartialEmoji(
        name=match.group('name'),
        animated=bool(match.group('animated')),
        id=int(match.group('id')),
    )
class Button:
    """A reaction-style button for the :class:`Menu`.

    Can be constructed directly or through the :func:`button` decorator.
    The action must accept both a ``self`` and a ``payload`` parameter of
    type :class:`discord.RawReactionActionEvent`.

    Attributes
    ------------
    emoji: :class:`discord.PartialEmoji`
        The emoji to use as the button. Strings are converted to
        :class:`discord.PartialEmoji`.
    action
        A coroutine that is called when the button is pressed.
    skip_if: Optional[Callable[[:class:`Menu`], :class:`bool`]]
        A callable that decides whether the button should be skipped.
        A skipped button is neither shown as a reaction nor processed.
    position: :class:`Position`
        The position the button should have in the initial order. Discord
        does not actually preserve reaction order, so this is best-effort.
        Defaults to ``Position(0)``.
    lock: :class:`bool`
        Whether the button locks out all other buttons while it runs.
        Defaults to ``True``.
    """

    __slots__ = ('emoji', '_action', '_skip_if', 'position', 'lock')

    def __init__(self, emoji, action, *, skip_if=None, position=None, lock=True):
        self.emoji = _cast_emoji(emoji)
        self.action = action
        self.skip_if = skip_if
        self.position = position or Position(0)
        self.lock = lock

    @property
    def skip_if(self):
        return self._skip_if

    @skip_if.setter
    def skip_if(self, value):
        if value is None:
            # No predicate given: never skip.
            self._skip_if = lambda menu: False
            return

        bound_to = getattr(value, '__self__', None)
        if bound_to is None:
            self._skip_if = value
            return

        # Unbind methods that came in bound to a Menu instance.
        if not isinstance(bound_to, Menu):
            raise TypeError('skip_if bound method must be from Menu not %r' % bound_to)
        self._skip_if = value.__func__

    @property
    def action(self):
        return self._action

    @action.setter
    def action(self, value):
        bound_to = getattr(value, '__self__', None)
        if bound_to is not None:
            # Unbind methods that came in bound to a Menu instance.
            if not isinstance(bound_to, Menu):
                raise TypeError('action bound method must be from Menu not %r' % bound_to)
            value = value.__func__

        if not inspect.iscoroutinefunction(value):
            raise TypeError('action must be a coroutine not %r' % value)
        self._action = value

    def __call__(self, menu, payload):
        # Returns the action's coroutine, or None if the button is skipped.
        if self.skip_if(menu):
            return None
        return self._action(menu, payload)

    def __str__(self):
        return str(self.emoji)

    def is_valid(self, menu):
        """Whether the button should be shown/processed for *menu*."""
        return not self.skip_if(menu)
def button(emoji, **kwargs):
    """Marks a method as a button for the :class:`Menu`.

    The decorated method must accept both a ``self`` and a ``payload``
    parameter of type :class:`discord.RawReactionActionEvent`.  All keyword
    arguments are forwarded to the :class:`Button` constructor.

    Example
    ---------

    .. code-block:: python3

        class MyMenu(Menu):
            async def send_initial_message(self, ctx, channel):
                return await channel.send(f'Hello {ctx.author}')

            @button('\\N{THUMBS UP SIGN}')
            async def on_thumbs_up(self, payload):
                await self.message.edit(content=f'Thanks {self.ctx.author}!')

            @button('\\N{THUMBS DOWN SIGN}')
            async def on_thumbs_down(self, payload):
                await self.message.edit(content=f"That's not nice {self.ctx.author}...")

    Parameters
    ------------
    emoji: Union[:class:`str`, :class:`discord.PartialEmoji`]
        The emoji to use for the button.
    """
    def wrapper(func):
        # These markers are picked up by _MenuMeta to build Button objects.
        func.__menu_button__ = _cast_emoji(emoji)
        func.__menu_button_kwargs__ = kwargs
        return func
    return wrapper
class _MenuMeta(type):
    """Metaclass that collects methods decorated with :func:`button`.

    Supports the class keyword ``inherit_buttons`` (default ``True``) to
    control whether buttons from base classes are picked up.
    """

    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # An ordered namespace keeps the buttons in definition order.
        return OrderedDict()

    def __new__(cls, name, bases, attrs, **kwargs):
        new_cls = super().__new__(cls, name, bases, attrs)
        inherit_buttons = kwargs.pop('inherit_buttons', True)

        if inherit_buttons:
            # Walk the full MRO (base-first) so inherited buttons are kept.
            namespaces = [base.__dict__ for base in reversed(new_cls.__mro__)]
        else:
            namespaces = [attrs]

        buttons = []
        for namespace in namespaces:
            for value in namespace.values():
                if hasattr(value, '__menu_button__'):
                    buttons.append(value)

        new_cls.__menu_buttons__ = buttons
        return new_cls

    def get_buttons(cls):
        """Build an ordered mapping of emoji -> :class:`Button` for *cls*."""
        mapping = OrderedDict()
        for func in cls.__menu_buttons__:
            emoji = func.__menu_button__
            mapping[emoji] = Button(emoji, func, **func.__menu_button_kwargs__)
        return mapping
class Menu(metaclass=_MenuMeta):
    r"""An interface that allows handling menus by using reactions as buttons.

    Buttons should be marked with the :func:`button` decorator. Please note that
    this expects the methods to have a single parameter, the ``payload``. This
    ``payload`` is of type :class:`discord.RawReactionActionEvent`.

    Attributes
    ------------
    timeout: :class:`float`
        The timeout to wait between button inputs.
    delete_message_after: :class:`bool`
        Whether to delete the message after the menu interaction is done.
    clear_reactions_after: :class:`bool`
        Whether to clear reactions after the menu interaction is done.
        Note that :attr:`delete_message_after` takes priority over this attribute.
        If the bot does not have permissions to clear the reactions then it will
        delete the reactions one by one.
    check_embeds: :class:`bool`
        Whether to verify embed permissions as well.
    ctx: Optional[:class:`commands.Context`]
        The context that started this pagination session or ``None`` if it hasn't
        been started yet.
    bot: Optional[:class:`commands.Bot`]
        The bot that is running this pagination session or ``None`` if it hasn't
        been started yet.
    message: Optional[:class:`discord.Message`]
        The message that has been sent for handling the menu. This is the returned
        message of :meth:`send_initial_message`. You can set it in order to avoid
        calling :meth:`send_initial_message`\, if for example you have a pre-existing
        message you want to attach a menu to.
    """

    def __init__(self, *, timeout=180.0, delete_message_after=False,
                 clear_reactions_after=False, check_embeds=False, message=None):
        self.timeout = timeout
        self.delete_message_after = delete_message_after
        self.clear_reactions_after = clear_reactions_after
        self.check_embeds = check_embeds
        # Set in start() once channel permissions are known; gates the
        # "clear all reactions at once" fast paths.
        self._can_remove_reactions = False
        # Background tasks spawned by start() (name-mangled to _Menu__tasks).
        self.__tasks = []
        self._running = True
        self.message = message
        self.ctx = None
        self.bot = None
        self._author_id = None
        self._buttons = self.__class__.get_buttons()
        # Serialises button callbacks whose Button.lock flag is True.
        self._lock = asyncio.Lock()
        # Signalled when the internal loop finishes; awaited by start(wait=True).
        self._event = asyncio.Event()

    # NOTE: cached_property -- the rest of this class invalidates the cache
    # with ``del self.buttons`` whenever the button set changes.
    @discord.utils.cached_property
    def buttons(self):
        """Retrieves the buttons that are to be used for this menu session.

        Skipped buttons are not in the resulting dictionary.

        Returns
        ---------
        Mapping[:class:`str`, :class:`Button`]
            A mapping of button emoji to the actual button class.
        """
        buttons = sorted(self._buttons.values(), key=lambda b: b.position)
        return {
            button.emoji: button
            for button in buttons
            if button.is_valid(self)
        }

    def add_button(self, button, *, react=False):
        """|maybecoro|

        Adds a button to the list of buttons.

        If the menu has already been started then the button will
        not be added unless the ``react`` keyword-only argument is
        set to ``True``. Note that when this happens this function
        will need to be awaited.

        If a button with the same emoji is added then it is overridden.

        .. warning::

            If the menu has started and the reaction is added, the order
            property of the newly added button is ignored due to an API
            limitation with Discord and the fact that reaction ordering
            is not guaranteed.

        Parameters
        ------------
        button: :class:`Button`
            The button to add.
        react: :class:`bool`
            Whether to add a reaction if the menu has been started.
            Note this turns the method into a coroutine.

        Raises
        ---------
        MenuError
            Tried to use ``react`` when the menu had not been started.
        discord.HTTPException
            Adding the reaction failed.
        """
        self._buttons[button.emoji] = button

        if react:
            # A non-empty task list means start() has run and the menu is live.
            if self.__tasks:
                async def wrapped():
                    # Add the reaction
                    try:
                        await self.message.add_reaction(button.emoji)
                    except discord.HTTPException:
                        # NOTE(review): a plain re-raise; kept so the success
                        # branch below only runs when the HTTP call worked.
                        raise
                    else:
                        # Update the cache to have the value
                        self.buttons[button.emoji] = button
                return wrapped()

            async def dummy():
                raise MenuError('Menu has not been started yet')
            return dummy()

    def remove_button(self, emoji, *, react=False):
        """|maybecoro|

        Removes a button from the list of buttons.

        This operates similar to :meth:`add_button`.

        Parameters
        ------------
        emoji: Union[:class:`Button`, :class:`str`]
            The emoji or the button to remove.
        react: :class:`bool`
            Whether to remove the reaction if the menu has been started.
            Note this turns the method into a coroutine.

        Raises
        ---------
        MenuError
            Tried to use ``react`` when the menu had not been started.
        discord.HTTPException
            Removing the reaction failed.
        """
        if isinstance(emoji, Button):
            emoji = emoji.emoji
        else:
            emoji = _cast_emoji(emoji)

        self._buttons.pop(emoji, None)

        if react:
            if self.__tasks:
                async def wrapped():
                    # Remove the reaction from being processable
                    # Removing it from the cache first makes it so the check
                    # doesn't get triggered.
                    self.buttons.pop(emoji, None)
                    await self.message.remove_reaction(emoji, self.__me)
                return wrapped()

            async def dummy():
                raise MenuError('Menu has not been started yet')
            return dummy()

    def clear_buttons(self, *, react=False):
        """|maybecoro|

        Removes all buttons from the list of buttons.

        If the menu has already been started then the buttons will
        not be removed unless the ``react`` keyword-only argument is
        set to ``True``. Note that when this happens this function
        will need to be awaited.

        Parameters
        ------------
        react: :class:`bool`
            Whether to clear the reactions if the menu has been started.
            Note this turns the method into a coroutine.

        Raises
        ---------
        MenuError
            Tried to use ``react`` when the menu had not been started.
        discord.HTTPException
            Clearing the reactions failed.
        """
        self._buttons.clear()

        if react:
            if self.__tasks:
                async def wrapped():
                    # A fast path if we have permissions
                    if self._can_remove_reactions:
                        try:
                            del self.buttons
                        except AttributeError:
                            pass
                        finally:
                            await self.message.clear_reactions()
                        return

                    # Remove the cache (the next call will have the updated buttons)
                    reactions = list(self.buttons.keys())
                    try:
                        del self.buttons
                    except AttributeError:
                        pass

                    for reaction in reactions:
                        await self.message.remove_reaction(reaction, self.__me)
                return wrapped()

            async def dummy():
                raise MenuError('Menu has not been started yet')
            return dummy()

    def should_add_reactions(self):
        """:class:`bool`: Whether to add reactions to this menu session."""
        # Truthy when at least one (non-skipped) button exists.
        return len(self.buttons)

    def _verify_permissions(self, ctx, channel, permissions):
        # Raise the matching MenuError subclass for each permission the
        # session cannot work without; also records manage_messages.
        if not permissions.send_messages:
            raise CannotSendMessages()

        if self.check_embeds and not permissions.embed_links:
            raise CannotEmbedLinks()

        self._can_remove_reactions = permissions.manage_messages
        if self.should_add_reactions():
            if not permissions.add_reactions:
                raise CannotAddReactions()
            if not permissions.read_message_history:
                raise CannotReadMessageHistory()

    def reaction_check(self, payload):
        """The function that is used to check whether the payload should be processed.
        This is passed to :meth:`discord.ext.commands.Bot.wait_for <Bot.wait_for>`.

        There should be no reason to override this function for most users.

        Parameters
        ------------
        payload: :class:`discord.RawReactionActionEvent`
            The payload to check.

        Returns
        ---------
        :class:`bool`
            Whether the payload should be processed.
        """
        if payload.message_id != self.message.id:
            return False
        # Only the invoking author or the bot owner(s) may drive the menu.
        if payload.user_id not in {self.bot.owner_id, self._author_id, *self.bot.owner_ids}:
            return False

        return payload.emoji in self.buttons

    async def _internal_loop(self):
        # Core dispatch loop: waits for reaction add/remove events and fans
        # each payload out to update(); the finally block handles clean-up.
        try:
            self.__timed_out = False
            loop = self.bot.loop
            # Ensure the name exists for the cancellation handling
            tasks = []
            while self._running:
                tasks = [
                    asyncio.ensure_future(self.bot.wait_for('raw_reaction_add', check=self.reaction_check)),
                    asyncio.ensure_future(self.bot.wait_for('raw_reaction_remove', check=self.reaction_check))
                ]
                done, pending = await asyncio.wait(tasks, timeout=self.timeout, return_when=asyncio.FIRST_COMPLETED)
                for task in pending:
                    task.cancel()
                if len(done) == 0:
                    raise asyncio.TimeoutError()

                # Exception will propagate if e.g. cancelled or timed out
                payload = done.pop().result()
                loop.create_task(self.update(payload))

                # NOTE: Removing the reaction ourselves after it's been done when
                # mixed with the checks above is incredibly racy.
                # There is no guarantee when the MESSAGE_REACTION_REMOVE event will
                # be called, and chances are when it does happen it'll always be
                # after the remove_reaction HTTP call has returned back to the caller
                # which means that the stuff above will catch the reaction that we
                # just removed.

                # For the future sake of myself and to save myself the hours in the future
                # consider this my warning.
        except asyncio.TimeoutError:
            self.__timed_out = True
        finally:
            self._event.set()

            # Cancel any outstanding tasks (if any)
            for task in tasks:
                task.cancel()

            try:
                await self.finalize(self.__timed_out)
            except Exception:
                pass
            finally:
                self.__timed_out = False

            # Can't do any requests if the bot is closed
            if self.bot.is_closed():
                return

            # Wrap it in another block anyway just to ensure
            # nothing leaks out during clean-up
            try:
                if self.delete_message_after:
                    return await self.message.delete()

                if self.clear_reactions_after:
                    if self._can_remove_reactions:
                        return await self.message.clear_reactions()

                    for button_emoji in self.buttons:
                        try:
                            await self.message.remove_reaction(button_emoji, self.__me)
                        except discord.HTTPException:
                            continue
            except Exception:
                pass

    async def update(self, payload):
        """|coro|

        Updates the menu after an event has been received.

        Parameters
        -----------
        payload: :class:`discord.RawReactionActionEvent`
            The reaction event that triggered this update.
        """
        button = self.buttons[payload.emoji]
        if not self._running:
            return

        try:
            if button.lock:
                # Locking buttons run one at a time; re-check _running after
                # acquiring in case the menu was stopped while waiting.
                async with self._lock:
                    if self._running:
                        await button(self, payload)
            else:
                await button(self, payload)
        except Exception:
            # TODO: logging?
            import traceback
            traceback.print_exc()

    async def start(self, ctx, *, channel=None, wait=False):
        """|coro|

        Starts the interactive menu session.

        Parameters
        -----------
        ctx: :class:`Context`
            The invocation context to use.
        channel: :class:`discord.abc.Messageable`
            The messageable to send the message to. If not given
            then it defaults to the channel in the context.
        wait: :class:`bool`
            Whether to wait until the menu is completed before
            returning back to the caller.

        Raises
        -------
        MenuError
            An error happened when verifying permissions.
        discord.HTTPException
            Adding a reaction failed.
        """

        # Clear the buttons cache and re-compute if possible.
        try:
            del self.buttons
        except AttributeError:
            pass

        self.bot = bot = ctx.bot
        self.ctx = ctx
        self._author_id = ctx.author.id
        channel = channel or ctx.channel
        is_guild = isinstance(channel, discord.abc.GuildChannel)
        me = ctx.guild.me if is_guild else ctx.bot.user
        permissions = channel.permissions_for(me)
        # Lightweight stand-in for the bot user, used for reaction removal.
        self.__me = discord.Object(id=me.id)
        self._verify_permissions(ctx, channel, permissions)
        self._event.clear()
        msg = self.message
        if msg is None:
            self.message = msg = await self.send_initial_message(ctx, channel)

        if self.should_add_reactions():
            # Start the task first so we can listen to reactions before doing anything
            for task in self.__tasks:
                task.cancel()
            self.__tasks.clear()

            self._running = True
            self.__tasks.append(bot.loop.create_task(self._internal_loop()))

            async def add_reactions_task():
                for emoji in self.buttons:
                    await msg.add_reaction(emoji)

            self.__tasks.append(bot.loop.create_task(add_reactions_task()))

            if wait:
                await self._event.wait()

    async def finalize(self, timed_out):
        """|coro|

        A coroutine that is called when the menu loop has completed
        its run. This is useful if some asynchronous clean-up is
        required after the fact.

        Parameters
        --------------
        timed_out: :class:`bool`
            Whether the menu completed due to timing out.
        """
        return

    async def send_initial_message(self, ctx, channel):
        """|coro|

        Sends the initial message for the menu session.

        This is internally assigned to the :attr:`message` attribute.

        Subclasses must implement this if they don't set the
        :attr:`message` attribute themselves before starting the
        menu via :meth:`start`.

        Parameters
        ------------
        ctx: :class:`Context`
            The invocation context to use.
        channel: :class:`discord.abc.Messageable`
            The messageable to send the message to.

        Returns
        --------
        :class:`discord.Message`
            The message that has been sent.
        """
        raise NotImplementedError

    def stop(self):
        """Stops the internal loop."""
        self._running = False
        for task in self.__tasks:
            task.cancel()
        self.__tasks.clear()
class PageSource:
    """An interface representing a menu page's data source for the actual menu page.

    Subclasses must implement the backing resource along with the following methods:

    - :meth:`get_page`
    - :meth:`is_paginating`
    - :meth:`format_page`
    """

    async def _prepare_once(self):
        # Run prepare() at most once per instance.  The sentinel attribute is
        # looked up via its name-mangled spelling, which is equivalent to the
        # ``try: self.__prepare / except AttributeError`` idiom.
        if getattr(self, '_PageSource__prepare', False):
            return
        await self.prepare()
        self.__prepare = True

    async def prepare(self):
        """|coro|

        An asynchronous hook that runs exactly once, after ``__init__`` but
        before the source is used, for any set up that must await something.

        The default implementation does nothing.
        """
        return

    def is_paginating(self):
        """An abstract method that tells :class:`MenuPages` whether pagination
        should start at all, i.e. whether reactions should be added.

        Subclasses must implement this.

        Returns
        --------
        :class:`bool`
            Whether to trigger pagination.
        """
        raise NotImplementedError

    def get_max_pages(self):
        """An optional abstract method that retrieves the maximum number of
        pages this page source has. Useful for UX purposes.

        The default implementation returns ``None`` (unknown).

        Returns
        --------
        Optional[:class:`int`]
            The maximum number of pages required to properly
            paginate the elements, if given.
        """
        return None

    async def get_page(self, page_number):
        """|coro|

        An abstract method that retrieves the object representing the page;
        the result is later passed into :meth:`format_page`.

        Subclasses must implement this.

        .. note::

            ``page_number`` is zero-indexed between [0, :meth:`get_max_pages`),
            if there is a maximum number of pages.

        Parameters
        -----------
        page_number: :class:`int`
            The page number to access.

        Returns
        ---------
        Any
            The object represented by that page.
        """
        raise NotImplementedError

    async def format_page(self, menu, page):
        """|maybecoro|

        An abstract method to format the page.

        May return a :class:`str` (the ``content`` keyword argument), a
        :class:`discord.Embed` (the ``embed`` keyword argument), or a
        :class:`dict` of keyword arguments understood by both
        :meth:`discord.Message.edit` and :meth:`discord.abc.Messageable.send`.

        Parameters
        ------------
        menu: :class:`Menu`
            The menu that wants to format this page.
        page: Any
            The page returned by :meth:`PageSource.get_page`.

        Returns
        ---------
        Union[:class:`str`, :class:`discord.Embed`, :class:`dict`]
            See above.
        """
        raise NotImplementedError
class MenuPages(Menu):
    """A special type of Menu dedicated to pagination.

    Attributes
    ------------
    current_page: :class:`int`
        The current page that we are in. Zero-indexed
        between [0, :attr:`PageSource.max_pages`).
    """

    def __init__(self, source, **kwargs):
        self._source = source
        self.current_page = 0
        super().__init__(**kwargs)

    @property
    def source(self):
        """:class:`PageSource`: The source where the data comes from."""
        return self._source

    async def change_source(self, source):
        """|coro|

        Changes the :class:`PageSource` to a different one at runtime.

        Once the change has been set, the menu is moved to the first
        page of the new source if it was started. This effectively
        changes the :attr:`current_page` to 0.

        Raises
        --------
        TypeError
            A :class:`PageSource` was not passed.
        """
        if not isinstance(source, PageSource):
            raise TypeError('Expected {0!r} not {1.__class__!r}.'.format(PageSource, source))

        self._source = source
        self.current_page = 0
        if self.message is not None:
            await source._prepare_once()
            await self.show_page(0)

    def should_add_reactions(self):
        # A single-page source needs no reaction buttons.
        return self._source.is_paginating()

    async def _get_kwargs_from_page(self, page):
        # Normalise format_page()'s return value (dict / str / Embed) into
        # keyword arguments usable by Message.edit and Messageable.send.
        value = await discord.utils.maybe_coroutine(self._source.format_page, self, page)
        if isinstance(value, dict):
            return value
        elif isinstance(value, str):
            return { 'content': value, 'embed': None }
        elif isinstance(value, discord.Embed):
            return { 'embed': value, 'content': None }
        # NOTE(review): any other return type falls through to None, which
        # makes the subsequent edit/send call fail loudly.

    async def show_page(self, page_number):
        """|coro| Sets :attr:`current_page` and edits the message to show it."""
        page = await self._source.get_page(page_number)
        self.current_page = page_number
        kwargs = await self._get_kwargs_from_page(page)
        await self.message.edit(**kwargs)

    async def send_initial_message(self, ctx, channel):
        """|coro|

        The default implementation of :meth:`Menu.send_initial_message`
        for the interactive pagination session.

        This implementation shows the first page of the source.
        """
        page = await self._source.get_page(0)
        kwargs = await self._get_kwargs_from_page(page)
        return await channel.send(**kwargs)

    async def start(self, ctx, *, channel=None, wait=False):
        # Give the source its one-time asynchronous set up before starting.
        await self._source._prepare_once()
        await super().start(ctx, channel=channel, wait=wait)

    async def show_checked_page(self, page_number):
        """|coro| Like :meth:`show_page` but ignores out-of-range pages."""
        max_pages = self._source.get_max_pages()
        try:
            if max_pages is None:
                # If it doesn't give maximum pages, it cannot be checked
                await self.show_page(page_number)
            elif max_pages > page_number >= 0:
                await self.show_page(page_number)
        except IndexError:
            # An error happened that can be handled, so ignore it.
            pass

    async def show_current_page(self):
        """|coro| Re-renders the current page."""
        # BUGFIX: ``is_paginating`` is a *method* on PageSource; the previous
        # ``self._source.paginating`` attribute does not exist on any source
        # in this module, so this always raised AttributeError when called.
        if self._source.is_paginating():
            await self.show_page(self.current_page)

    def _skip_double_triangle_buttons(self):
        # Hide the "jump to first/last" buttons when there are at most two
        # pages (or the page count is unknown).
        max_pages = self._source.get_max_pages()
        if max_pages is None:
            return True
        return max_pages <= 2

    @button('\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f',
            position=First(0), skip_if=_skip_double_triangle_buttons)
    async def go_to_first_page(self, payload):
        """go to the first page"""
        await self.show_page(0)

    @button('\N{BLACK LEFT-POINTING TRIANGLE}\ufe0f', position=First(1))
    async def go_to_previous_page(self, payload):
        """go to the previous page"""
        await self.show_checked_page(self.current_page - 1)

    @button('\N{BLACK RIGHT-POINTING TRIANGLE}\ufe0f', position=Last(0))
    async def go_to_next_page(self, payload):
        """go to the next page"""
        await self.show_checked_page(self.current_page + 1)

    @button('\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f',
            position=Last(1), skip_if=_skip_double_triangle_buttons)
    async def go_to_last_page(self, payload):
        """go to the last page"""
        # The call here is safe because it's guarded by skip_if
        await self.show_page(self._source.get_max_pages() - 1)

    @button('\N{BLACK SQUARE FOR STOP}\ufe0f', position=Last(2))
    async def stop_pages(self, payload):
        """stops the pagination session."""
        self.stop()
class ListPageSource(PageSource):
    """A data source backed by an in-memory sequence of items.

    No formatting is done here; implement :meth:`format_page` yourself.

    Attributes
    ------------
    entries: Sequence[Any]
        The sequence of items to paginate.
    per_page: :class:`int`
        How many elements are in a page.
    """

    def __init__(self, entries, *, per_page):
        self.entries = entries
        self.per_page = per_page

        # ceil(len(entries) / per_page) without touching floats.
        full_pages, remainder = divmod(len(entries), per_page)
        self._max_pages = full_pages + 1 if remainder else full_pages

    def is_paginating(self):
        """:class:`bool`: Whether pagination is required."""
        return len(self.entries) > self.per_page

    def get_max_pages(self):
        """:class:`int`: The maximum number of pages required to paginate this sequence."""
        return self._max_pages

    async def get_page(self, page_number):
        """Returns either a single element of the sequence or
        a slice of the sequence.

        If :attr:`per_page` is set to ``1`` then this returns a single
        element. Otherwise it returns at most :attr:`per_page` elements.

        Returns
        ---------
        Union[Any, List[Any]]
            The data returned.
        """
        if self.per_page == 1:
            return self.entries[page_number]
        start = page_number * self.per_page
        return self.entries[start:start + self.per_page]
# One page of a grouped source: the group-by key plus that page's items.
_GroupByEntry = namedtuple('_GroupByEntry', 'key items')


class GroupByPageSource(ListPageSource):
    """A data source for a grouped-by sequence of items.

    This inherits from :class:`ListPageSource`.  No formatting is done here;
    implement :meth:`format_page` yourself.

    Parameters
    ------------
    entries: Sequence[Any]
        The sequence of items to paginate and group.
    key: Callable[[Any], Any]
        A key function to do the grouping with.
    sort: :class:`bool`
        Whether to sort the sequence before grouping it.
        The elements are sorted according to the ``key`` function passed.
    per_page: :class:`int`
        How many elements to have per page of the group.
    """

    def __init__(self, entries, *, key, per_page, sort=True):
        # itertools.groupby only groups adjacent runs, hence the sort option.
        self.__entries = entries if not sort else sorted(entries, key=key)
        self.nested_per_page = per_page

        nested = []
        for group_key, group in itertools.groupby(self.__entries, key=key):
            items = list(group)
            if not items:
                continue
            # Chunk each group into per_page-sized pages.
            chunks = (items[i:i + per_page] for i in range(0, len(items), per_page))
            nested.extend(_GroupByEntry(key=group_key, items=chunk) for chunk in chunks)

        # Each _GroupByEntry is one logical page of the outer paginator.
        super().__init__(nested, per_page=1)

    async def get_page(self, page_number):
        return self.entries[page_number]

    async def format_page(self, menu, entry):
        """An abstract method to format the page.

        Works like :meth:`ListPageSource.format_page` except ``entry`` is a
        ``(key, items)`` namedtuple: the group-by key and the chunk of
        paginated items belonging to that group.

        Returns
        ---------
        :class:`dict`
            A dictionary representing keyword-arguments to pass to
            the message related calls.
        """
        raise NotImplementedError
def _aiter(obj, *, _isasync=inspect.iscoroutinefunction):
    """Return the async iterator for *obj*, akin to the ``aiter`` builtin."""
    cls = obj.__class__
    factory = getattr(cls, '__aiter__', None)
    if factory is None:
        raise TypeError('{0.__name__!r} object is not an async iterable'.format(cls))

    iterator = factory(obj)
    # Reject __aiter__ implementations that hand back a coroutine function.
    if _isasync(iterator):
        raise TypeError('{0.__name__!r} object is not an async iterable'.format(cls))
    return iterator
class AsyncIteratorPageSource(PageSource):
    """A data source backed by an asynchronous iterator.

    Elements are pulled lazily and cached, so pages can be requested as long
    as the iterator can still supply enough elements.  No formatting is done
    here; implement :meth:`format_page` yourself.

    Parameters
    ------------
    iterator: AsyncIterator[Any]
        The asynchronous iterator to paginate.
    per_page: :class:`int`
        How many elements to have per page.
    """

    def __init__(self, iterator, *, per_page):
        self.iterator = _aiter(iterator)
        self.per_page = per_page
        self._exhausted = False
        self._cache = []

    async def _iterate(self, n):
        # Pull up to n further elements into the cache, marking exhaustion
        # when the underlying iterator runs dry.
        for _ in range(n):
            try:
                element = await self.iterator.__anext__()
            except StopAsyncIteration:
                self._exhausted = True
                break
            self._cache.append(element)

    async def prepare(self, *, _aiter=_aiter):
        # Fetch one element beyond a single page so is_paginating() can tell
        # whether a second page exists.
        await self._iterate(self.per_page + 1)

    def is_paginating(self):
        """:class:`bool`: Whether pagination is required."""
        return len(self._cache) > self.per_page

    async def _get_single_page(self, page_number):
        if page_number < 0:
            raise IndexError('Negative page number.')

        if not self._exhausted and len(self._cache) <= page_number:
            await self._iterate((page_number + 1) - len(self._cache))
        return self._cache[page_number]

    async def _get_page_range(self, page_number):
        if page_number < 0:
            raise IndexError('Negative page number.')

        start = page_number * self.per_page
        stop = start + self.per_page
        if not self._exhausted and len(self._cache) <= stop:
            await self._iterate((stop + 1) - len(self._cache))

        entries = self._cache[start:stop]
        if not entries and stop > len(self._cache):
            raise IndexError('Went too far')
        return entries

    async def get_page(self, page_number):
        """Returns either a single element of the sequence or
        a slice of the sequence.

        If :attr:`per_page` is set to ``1`` then this returns a single
        element. Otherwise it returns at most :attr:`per_page` elements.

        Returns
        ---------
        Union[Any, List[Any]]
            The data returned.
        """
        if self.per_page == 1:
            return await self._get_single_page(page_number)
        return await self._get_page_range(page_number)
| gpl-3.0 |
thicklizard/cm-otg-patch | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but
hierarchical tree of calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    # Class-level virtual root; assigned a CallTree instance in main().
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        # func: function name; time: timestamp string from the trace, or
        # None for synthetic nodes (ROOT and getParent() fill-ins).
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        # Walk up towards ROOT looking for an ancestor named func.
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            # Caller never seen before: attach it directly under ROOT.
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Render this node and, recursively, its subtree.  `branch` carries
        # the ASCII prefix accumulated from the ancestors' levels.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)

        i = 0
        if lastChild:
            # Last sibling: replace the trailing '|' with a space so the
            # subtree is not visually connected to a non-existent sibling.
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                    " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                    " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """Signals a final, incomplete trace line caused by the pipe breaking.

    The processing loop stops and the offending line is discarded.
    """
class CommentLineException(Exception):
    """Signals a comment line (as found at the start of the trace file).

    Such lines are simply skipped by the caller.
    """
def parseLine(line):
    """Parse one function-tracer line into (calltime, callee, caller).

    Raises CommentLineException for header/comment lines (starting with
    "#") and BrokenLineException when the line does not match the trace
    format (typically the truncated last line of a broken pipe).
    """
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    # Symbol names may contain dots (e.g. "foo.isra.0" emitted for
    # compiler-cloned functions), so match them with ".+" instead of
    # "\w+", which made such lines raise BrokenLineException.
    m = re.match("[^]]+?\\] +([0-9.]+): (.+) <-(.+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))
def main():
    """Read a raw function trace on stdin and print the drawn call tree."""
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated last line of a broken pipe: stop processing.
            break
        except CommentLineException:
            continue
        # Re-anchor at the caller, then descend into the callee.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # Parenthesized print works under both Python 2 and Python 3
    # (the original bare "print X" statement is Python-2 only).
    print(CallTree.ROOT)

if __name__ == "__main__":
    main()
| gpl-2.0 |
rghe/ansible | lib/ansible/plugins/action/sros.py | 21 | 3325 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.sros.sros import sros_provider_spec
from ansible.module_utils.network.common.utils import load_provider
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
    """Action plugin for Nokia SR OS network modules.

    Translates a legacy ``provider`` dict (connection=local) into a
    persistent ``network_cli`` connection before delegating to the
    normal action module.
    """

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        if self._play_context.connection == 'network_cli':
            # provider settings are redundant once network_cli is in use.
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']
        elif self._play_context.connection == 'local':
            provider = load_provider(sros_provider_spec, self._task.args)

            # Build a copy of the play context that targets the device
            # over network_cli; provider values win over play defaults.
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'sros'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
            connection.set_options(direct={'persistent_command_timeout': command_timeout})

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            # Expose the persistent-connection socket to the module code.
            task_vars['ansible_socket'] = socket_path
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
| gpl-3.0 |
40223144/2015cdafinal | static/Brython3.1.1-20150328-091302/Lib/sre_parse.py | 630 | 29657 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
# Characters with special meaning outside a character class.
SPECIAL_CHARS = ".\\[{()*+?^$|"
# Characters that start a repeat operator.
REPEAT_CHARS = "*+?{"

DIGITS = set("0123456789")

OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")

WHITESPACE = set(" \t\n\r\v\f")

# One-character escapes that map straight to a literal code point.
ESCAPES = {
    r"\a": (LITERAL, ord("\a")),
    r"\b": (LITERAL, ord("\b")),
    r"\f": (LITERAL, ord("\f")),
    r"\n": (LITERAL, ord("\n")),
    r"\r": (LITERAL, ord("\r")),
    r"\t": (LITERAL, ord("\t")),
    r"\v": (LITERAL, ord("\v")),
    r"\\": (LITERAL, ord("\\"))
}

# Escapes that expand to position assertions or character categories.
CATEGORIES = {
    r"\A": (AT, AT_BEGINNING_STRING), # start of string
    r"\b": (AT, AT_BOUNDARY),
    r"\B": (AT, AT_NON_BOUNDARY),
    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
    r"\Z": (AT, AT_END_STRING), # end of string
}

# Inline flag letters ("(?i)" etc.) mapped to SRE flag bits.
FLAGS = {
    # standard flags
    "i": SRE_FLAG_IGNORECASE,
    "L": SRE_FLAG_LOCALE,
    "m": SRE_FLAG_MULTILINE,
    "s": SRE_FLAG_DOTALL,
    "x": SRE_FLAG_VERBOSE,
    # extensions
    "a": SRE_FLAG_ASCII,
    "t": SRE_FLAG_TEMPLATE,
    "u": SRE_FLAG_UNICODE,
}
class Pattern:
    # Master pattern object: holds the state shared across one whole
    # parse (flags, group numbering, name -> group-number mapping).
    def __init__(self):
        self.flags = 0
        self.open = []        # group ids whose ")" has not been seen yet
        self.groups = 1       # next free group number (0 is the whole match)
        self.groupdict = {}

    def opengroup(self, name=None):
        # Allocate the next group number; for named groups also record
        # the name, rejecting redefinitions.
        gid = self.groups
        self.groups = gid + 1
        if name is not None:
            previous = self.groupdict.get(name)
            if previous is not None:
                raise error("redefinition of group name %s as group %d; "
                            "was group %d" % (repr(name), gid, previous))
            self.groupdict[name] = gid
        self.open.append(gid)
        return gid

    def closegroup(self, gid):
        self.open.remove(gid)

    def checkgroup(self, gid):
        # A group may only be back-referenced once it is fully parsed.
        return gid < self.groups and gid not in self.open
class SubPattern:
    # a subpattern, in intermediate form: a list of (opcode, argument)
    # tuples plus a back-reference to the owning Pattern object.
    def __init__(self, pattern, data=None):
        self.pattern = pattern
        if data is None:
            data = []
        self.data = data
        self.width = None   # cached (min, max) width, see getwidth()
    def __iter__(self):
        return iter(self.data)
    def dump(self, level=0):
        # Debugging helper: pretty-print the opcode stream, indenting
        # nested subpatterns by `level`.
        nl = 1
        seqtypes = (tuple, list)
        for op, av in self.data:
            print(level*" " + op, end=' '); nl = 0
            if op == "in":
                # member sublanguage
                print(); nl = 1
                for op, a in av:
                    print((level+1)*" " + op, a)
            elif op == "branch":
                print(); nl = 1
                i = 0
                for a in av[1]:
                    if i > 0:
                        print(level*" " + "or")
                    a.dump(level+1); nl = 1
                    i = i + 1
            elif isinstance(av, seqtypes):
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl: print()
                        a.dump(level+1); nl = 1
                    else:
                        print(a, end=' ') ; nl = 0
            else:
                print(av, end=' ') ; nl = 0
        if not nl: print()
    def __repr__(self):
        return repr(self.data)
    def __len__(self):
        return len(self.data)
    def __delitem__(self, index):
        del self.data[index]
    def __getitem__(self, index):
        # Slicing yields a new SubPattern sharing the same Pattern state.
        if isinstance(index, slice):
            return SubPattern(self.pattern, self.data[index])
        return self.data[index]
    def __setitem__(self, index, code):
        self.data[index] = code
    def insert(self, index, code):
        self.data.insert(index, code)
    def append(self, code):
        self.data.append(code)
    def getwidth(self):
        # determine the width (min, max) for this subpattern; the result
        # is cached in self.width after the first call.
        if self.width:
            return self.width
        lo = hi = 0
        UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
        REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
        for op, av in self.data:
            if op is BRANCH:
                # width of a branch: min/max over all alternatives
                i = sys.maxsize
                j = 0
                for av in av[1]:
                    l, h = av.getwidth()
                    i = min(i, l)
                    j = max(j, h)
                lo = lo + i
                hi = hi + j
            elif op is CALL:
                i, j = av.getwidth()
                lo = lo + i
                hi = hi + j
            elif op is SUBPATTERN:
                i, j = av[1].getwidth()
                lo = lo + i
                hi = hi + j
            elif op in REPEATCODES:
                # repeat: scale the inner width by the min/max counts
                i, j = av[2].getwidth()
                lo = lo + int(i) * av[0]
                hi = hi + int(j) * av[1]
            elif op in UNITCODES:
                lo = lo + 1
                hi = hi + 1
            elif op == SUCCESS:
                break
        # clamp to sys.maxsize so unbounded repeats stay representable
        self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
        return self.width
class Tokenizer:
    # Lexer over the pattern: exposes one token of lookahead in
    # self.next. A token is a single character, or "\" plus the next
    # character. Works for both str and bytes patterns.
    def __init__(self, string):
        self.istext = isinstance(string, str)
        self.string = string
        self.index = 0
        self.__next()
    def __next(self):
        # Advance self.index past the current token and load the next
        # one into self.next (None at end of input).
        if self.index >= len(self.string):
            self.next = None
            return
        char = self.string[self.index:self.index+1]
        # Special case for the str8, since indexing returns a integer
        # XXX This is only needed for test_bug_926075 in test_re.py
        if char and not self.istext:
            char = chr(char[0])
        if char == "\\":
            # Fold the escaped character into the token.
            try:
                c = self.string[self.index + 1]
            except IndexError:
                raise error("bogus escape (end of line)")
            if not self.istext:
                c = chr(c)
            char = char + c
        self.index = self.index + len(char)
        self.next = char
    def match(self, char, skip=1):
        # Return 1 if the lookahead equals char (consuming it unless
        # skip is false), else 0.
        if char == self.next:
            if skip:
                self.__next()
            return 1
        return 0
    def get(self):
        # Consume and return the current token.
        this = self.next
        self.__next()
        return this
    def getwhile(self, n, charset):
        # Consume up to n tokens while they are members of charset;
        # return them concatenated.
        result = ''
        for _ in range(n):
            c = self.next
            if c not in charset:
                break
            result += c
            self.__next()
        return result
    def tell(self):
        # Snapshot the position as an (index, lookahead) pair.
        return self.index, self.next
    def seek(self, index):
        # Restore a position previously saved by tell().
        self.index, self.next = index
def isident(char):
    # True if char may start an identifier: a letter or an underscore.
    return char == "_" or "a" <= char <= "z" or "A" <= char <= "Z"
def isdigit(char):
    # True if char is an ASCII decimal digit.
    return not (char < "0" or char > "9")
def isname(name):
    # Check that a group name is a valid identifier: a letter or
    # underscore, followed by letters, digits or underscores.
    if not isident(name[0]):
        return False
    return all(isident(ch) or isdigit(ch) for ch in name[1:])
def _class_escape(source, escape):
    # handle escape code inside character class; returns an
    # (opcode, argument) tuple or raises error for a bogus escape.
    code = ESCAPES.get(escape)
    if code:
        return code
    code = CATEGORIES.get(escape)
    # only category escapes that expand to a set (IN) are legal in [...]
    if code and code[0] == IN:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape (exactly two digits)
            escape += source.getwhile(2, HEXDIGITS)
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "u" and source.istext:
            # unicode escape (exactly four digits)
            escape += source.getwhile(4, HEXDIGITS)
            if len(escape) != 6:
                raise ValueError
            return LITERAL, int(escape[2:], 16)
        elif c == "U" and source.istext:
            # unicode escape (exactly eight digits)
            escape += source.getwhile(8, HEXDIGITS)
            if len(escape) != 10:
                raise ValueError
            c = int(escape[2:], 16)
            chr(c) # raise ValueError for invalid code
            return LITERAL, c
        elif c in OCTDIGITS:
            # octal escape (up to three digits)
            escape += source.getwhile(2, OCTDIGITS)
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # decimal group references are not allowed inside a class
            raise ValueError
        if len(escape) == 2:
            # plain escaped character, e.g. "\-"
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
    # handle escape code in expression (outside a character class);
    # returns an (opcode, argument) tuple or raises error.
    code = CATEGORIES.get(escape)
    if code:
        return code
    code = ESCAPES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape
            escape += source.getwhile(2, HEXDIGITS)
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "u" and source.istext:
            # unicode escape (exactly four digits)
            escape += source.getwhile(4, HEXDIGITS)
            if len(escape) != 6:
                raise ValueError
            return LITERAL, int(escape[2:], 16)
        elif c == "U" and source.istext:
            # unicode escape (exactly eight digits)
            escape += source.getwhile(8, HEXDIGITS)
            if len(escape) != 10:
                raise ValueError
            c = int(escape[2:], 16)
            chr(c) # raise ValueError for invalid code
            return LITERAL, c
        elif c == "0":
            # octal escape
            escape += source.getwhile(2, OCTDIGITS)
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # octal escape *or* decimal group reference (sigh)
            if source.next in DIGITS:
                escape = escape + source.get()
                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
                    source.next in OCTDIGITS):
                    # got three octal digits; this is an octal escape
                    escape = escape + source.get()
                    return LITERAL, int(escape[1:], 8) & 0xff
            # not an octal escape, so this is a group reference
            group = int(escape[1:])
            if group < state.groups:
                if not state.checkgroup(group):
                    raise error("cannot refer to open group")
                return GROUPREF, group
            raise ValueError
        if len(escape) == 2:
            # plain escaped character
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
    # parse an alternation: a|b|c
    items = []
    itemsappend = items.append
    sourcematch = source.match
    while 1:
        itemsappend(_parse(source, state))
        if sourcematch("|"):
            continue
        if not nested:
            break
        # inside a group the alternation must stop at ")" (left unconsumed)
        if not source.next or sourcematch(")", 0):
            break
        else:
            raise error("pattern not properly closed")

    if len(items) == 1:
        return items[0]

    subpattern = SubPattern(state)
    subpatternappend = subpattern.append

    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpatternappend(prefix)
            continue # check next one
        break

    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        setappend = set.append
        for item in items:
            setappend(item[0])
        subpatternappend((IN, set))
        return subpattern

    subpattern.append((BRANCH, (None, items)))
    return subpattern
def _parse_sub_cond(source, state, condgroup):
    # parse the body of a conditional backreference group (?(id)yes|no);
    # condgroup is the group number the condition refers to.
    item_yes = _parse(source, state)
    if source.match("|"):
        item_no = _parse(source, state)
        if source.match("|"):
            raise error("conditional backref with more than two branches")
    else:
        item_no = None
    if source.next and not source.match(")", 0):
        raise error("pattern not properly closed")
    subpattern = SubPattern(state)
    subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
    return subpattern
# Characters that terminate the current (sub)pattern.
_PATTERNENDERS = set("|)")
# Characters that may follow "(?" to introduce an assertion.
_ASSERTCHARS = set("=!<")
# Assertion characters allowed after "(?<" (lookbehind).
_LOOKBEHINDASSERTCHARS = set("=!")
# Opcodes that denote an already-parsed repeat (used to reject "a**").
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
    # parse a simple pattern (one alternative of an alternation)

    subpattern = SubPattern(state)

    # precompute constants into local variables
    subpatternappend = subpattern.append
    sourceget = source.get
    sourcematch = source.match
    _len = len
    PATTERNENDERS = _PATTERNENDERS
    ASSERTCHARS = _ASSERTCHARS
    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
    REPEATCODES = _REPEATCODES

    while 1:

        if source.next in PATTERNENDERS:
            break # end of subpattern
        this = sourceget()
        if this is None:
            break # end of pattern

        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                while 1:
                    this = sourceget()
                    if this in (None, "\n"):
                        break
                continue

        if this and this[0] not in SPECIAL_CHARS:
            # ordinary character
            subpatternappend((LITERAL, ord(this)))

        elif this == "[":
            # character set
            set = []
            setappend = set.append
##          if sourcematch(":"):
##              pass # handle character classes
            if sourcematch("^"):
                setappend((NEGATE, None))
            # check remaining characters
            start = set[:]
            while 1:
                this = sourceget()
                # "]" only closes the set once it is non-empty
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error("unexpected end of regular expression")
                if sourcematch("-"):
                    # potential range
                    this = sourceget()
                    if this == "]":
                        # trailing "-" is a literal, e.g. "[a-]"
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        setappend(code1)
                        setappend((LITERAL, ord("-")))
                        break
                    elif this:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error("bad character range")
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error("bad character range")
                        setappend((RANGE, (lo, hi)))
                    else:
                        raise error("unexpected end of regular expression")
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    setappend(code1)

            # XXX: <fl> should move set optimization to compiler!
            if _len(set)==1 and set[0][0] is LITERAL:
                subpatternappend(set[0]) # optimization
            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpatternappend((NOT_LITERAL, set[1][1])) # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpatternappend((IN, set))

        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT

            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                if source.next == "}":
                    # "{}" is a literal brace, not a repeat
                    subpatternappend((LITERAL, ord(this)))
                    continue
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if sourcematch(","):
                    while source.next in DIGITS:
                        hi = hi + sourceget()
                else:
                    hi = lo
                if not sourcematch("}"):
                    # malformed {...}: treat "{" as a literal and rewind
                    subpatternappend((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = int(lo)
                    if min >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if hi:
                    max = int(hi)
                    if max >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if max < min:
                    raise error("bad repeat interval")
            else:
                raise error("not supported")
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (_len(item) == 1 and item[0][0] == AT):
                raise error("nothing to repeat")
            if item[0][0] in REPEATCODES:
                raise error("multiple repeat")
            if sourcematch("?"):
                # trailing "?" makes the repeat non-greedy
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))

        elif this == ".":
            subpatternappend((ANY, None))

        elif this == "(":
            group = 1
            name = None
            condgroup = None
            if sourcematch("?"):
                group = 0
                # options
                if sourcematch("P"):
                    # python extensions
                    if sourcematch("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in group name")
                    elif sourcematch("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ")":
                                break
                            name = name + char
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in group name")
                        gid = state.groupdict.get(name)
                        if gid is None:
                            raise error("unknown group name")
                        subpatternappend((GROUPREF, gid))
                        continue
                    else:
                        char = sourceget()
                        if char is None:
                            raise error("unexpected end of pattern")
                        raise error("unknown specifier: ?P%s" % char)
                elif sourcematch(":"):
                    # non-capturing group
                    group = 2
                elif sourcematch("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        sourceget()
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    continue
                elif source.next in ASSERTCHARS:
                    # lookahead assertions
                    char = sourceget()
                    dir = 1
                    if char == "<":
                        if source.next not in LOOKBEHINDASSERTCHARS:
                            raise error("syntax error")
                        dir = -1 # lookbehind
                        char = sourceget()
                    p = _parse_sub(source, state)
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    if char == "=":
                        subpatternappend((ASSERT, (dir, p)))
                    else:
                        subpatternappend((ASSERT_NOT, (dir, p)))
                    continue
                elif sourcematch("("):
                    # conditional backreference group
                    condname = ""
                    while 1:
                        char = sourceget()
                        if char is None:
                            raise error("unterminated name")
                        if char == ")":
                            break
                        condname = condname + char
                    group = 2
                    if not condname:
                        raise error("missing group name")
                    if isname(condname):
                        condgroup = state.groupdict.get(condname)
                        if condgroup is None:
                            raise error("unknown group name")
                    else:
                        try:
                            condgroup = int(condname)
                        except ValueError:
                            raise error("bad character in group name")
                else:
                    # flags
                    if not source.next in FLAGS:
                        raise error("unexpected end of pattern")
                    while source.next in FLAGS:
                        state.flags = state.flags | FLAGS[sourceget()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                if condgroup:
                    p = _parse_sub_cond(source, state, condgroup)
                else:
                    p = _parse_sub(source, state)
                if not sourcematch(")"):
                    raise error("unbalanced parenthesis")
                if group is not None:
                    state.closegroup(group)
                subpatternappend((SUBPATTERN, (group, p)))
            else:
                # flag-only group "(?...)" must close immediately
                while 1:
                    char = sourceget()
                    if char is None:
                        raise error("unexpected end of pattern")
                    if char == ")":
                        break
                    raise error("unknown extension")

        elif this == "^":
            subpatternappend((AT, AT_BEGINNING))

        elif this == "$":
            subpattern.append((AT, AT_END))

        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpatternappend(code)

        else:
            raise error("parser error")

    return subpattern
def fix_flags(src, flags):
    """Check the flags against the pattern type (str vs bytes) and add
    SRE_FLAG_UNICODE for str patterns unless ASCII was requested."""
    if isinstance(src, str):
        if flags & SRE_FLAG_ASCII:
            if flags & SRE_FLAG_UNICODE:
                raise ValueError("ASCII and UNICODE flags are incompatible")
        else:
            flags |= SRE_FLAG_UNICODE
    elif flags & SRE_FLAG_UNICODE:
        raise ValueError("can't use UNICODE flag with a bytes pattern")
    return flags
def parse(str, flags=0, pattern=None):
    # parse 're' pattern into list of (opcode, argument) tuples;
    # this is the module's main entry point.

    source = Tokenizer(str)

    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str

    p = _parse_sub(source, pattern, 0)
    p.pattern.flags = fix_flags(str, p.pattern.flags)

    # at top level nothing may remain after the pattern
    tail = source.get()
    if tail == ")":
        raise error("unbalanced parenthesis")
    elif tail:
        raise error("bogus characters at end of regular expression")

    if flags & SRE_FLAG_DEBUG:
        p.dump()

    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern. to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)

    return p
def parse_template(source, pattern):
    # parse 're' replacement string into list of literals and
    # group references; returns (groups, literals) consumed by
    # expand_template().
    s = Tokenizer(source)
    sget = s.get
    p = []
    a = p.append
    def literal(literal, p=p, pappend=a):
        # merge adjacent literal pieces to keep the list short
        if p and p[-1][0] is LITERAL:
            p[-1] = LITERAL, p[-1][1] + literal
        else:
            pappend((LITERAL, literal))
    sep = source[:0]
    if isinstance(sep, str):
        makechar = chr
    else:
        # bytes templates: tokens were decoded char-by-char, chr() works too
        makechar = chr
    while 1:
        this = sget()
        if this is None:
            break # end of replacement string
        if this and this[0] == "\\":
            # group
            c = this[1:2]
            if c == "g":
                # \g<name> or \g<number> reference
                name = ""
                if s.match("<"):
                    while 1:
                        char = sget()
                        if char is None:
                            raise error("unterminated group name")
                        if char == ">":
                            break
                        name = name + char
                if not name:
                    raise error("missing group name")
                try:
                    index = int(name)
                    if index < 0:
                        raise error("negative group number")
                except ValueError:
                    if not isname(name):
                        raise error("bad character in group name")
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        raise IndexError("unknown group name")
                a((MARK, index))
            elif c == "0":
                # octal escape: \0 plus up to two more octal digits
                if s.next in OCTDIGITS:
                    this = this + sget()
                    if s.next in OCTDIGITS:
                        this = this + sget()
                literal(makechar(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                # \NN is a group reference, unless three octal digits follow
                isoctal = False
                if s.next in DIGITS:
                    this = this + sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this = this + sget()
                        isoctal = True
                        literal(makechar(int(this[1:], 8) & 0xff))
                if not isoctal:
                    a((MARK, int(this[1:])))
            else:
                try:
                    this = makechar(ESCAPES[this][1])
                except KeyError:
                    pass
                literal(this)
        else:
            literal(this)
    # convert template to groups and literals lists
    i = 0
    groups = []
    groupsappend = groups.append
    literals = [None] * len(p)
    if isinstance(source, str):
        encode = lambda x: x
    else:
        # The tokenizer implicitly decodes bytes objects as latin-1, we must
        # therefore re-encode the final representation.
        encode = lambda x: x.encode('latin-1')
    for c, s in p:
        if c is MARK:
            groupsappend((i, s))
            # literal[i] is already None
        else:
            literals[i] = encode(s)
        i = i + 1
    return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
| gpl-3.0 |
dlazz/ansible | lib/ansible/cli/playbook.py | 5 | 8433 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import optparse_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.utils.display import Display
display = Display()
class PlaybookCLI(CLI):
    ''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
        See the project home page (https://docs.ansible.com) for more information. '''

    def init_parser(self):
        # create parser for CLI options
        super(PlaybookCLI, self).init_parser(
            usage="%prog [options] playbook.yml [playbook2 ...]",
            desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.")

        # shared option groups reused by all ansible CLIs
        opt_help.add_connect_options(self.parser)
        opt_help.add_meta_options(self.parser)
        opt_help.add_runas_options(self.parser)
        opt_help.add_subset_options(self.parser)
        opt_help.add_check_options(self.parser)
        opt_help.add_inventory_options(self.parser)
        opt_help.add_runtask_options(self.parser)
        opt_help.add_vault_options(self.parser)
        opt_help.add_fork_options(self.parser)
        opt_help.add_module_options(self.parser)

        # ansible playbook specific opts
        self.parser.add_option('--list-tasks', dest='listtasks', action='store_true',
                               help="list all tasks that would be executed")
        self.parser.add_option('--list-tags', dest='listtags', action='store_true',
                               help="list all available tags")
        self.parser.add_option('--step', dest='step', action='store_true',
                               help="one-step-at-a-time: confirm each task before running")
        self.parser.add_option('--start-at-task', dest='start_at_task',
                               help="start the playbook at the task matching this name")

    def post_process_args(self, options, args):
        # validate parsed options and normalize privilege-escalation flags
        options, args = super(PlaybookCLI, self).post_process_args(options, args)

        if len(args) == 0:
            raise AnsibleOptionsError("You must specify a playbook file to run")

        display.verbosity = options.verbosity
        self.validate_conflicts(options, runas_opts=True, vault_opts=True, fork_opts=True)
        options = self.normalize_become_options(options)

        return options, args

    def run(self):
        """Run the playbooks, or only list hosts/tasks/tags when the
        corresponding --list-* option was given."""

        super(PlaybookCLI, self).run()

        # Note: slightly wrong, this is written so that implicit localhost
        # manages passwords
        sshpass = None
        becomepass = None
        passwords = {}

        # initial error check, to make sure all specified playbooks are accessible
        # before we start running anything through the playbook executor
        for playbook in context.CLIARGS['args']:
            if not os.path.exists(playbook):
                raise AnsibleError("the playbook: %s could not be found" % playbook)
            if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
                raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)

        # don't deal with privilege escalation or passwords when we don't need to
        if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
                context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
            (sshpass, becomepass) = self.ask_passwords()
            passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        loader, inventory, variable_manager = self._play_prereqs()

        # Implicit localhost (which is not returned in list_hosts()) is taken
        # into account for warning if inventory is empty. But it can't be
        # taken into account for checking if limit doesn't match any hosts.
        # Instead we don't worry about limit if only implicit localhost was
        # in inventory to start with.
        #
        # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
        hosts = self.get_host_list(inventory, context.CLIARGS['subset'])

        # flush fact cache if requested
        if context.CLIARGS['flush_cache']:
            self._flush_cache(inventory, variable_manager)

        # create the playbook executor, which manages running the plays via a task queue manager
        pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
                                variable_manager=variable_manager, loader=loader,
                                passwords=passwords)

        results = pbex.run()

        # a list result means one of the --list-* modes: render the report
        if isinstance(results, list):
            for p in results:

                display.display('\nplaybook: %s' % p['playbook'])
                for idx, play in enumerate(p['plays']):
                    if play._included_path is not None:
                        loader.set_basedir(play._included_path)
                    else:
                        pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
                        loader.set_basedir(pb_dir)

                    msg = "\n  play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
                    mytags = set(play.tags)
                    msg += '\tTAGS: [%s]' % (','.join(mytags))

                    if context.CLIARGS['listhosts']:
                        playhosts = set(inventory.get_hosts(play.hosts))
                        msg += "\n    pattern: %s\n    hosts (%d):" % (play.hosts, len(playhosts))
                        for host in playhosts:
                            msg += "\n      %s" % host

                    display.display(msg)

                    all_tags = set()
                    if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
                        taskmsg = ''
                        if context.CLIARGS['listtasks']:
                            taskmsg = '    tasks:\n'

                        def _process_block(b):
                            # recursively render a task block, collecting tags
                            taskmsg = ''
                            for task in b.block:
                                if isinstance(task, Block):
                                    taskmsg += _process_block(task)
                                else:
                                    if task.action == 'meta':
                                        continue

                                    all_tags.update(task.tags)
                                    if context.CLIARGS['listtasks']:
                                        cur_tags = list(mytags.union(set(task.tags)))
                                        cur_tags.sort()
                                        if task.name:
                                            taskmsg += "      %s" % task.get_name()
                                        else:
                                            taskmsg += "      %s" % task.action
                                        taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)

                            return taskmsg

                        all_vars = variable_manager.get_vars(play=play)
                        play_context = PlayContext(play=play)
                        for block in play.compile():
                            block = block.filter_tagged_tasks(play_context, all_vars)
                            if not block.has_tasks():
                                continue
                            taskmsg += _process_block(block)

                        if context.CLIARGS['listtags']:
                            cur_tags = list(mytags.union(all_tags))
                            cur_tags.sort()
                            taskmsg += "      TASK TAGS: [%s]\n" % ', '.join(cur_tags)

                        display.display(taskmsg)

            return 0
        else:
            # integer exit code from the executor
            return results

    @staticmethod
    def _flush_cache(inventory, variable_manager):
        # drop cached facts for every inventory host
        for host in inventory.list_hosts():
            hostname = host.get_name()
            variable_manager.clear_facts(hostname)
| gpl-3.0 |
zhengwsh/InplusTrader_Linux | rqalpha/examples/rsi.py | 5 | 2028 | from rqalpha.api import *
import talib
# Write any initialization logic in this method. The `context` object
# is passed between all the methods of your strategy.
def init(context):
    # Select the stocks we are interested in.
    context.s1 = "000001.XSHE"
    context.s2 = "601988.XSHG"
    context.s3 = "000068.XSHE"
    context.stocks = [context.s1, context.s2, context.s3]
    # RSI lookback period and overbought / oversold thresholds.
    context.TIME_PERIOD = 14
    context.HIGH_RSI = 85
    context.LOW_RSI = 30
    # Fraction of the remaining cash used per new purchase.
    context.ORDER_PERCENT = 0.3
# This logic is triggered by data updates for the securities you chose,
# e.g. a daily/minute history bar slice or a real-time tick slice.
def handle_bar(context, bar_dict):
    # Main algorithm logic:
    # bar_dict[order_book_id] gives the bar data of a security.
    # context.portfolio holds the current portfolio state.
    # Use order_shares(id_or_ins, amount) to place orders.

    # Loop over our selected stocks and compute each one's RSI value.
    for stock in context.stocks:
        # Load historical close prices.
        prices = history_bars(stock, context.TIME_PERIOD+1, '1d', 'close')
        # Compute the RSI value with TA-Lib (take the latest reading).
        rsi_data = talib.RSI(prices, timeperiod=context.TIME_PERIOD)[-1]

        cur_position = context.portfolio.positions[stock].quantity
        # Use 30% of the remaining cash for new purchases.
        target_available_cash = context.portfolio.cash * context.ORDER_PERCENT

        # When RSI rises above the upper threshold, liquidate the stock.
        if rsi_data > context.HIGH_RSI and cur_position > 0:
            order_target_value(stock, 0)

        # When RSI falls below the lower threshold, add to the position
        # with a fraction of the remaining cash.
        if rsi_data < context.LOW_RSI:
            logger.info("target available cash caled: " + str(target_available_cash))
            # If the remaining cash is less than one board lot (100 shares),
            # the order will be rejected by the order management system.
            order_value(stock, target_available_cash)
collinjackson/mojo | mojo/python/tests/bindings_structs_unittest.py | 10 | 7740 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import unittest
# pylint: disable=E0611,F0401
import mojo_system
# Generated files
# pylint: disable=F0401
import regression_tests_mojom
import sample_import_mojom
import sample_import2_mojom
import sample_service_mojom
class StructBindingsTest(unittest.TestCase):
def testModule(self):
self.assertEquals(sample_service_mojom.DefaultsTest.__module__,
'sample_service_mojom')
def testDefaultsTest(self):
defaults_test = sample_service_mojom.DefaultsTest()
self.assertEquals(defaults_test.a0, -12)
self.assertEquals(defaults_test.a1, 12)
self.assertEquals(defaults_test.a2, 1234)
self.assertEquals(defaults_test.a3, 34567)
self.assertEquals(defaults_test.a4, 123456)
self.assertEquals(defaults_test.a5, 3456789012)
self.assertEquals(defaults_test.a6, -111111111111)
self.assertEquals(defaults_test.a7, 9999999999999999999)
self.assertEquals(defaults_test.a8, 0x12345)
self.assertEquals(defaults_test.a9, -0x12345)
self.assertEquals(defaults_test.a10, 1234)
self.assertEquals(defaults_test.a11, True)
self.assertEquals(defaults_test.a12, False)
self.assertEquals(defaults_test.a13, 123.25)
self.assertEquals(defaults_test.a14, 1234567890.123)
self.assertEquals(defaults_test.a15, 1E10)
self.assertEquals(defaults_test.a16, -1.2E+20)
self.assertEquals(defaults_test.a17, 1.23E-20)
self.assertEquals(defaults_test.a18, None)
self.assertEquals(defaults_test.a19, None)
self.assertEquals(defaults_test.a20, sample_service_mojom.Bar.Type.BOTH)
self.assertEquals(defaults_test.a21, None)
self.assertTrue(isinstance(defaults_test.a22, sample_import2_mojom.Thing))
self.assertEquals(defaults_test.a23, 0xFFFFFFFFFFFFFFFF)
self.assertEquals(defaults_test.a24, 0x123456789)
self.assertEquals(defaults_test.a25, -0x123456789)
self.assertEquals(defaults_test.a26, float('inf'))
self.assertEquals(defaults_test.a27, float('-inf'))
self.assertTrue(math.isnan(defaults_test.a28))
self.assertEquals(defaults_test.a29, float('inf'))
self.assertEquals(defaults_test.a30, float('-inf'))
self.assertTrue(math.isnan(defaults_test.a31))
def testNoAliasing(self):
foo1 = sample_service_mojom.Foo()
foo2 = sample_service_mojom.Foo()
foo1.name = "foo1"
foo2.name = "foo2"
self.assertEquals(foo1.name, "foo1")
self.assertEquals(foo2.name, "foo2")
defaults_test1 = sample_service_mojom.DefaultsTest()
defaults_test2 = sample_service_mojom.DefaultsTest()
self.assertIsNot(defaults_test1.a22, defaults_test2.a22)
def testImmutableAttributeSet(self):
foo_instance = sample_service_mojom.Foo()
with self.assertRaises(AttributeError):
foo_instance.new_attribute = None
with self.assertRaises(AttributeError):
del foo_instance.name
def _TestIntegerField(self, entity, field_name, bits, signed):
if signed:
min_value = -(1 << (bits - 1))
max_value = (1 << (bits - 1)) - 1
else:
min_value = 0
max_value = (1 << bits) - 1
entity.__setattr__(field_name, min_value)
entity.__setattr__(field_name, max_value)
with self.assertRaises(TypeError):
entity.__setattr__(field_name, None)
with self.assertRaises(OverflowError):
entity.__setattr__(field_name, min_value - 1)
with self.assertRaises(OverflowError):
entity.__setattr__(field_name, max_value + 1)
with self.assertRaises(TypeError):
entity.__setattr__(field_name, 'hello world')
def testTypes(self):
defaults_test = sample_service_mojom.DefaultsTest()
# Integer types
self._TestIntegerField(defaults_test, 'a0', 8, True)
self._TestIntegerField(defaults_test, 'a1', 8, False)
self._TestIntegerField(defaults_test, 'a2', 16, True)
self._TestIntegerField(defaults_test, 'a3', 16, False)
self._TestIntegerField(defaults_test, 'a4', 32, True)
self._TestIntegerField(defaults_test, 'a5', 32, False)
self._TestIntegerField(defaults_test, 'a6', 64, True)
self._TestIntegerField(defaults_test, 'a7', 64, False)
# Boolean types
defaults_test.a11 = False
self.assertEquals(defaults_test.a11, False)
defaults_test.a11 = None
self.assertEquals(defaults_test.a11, False)
defaults_test.a11 = []
self.assertEquals(defaults_test.a11, False)
defaults_test.a12 = True
self.assertEquals(defaults_test.a12, True)
defaults_test.a12 = 1
self.assertEquals(defaults_test.a12, True)
defaults_test.a12 = [[]]
self.assertEquals(defaults_test.a12, True)
# Floating point types
with self.assertRaises(TypeError):
defaults_test.a13 = 'hello'
with self.assertRaises(TypeError):
defaults_test.a14 = 'hello'
# Array type
defaults_test.a18 = None
defaults_test.a18 = []
defaults_test.a18 = [ 0 ]
defaults_test.a18 = [ 255 ]
defaults_test.a18 = [ 0, 255 ]
with self.assertRaises(TypeError):
defaults_test.a18 = [[]]
with self.assertRaises(OverflowError):
defaults_test.a18 = [ -1 ]
with self.assertRaises(OverflowError):
defaults_test.a18 = [ 256 ]
# String type
defaults_test.a19 = None
defaults_test.a19 = ''
defaults_test.a19 = 'hello world'
with self.assertRaises(TypeError):
defaults_test.a19 = [[]]
with self.assertRaises(TypeError):
defaults_test.a19 = [ -1 ]
with self.assertRaises(TypeError):
defaults_test.a19 = [ 256 ]
# Structs
defaults_test.a21 = None
defaults_test.a21 = sample_import_mojom.Point()
with self.assertRaises(TypeError):
defaults_test.a21 = 1
with self.assertRaises(TypeError):
defaults_test.a21 = sample_import2_mojom.Thing()
# Handles
foo_instance = sample_service_mojom.Foo()
foo_instance.source = None
foo_instance.source = mojo_system.Handle()
with self.assertRaises(TypeError):
foo_instance.source = 1
with self.assertRaises(TypeError):
foo_instance.source = object()
def testConstructor(self):
bar_instance = sample_service_mojom.Bar()
foo_instance = sample_service_mojom.Foo(name="Foo",
x=-1,
y=5,
a=False,
bar=bar_instance)
self.assertEquals(foo_instance.name, "Foo")
self.assertEquals(foo_instance.x, -1)
self.assertEquals(foo_instance.y, 5)
self.assertEquals(foo_instance.a, False)
self.assertEquals(foo_instance.bar, bar_instance)
def testPositionalConstructor(self):
p = sample_import_mojom.Point()
self.assertEquals(p.x, 0)
self.assertEquals(p.y, 0)
p = sample_import_mojom.Point(34)
self.assertEquals(p.x, 34)
self.assertEquals(p.y, 0)
p = sample_import_mojom.Point(34, 12)
self.assertEquals(p.x, 34)
self.assertEquals(p.y, 12)
p = sample_import_mojom.Point(x=34, y=12)
self.assertEquals(p.x, 34)
self.assertEquals(p.y, 12)
p = sample_import_mojom.Point(34, y=12)
self.assertEquals(p.x, 34)
self.assertEquals(p.y, 12)
with self.assertRaises(TypeError):
p = sample_import_mojom.Point(0, 0, 0)
with self.assertRaises(TypeError):
p = sample_import_mojom.Point(0, x=0)
with self.assertRaises(TypeError):
p = sample_import_mojom.Point(c=0)
def testCyclicDefinition(self):
a = regression_tests_mojom.A()
b = regression_tests_mojom.B()
self.assertIsNone(a.b)
self.assertIsNone(b.a)
a.b = b
self.assertIs(a.b, b)
| bsd-3-clause |
JT5D/Alfred-Popclip-Sublime | Sublime Text 2/Package Control/package_control/downloaders/cert_provider.py | 11 | 7500 | import os
import re
import json
import sublime
from ..console_write import console_write
from ..open_compat import open_compat, read_compat
from ..package_io import read_package_file
from ..cache import get_cache
from ..ca_certs import get_system_ca_bundle_path
from .no_ca_cert_exception import NoCaCertException
from .downloader_exception import DownloaderException
class CertProvider(object):
"""
A base downloader that provides access to a ca-bundle for validating
SSL certificates.
"""
def check_certs(self, domain, timeout):
"""
Ensures that the SSL CA cert for a domain is present on the machine
:param domain:
The domain to ensure there is a CA cert for
:param timeout:
The int timeout for downloading the CA cert from the channel
:raises:
NoCaCertException: when a suitable CA cert could not be found
:return:
The CA cert bundle path
"""
# Try to use the system CA bundle
ca_bundle_path = get_system_ca_bundle_path(self.settings)
if ca_bundle_path:
return ca_bundle_path
# If the system bundle did not work, fall back to our CA distribution
# system. Hopefully this will be going away soon.
if self.settings.get('debug'):
console_write(u'Unable to find system CA cert bundle, falling back to certs provided by Package Control')
cert_match = False
certs_list = get_cache('*.certs', self.settings.get('certs', {}))
ca_bundle_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-bundle')
if not os.path.exists(ca_bundle_path) or os.stat(ca_bundle_path).st_size == 0:
bundle_contents = read_package_file('Package Control', 'Package Control.ca-bundle', True)
if not bundle_contents:
raise NoCaCertException(u'Unable to copy distributed Package Control.ca-bundle', domain)
with open_compat(ca_bundle_path, 'wb') as f:
f.write(bundle_contents)
cert_info = certs_list.get(domain)
if cert_info:
cert_match = self.locate_cert(cert_info[0],
cert_info[1], domain, timeout)
wildcard_info = certs_list.get('*')
if wildcard_info:
cert_match = self.locate_cert(wildcard_info[0],
wildcard_info[1], domain, timeout) or cert_match
if not cert_match:
raise NoCaCertException(u'No CA certs available for %s' % domain, domain)
return ca_bundle_path
def locate_cert(self, cert_id, location, domain, timeout):
"""
Makes sure the SSL cert specified has been added to the CA cert
bundle that is present on the machine
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param location:
An http(s) URL, or absolute filesystem path to the CA cert(s)
:param domain:
The domain to ensure there is a CA cert for
:param timeout:
The int timeout for downloading the CA cert from the channel
:return:
If the cert specified (by cert_id) is present on the machine and
part of the Package Control.ca-bundle file in the User package folder
"""
ca_list_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-list')
if not os.path.exists(ca_list_path) or os.stat(ca_list_path).st_size == 0:
list_contents = read_package_file('Package Control', 'Package Control.ca-list')
if not list_contents:
raise NoCaCertException(u'Unable to copy distributed Package Control.ca-list', domain)
with open_compat(ca_list_path, 'w') as f:
f.write(list_contents)
ca_certs = []
with open_compat(ca_list_path, 'r') as f:
ca_certs = json.loads(read_compat(f))
if not cert_id in ca_certs:
if str(location) != '':
if re.match('^https?://', location):
contents = self.download_cert(cert_id, location, domain,
timeout)
else:
contents = self.load_cert(cert_id, location, domain)
if contents:
self.save_cert(cert_id, contents)
return True
return False
return True
def download_cert(self, cert_id, url, domain, timeout):
"""
Downloads CA cert(s) from a URL
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param url:
An http(s) URL to the CA cert(s)
:param domain:
The domain to ensure there is a CA cert for
:param timeout:
The int timeout for downloading the CA cert from the channel
:return:
The contents of the CA cert(s)
"""
cert_downloader = self.__class__(self.settings)
if self.settings.get('debug'):
console_write(u"Downloading CA cert for %s from \"%s\"" % (domain, url), True)
return cert_downloader.download(url,
'Error downloading CA certs for %s.' % domain, timeout, 1)
def load_cert(self, cert_id, path, domain):
"""
Copies CA cert(s) from a file path
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param path:
The absolute filesystem path to a file containing the CA cert(s)
:param domain:
The domain name the cert is for
:return:
The contents of the CA cert(s)
"""
if os.path.exists(path):
if self.settings.get('debug'):
console_write(u"Copying CA cert for %s from \"%s\"" % (domain, path), True)
with open_compat(path, 'rb') as f:
return f.read()
else:
raise NoCaCertException(u"Unable to find CA cert for %s at \"%s\"" % (domain, path), domain)
def save_cert(self, cert_id, contents):
"""
Saves CA cert(s) to the Package Control.ca-bundle
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param contents:
The contents of the CA cert(s)
"""
ca_bundle_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-bundle')
with open_compat(ca_bundle_path, 'ab') as f:
f.write(b"\n" + contents)
ca_list_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-list')
with open_compat(ca_list_path, 'r') as f:
ca_certs = json.loads(read_compat(f))
ca_certs.append(cert_id)
with open_compat(ca_list_path, 'w') as f:
f.write(json.dumps(ca_certs, indent=4))
| gpl-2.0 |
p12tic/zeroclickinfo-fathead | lib/fathead/htmlref/parse.py | 8 | 2977 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import cgi
import re
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def replace_all(text, terms):
""" Replaces all terms contained
in a dict """
for _from, _to in terms.items():
text = text.replace(_from, _to)
return text
class Tag(object):
""" Contains informations about
a HTML tag """
def __init__(self, name, info, reference, example):
self.name = name
self.info = info
self.reference = reference
# Remove excess padding around synopsis
self.example = re.sub('^\\n', '', example)
self.example = re.sub('\\n$', '', self.example)
self.example = replace_all(self.example, {'\n': '\\n',
'\t': '\\t',
'\r': ''})
def __str__(self):
fields = [
self.name, # $page
'', # $namespace
self.reference, # $url
self.info, # $description
self.example, # $synopsis (code)
'', # $details
'A', # $type
'' # $lang
]
output = '%s' % ('\t'.join(fields))
return output
class Parser(object):
""" Parses a HTML file to get
all tag informations inside it """
def __init__(self, input='download/index.html'):
self.soup = BeautifulSoup(open(input), from_encoding='utf-8')
def get_tags(self):
""" Gets all tags defined in 'dl' tags """
self.tags = []
for tag in self.soup.find_all('dl'):
name = tag.dt.contents[0]
# getting info about tag
info = ''
for p in tag.dd.find_all('p'):
info += p.getText() + ' '
# getting reference link and code snippet
a_tags = tag.dd.find_all('a')
example_id = a_tags[1]['href'].replace('#', '') # code snippet
example = self.soup.find('div', {'id': example_id}).getText()
# url reference (from HTML5Doctor if exists)
reference = ''
try:
reference = tag.dt.span.a['href'] # url for HTML5Doctor
except:
reference = a_tags[0]['href'] # url for W3C
reference = 'http://html5doctor.com/element-index/#' + name
new_tag = Tag(name, info, reference, example)
self.tags.append(new_tag)
logger.info('Tag parsed: %s' % new_tag.name)
if __name__ == '__main__':
parser = Parser()
parser.get_tags()
with open('output.txt', 'w') as file:
for tag in parser.tags:
file.write(tag.__str__().encode('utf-8') + '\n')
logger.info('Tag added to output: %s' % tag.name)
| apache-2.0 |
NexusIS/cobbler | cobbler/item_package.py | 15 | 2384 | """
Copyright 2006-2009, MadHatter
Kelsey Hightower <kelsey.hightower@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import resource
from cobbler.cexceptions import CX
from cobbler.utils import _
# this data structure is described in item.py
FIELDS = [
# non-editable in UI (internal)
["ctime", 0, 0, "", False, "", 0, "float"],
["depth", 2, 0, "", False, "", 0, "float"],
["mtime", 0, 0, "", False, "", 0, "float"],
["uid", "", 0, "", False, "", 0, "str"],
# editable in UI
["action", "create", 0, "Action", True, "Install or remove package resource", 0, "str"],
["comment", "", 0, "Comment", True, "Free form text description", 0, "str"],
["installer", "yum", 0, "Installer", True, "Package Manager", 0, "str"],
["name", "", 0, "Name", True, "Name of file resource", 0, "str"],
["owners", "SETTINGS:default_ownership", 0, "Owners", True, "Owners list for authz_ownership (space delimited)", [], "list"],
["version", "", 0, "Version", True, "Package Version", 0, "str"],
]
class Package(resource.Resource):
TYPE_NAME = _("package")
COLLECTION_TYPE = "package"
#
# override some base class methods first (item.Item)
#
def make_clone(self):
_dict = self.to_dict()
cloned = Package(self.collection_mgr)
cloned.from_dict(_dict)
return cloned
def get_fields(self):
return FIELDS
def check_if_valid(self):
if self.name is None or self.name == "":
raise CX("name is required")
#
# specific methods for item.Package
#
def set_installer(self, installer):
self.installer = installer.lower()
def set_version(self, version):
self.version = version
# EOF
| gpl-2.0 |
smn/django-cache-sweeper | myproject/settings.py | 1 | 3533 | # Django settings for myproject project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db/myproject.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'kcq91^lvsj$0=#$&=$fj^=kw*p_725fvlsgs$tpqtw=zv8ln6o'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'myproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'django.contrib.markup',
'cachesweeper',
)
CACHE_BACKEND='memcached://127.0.0.1:11211'
| bsd-3-clause |
DirtyUnicorns/android_external_chromium_org | third_party/cython/src/Cython/Compiler/TypeSlots.py | 86 | 33972 | #
# Tables describing slots in the CPython type object
# and associated know-how.
#
import Naming
import PyrexTypes
import StringEncoding
invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
class Signature(object):
# Method slot signature descriptor.
#
# has_dummy_arg boolean
# has_generic_args boolean
# fixed_arg_format string
# ret_format string
# error_value string
#
# The formats are strings made up of the following
# characters:
#
# 'O' Python object
# 'T' Python object of the type of 'self'
# 'v' void
# 'p' void *
# 'P' void **
# 'i' int
# 'b' bint
# 'I' int *
# 'l' long
# 'f' float
# 'd' double
# 'h' Py_hash_t
# 'z' Py_ssize_t
# 'Z' Py_ssize_t *
# 's' char *
# 'S' char **
# 'r' int used only to signal exception
# 'B' Py_buffer *
# '-' dummy 'self' argument (not used)
# '*' rest of args passed as generic Python
# arg tuple and kw dict (must be last
# char in format string)
format_map = {
'O': PyrexTypes.py_object_type,
'v': PyrexTypes.c_void_type,
'p': PyrexTypes.c_void_ptr_type,
'P': PyrexTypes.c_void_ptr_ptr_type,
'i': PyrexTypes.c_int_type,
'b': PyrexTypes.c_bint_type,
'I': PyrexTypes.c_int_ptr_type,
'l': PyrexTypes.c_long_type,
'f': PyrexTypes.c_float_type,
'd': PyrexTypes.c_double_type,
'h': PyrexTypes.c_py_hash_t_type,
'z': PyrexTypes.c_py_ssize_t_type,
'Z': PyrexTypes.c_py_ssize_t_ptr_type,
's': PyrexTypes.c_char_ptr_type,
'S': PyrexTypes.c_char_ptr_ptr_type,
'r': PyrexTypes.c_returncode_type,
'B': PyrexTypes.c_py_buffer_ptr_type,
# 'T', '-' and '*' are handled otherwise
# and are not looked up in here
}
type_to_format_map = dict([(type_, format_)
for format_, type_ in format_map.iteritems()])
error_value_map = {
'O': "NULL",
'T': "NULL",
'i': "-1",
'b': "-1",
'l': "-1",
'r': "-1",
'h': "-1",
'z': "-1",
}
def __init__(self, arg_format, ret_format):
self.has_dummy_arg = 0
self.has_generic_args = 0
if arg_format[:1] == '-':
self.has_dummy_arg = 1
arg_format = arg_format[1:]
if arg_format[-1:] == '*':
self.has_generic_args = 1
arg_format = arg_format[:-1]
self.fixed_arg_format = arg_format
self.ret_format = ret_format
self.error_value = self.error_value_map.get(ret_format, None)
self.exception_check = ret_format != 'r' and self.error_value is not None
self.is_staticmethod = False
def num_fixed_args(self):
return len(self.fixed_arg_format)
def is_self_arg(self, i):
# argument is 'self' for methods or 'class' for classmethods
return self.fixed_arg_format[i] == 'T'
def returns_self_type(self):
# return type is same as 'self' argument type
return self.ret_format == 'T'
def fixed_arg_type(self, i):
return self.format_map[self.fixed_arg_format[i]]
def return_type(self):
return self.format_map[self.ret_format]
def format_from_type(self, arg_type):
if arg_type.is_pyobject:
arg_type = PyrexTypes.py_object_type
return self.type_to_format_map[arg_type]
def exception_value(self):
return self.error_value_map.get(self.ret_format)
def function_type(self, self_arg_override=None):
# Construct a C function type descriptor for this signature
args = []
for i in xrange(self.num_fixed_args()):
if self_arg_override is not None and self.is_self_arg(i):
assert isinstance(self_arg_override, PyrexTypes.CFuncTypeArg)
args.append(self_arg_override)
else:
arg_type = self.fixed_arg_type(i)
args.append(PyrexTypes.CFuncTypeArg("", arg_type, None))
if self_arg_override is not None and self.returns_self_type():
ret_type = self_arg_override.type
else:
ret_type = self.return_type()
exc_value = self.exception_value()
return PyrexTypes.CFuncType(
ret_type, args, exception_value=exc_value,
exception_check=self.exception_check)
def method_flags(self):
if self.ret_format == "O":
full_args = self.fixed_arg_format
if self.has_dummy_arg:
full_args = "O" + full_args
if full_args in ["O", "T"]:
if self.has_generic_args:
return [method_varargs, method_keywords]
else:
return [method_noargs]
elif full_args in ["OO", "TO"] and not self.has_generic_args:
return [method_onearg]
if self.is_staticmethod:
return [method_varargs, method_keywords]
return None
class SlotDescriptor(object):
# Abstract base class for type slot descriptors.
#
# slot_name string Member name of the slot in the type object
# is_initialised_dynamically Is initialised by code in the module init function
# is_inherited Is inherited by subtypes (see PyType_Ready())
# py3 Indicates presence of slot in Python 3
# py2 Indicates presence of slot in Python 2
# ifdef Full #ifdef string that slot is wrapped in. Using this causes py3, py2 and flags to be ignored.)
def __init__(self, slot_name, dynamic=False, inherited=False,
py3=True, py2=True, ifdef=None):
self.slot_name = slot_name
self.is_initialised_dynamically = dynamic
self.is_inherited = inherited
self.ifdef = ifdef
self.py3 = py3
self.py2 = py2
def preprocessor_guard_code(self):
ifdef = self.ifdef
py2 = self.py2
py3 = self.py3
guard = None
if ifdef:
guard = ("#if %s" % ifdef)
elif not py3 or py3 == '<RESERVED>':
guard = ("#if PY_MAJOR_VERSION < 3")
elif not py2:
guard = ("#if PY_MAJOR_VERSION >= 3")
return guard
def generate(self, scope, code):
end_pypy_guard = False
if self.is_initialised_dynamically:
value = "0"
else:
value = self.slot_code(scope)
if value == "0" and self.is_inherited:
# PyPy currently has a broken PyType_Ready() that fails to
# inherit some slots. To work around this, we explicitly
# set inherited slots here, but only in PyPy since CPython
# handles this better than we do.
inherited_value = value
current_scope = scope
while (inherited_value == "0"
and current_scope.parent_type
and current_scope.parent_type.base_type
and current_scope.parent_type.base_type.scope):
current_scope = current_scope.parent_type.base_type.scope
inherited_value = self.slot_code(current_scope)
if inherited_value != "0":
code.putln("#if CYTHON_COMPILING_IN_PYPY")
code.putln("%s, /*%s*/" % (inherited_value, self.slot_name))
code.putln("#else")
end_pypy_guard = True
preprocessor_guard = self.preprocessor_guard_code()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln("%s, /*%s*/" % (value, self.slot_name))
if self.py3 == '<RESERVED>':
code.putln("#else")
code.putln("0, /*reserved*/")
if preprocessor_guard:
code.putln("#endif")
if end_pypy_guard:
code.putln("#endif")
# Some C implementations have trouble statically
# initialising a global with a pointer to an extern
# function, so we initialise some of the type slots
# in the module init function instead.
def generate_dynamic_init_code(self, scope, code):
if self.is_initialised_dynamically:
value = self.slot_code(scope)
if value != "0":
code.putln("%s.%s = %s;" % (
scope.parent_type.typeobj_cname,
self.slot_name,
value
)
)
class FixedSlot(SlotDescriptor):
# Descriptor for a type slot with a fixed value.
#
# value string
def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.value = value
def slot_code(self, scope):
return self.value
class EmptySlot(FixedSlot):
# Descriptor for a type slot whose value is always 0.
def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
class MethodSlot(SlotDescriptor):
# Type slot descriptor for a user-definable method.
#
# signature Signature
# method_name string The __xxx__ name of the method
# alternatives [string] Alternative list of __xxx__ names for the method
def __init__(self, signature, slot_name, method_name, fallback=None,
py3=True, py2=True, ifdef=None, inherited=True):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2,
ifdef=ifdef, inherited=inherited)
self.signature = signature
self.slot_name = slot_name
self.method_name = method_name
self.alternatives = []
method_name_to_slot[method_name] = self
#
if fallback:
self.alternatives.append(fallback)
for alt in (self.py2, self.py3):
if isinstance(alt, (tuple, list)):
slot_name, method_name = alt
self.alternatives.append(method_name)
method_name_to_slot[method_name] = self
def slot_code(self, scope):
entry = scope.lookup_here(self.method_name)
if entry and entry.func_cname:
return entry.func_cname
for method_name in self.alternatives:
entry = scope.lookup_here(method_name)
if entry and entry.func_cname:
return entry.func_cname
return "0"
class InternalMethodSlot(SlotDescriptor):
# Type slot descriptor for a method which is always
# synthesized by Cython.
#
# slot_name string Member name of the slot in the type object
def __init__(self, slot_name, **kargs):
SlotDescriptor.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
return scope.mangle_internal(self.slot_name)
class GCDependentSlot(InternalMethodSlot):
# Descriptor for a slot whose value depends on whether
# the type participates in GC.
def __init__(self, slot_name, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
if not scope.needs_gc():
return "0"
if not scope.has_cyclic_pyobject_attrs:
# if the type does not have GC relevant object attributes, it can
# delegate GC methods to its parent - iff the parent functions
# are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class GCClearReferencesSlot(GCDependentSlot):
def slot_code(self, scope):
if scope.needs_tp_clear():
return GCDependentSlot.slot_code(self, scope)
return "0"
class ConstructorSlot(InternalMethodSlot):
# Descriptor for tp_new and tp_dealloc.
def __init__(self, slot_name, method, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.method = method
def slot_code(self, scope):
if (self.slot_name != 'tp_new'
and scope.parent_type.base_type
and not scope.has_pyobject_attrs
and not scope.has_memoryview_attrs
and not scope.lookup_here(self.method)):
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class SyntheticSlot(InternalMethodSlot):
# Type slot descriptor for a synthesized method which
# dispatches to one or more user-defined methods depending
# on its arguments. If none of the relevant methods are
# defined, the method will not be synthesized and an
# alternative default value will be placed in the type
# slot.
def __init__(self, slot_name, user_methods, default_value, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.user_methods = user_methods
self.default_value = default_value
def slot_code(self, scope):
if scope.defines_any(self.user_methods):
return InternalMethodSlot.slot_code(self, scope)
else:
return self.default_value
class TypeFlagsSlot(SlotDescriptor):
# Descriptor for the type flags slot.
def slot_code(self, scope):
value = "Py_TPFLAGS_DEFAULT"
if scope.directives['type_version_tag']:
# it's not in 'Py_TPFLAGS_DEFAULT' in Py2
value += "|Py_TPFLAGS_HAVE_VERSION_TAG"
else:
# it's enabled in 'Py_TPFLAGS_DEFAULT' in Py3
value = "(%s&~Py_TPFLAGS_HAVE_VERSION_TAG)" % value
value += "|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
if not scope.parent_type.is_final_type:
value += "|Py_TPFLAGS_BASETYPE"
if scope.needs_gc():
value += "|Py_TPFLAGS_HAVE_GC"
return value
class DocStringSlot(SlotDescriptor):
    """Descriptor for the docstring slot (tp_doc)."""

    def slot_code(self, scope):
        doc = scope.doc
        if doc is None:
            return "0"
        # Encode to bytes first, then escape for embedding in C source.
        encoded = doc.utf8encode() if doc.is_unicode else doc.byteencode()
        return '__Pyx_DOCSTR("%s")' % StringEncoding.escape_byte_string(encoded)
class SuiteSlot(SlotDescriptor):
    # Descriptor for a substructure of the type object.
    #
    # sub_slots   [SlotDescriptor]   the slots contained in this substructure

    def __init__(self, sub_slots, slot_type, slot_name):
        SlotDescriptor.__init__(self, slot_name)
        self.sub_slots = sub_slots
        self.slot_type = slot_type  # C struct type name, e.g. "PyNumberMethods"
        # Register globally so every substructure can be emitted later.
        substructures.append(self)

    def is_empty(self, scope):
        # True when no contained slot produces code for this scope.
        for slot in self.sub_slots:
            if slot.slot_code(scope) != "0":
                return False
        return True

    def substructure_cname(self, scope):
        # Mangled C name of the static substructure variable for this class.
        return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)

    def slot_code(self, scope):
        # Point the type object at the substructure, or NULL if it is empty.
        if not self.is_empty(scope):
            return "&%s" % self.substructure_cname(scope)
        return "0"

    def generate_substructure(self, scope, code):
        # Emit the static C struct definition filled with the sub-slot values.
        if not self.is_empty(scope):
            code.putln("")
            code.putln(
                "static %s %s = {" % (
                    self.slot_type,
                    self.substructure_cname(scope)))
            for slot in self.sub_slots:
                slot.generate(scope, code)
            code.putln("};")
substructures = []   # List of all SuiteSlot instances, filled in by SuiteSlot.__init__
class MethodTableSlot(SlotDescriptor):
    """Slot descriptor for the method table (tp_methods)."""

    def slot_code(self, scope):
        # Reference the generated method table only when the type actually
        # exposes Python-level functions.
        return scope.method_table_cname if scope.pyfunc_entries else "0"
class MemberTableSlot(SlotDescriptor):
    """Slot descriptor for the table of Python-accessible attributes (tp_members)."""

    def slot_code(self, scope):
        # No member table is ever generated here, so this slot stays null.
        return "0"
class GetSetSlot(SlotDescriptor):
    """Slot descriptor for the table of attribute get & set methods (tp_getset)."""

    def slot_code(self, scope):
        # Reference the generated getset table only when the type defines
        # Python-visible properties.
        return scope.getset_table_cname if scope.property_entries else "0"
class BaseClassSlot(SlotDescriptor):
    """Slot descriptor for the base class slot (tp_base).

    The slot value is assigned dynamically at module init time rather than
    in the static type object declaration.
    """

    def __init__(self, name):
        SlotDescriptor.__init__(self, name, dynamic=1)

    def generate_dynamic_init_code(self, scope, code):
        base_type = scope.parent_type.base_type
        if not base_type:
            return
        code.putln("%s.%s = %s;" % (
            scope.parent_type.typeobj_cname,
            self.slot_name,
            base_type.typeptr_cname))
# The following dictionary maps __xxx__ method names to slot descriptors.
# NOTE(review): presumably populated as MethodSlot instances are created
# (MethodSlot's constructor is not visible here) — confirm before relying on it.
method_name_to_slot = {}

## The following slots are (or could be) initialised with an
## extern function pointer.
#
#slots_initialised_from_extern = (
#    "tp_free",
#)

#------------------------------------------------------------------------------------------
#
#  Utility functions for accessing slot table data structures
#
#------------------------------------------------------------------------------------------
def get_special_method_signature(name):
    """Return the Signature of the special method called *name*,
    or None if *name* is not a known special method.
    """
    slot = method_name_to_slot.get(name)
    return slot.signature if slot else None
def get_property_accessor_signature(name):
    """Return the Signature of the accessor method *name* ('__get__',
    '__set__' or '__del__') for an extension type property, else None."""
    return property_accessor_signatures.get(name)
def get_base_slot_function(scope, slot):
    """Return the C function implementing *slot* in the base class, or None.

    This is useful for enabling the compiler to optimize calls that
    recursively climb the class hierarchy.  A function is only returned
    when the base class lives in the same module (its entry is not
    'extern') and its slot actually generates code.
    """
    base_type = scope.parent_type.base_type
    # Same-module check: the base type's scope shares our parent scope.
    if scope.parent_scope is base_type.scope.parent_scope:
        parent_slot = slot.slot_code(base_type.scope)
        if parent_slot != '0':
            entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
            if entry.visibility != 'extern':
                return parent_slot
    return None
def get_slot_function(scope, slot):
    """Return the C function implementing *slot* for the type in *scope*,
    or None when there is no locally defined implementation.

    This lets the compiler call the slot function directly instead of
    dispatching through the type object.
    """
    code = slot.slot_code(scope)
    if code == '0':
        return None
    entry = scope.parent_scope.lookup_here(scope.parent_type.name)
    if entry.visibility == 'extern':
        return None
    return code
#------------------------------------------------------------------------------------------
#
#  Signatures for generic Python functions and methods.
#
#------------------------------------------------------------------------------------------

pyfunction_signature = Signature("-*", "O")
pymethod_signature = Signature("T*", "O")

#------------------------------------------------------------------------------------------
#
#  Signatures for simple Python functions.
#
#------------------------------------------------------------------------------------------

pyfunction_noargs = Signature("-", "O")
pyfunction_onearg = Signature("-O", "O")

#------------------------------------------------------------------------------------------
#
#  Signatures for the various kinds of function that
#  can appear in the type object and its substructures.
#
#  NOTE(review): from the C typedefs alongside, the format codes appear to
#  be: 'T' = self, 'O' = PyObject*, 'i' = int, 'z'/'Z' = Py_ssize_t (ptr),
#  'r' = int return, 'v' = void, 'h' = Py_hash_t, '*' = varargs — confirm
#  against the Signature class definition.
#
#------------------------------------------------------------------------------------------

unaryfunc = Signature("T", "O")            # typedef PyObject * (*unaryfunc)(PyObject *);
binaryfunc = Signature("OO", "O")          # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ibinaryfunc = Signature("TO", "O")         # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ternaryfunc = Signature("OOO", "O")        # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
iternaryfunc = Signature("TOO", "O")       # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
callfunc = Signature("T*", "O")            # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
inquiry = Signature("T", "i")              # typedef int (*inquiry)(PyObject *);
lenfunc = Signature("T", "z")              # typedef Py_ssize_t (*lenfunc)(PyObject *);

                                           # typedef int (*coercion)(PyObject **, PyObject **);
intargfunc = Signature("Ti", "O")          # typedef PyObject *(*intargfunc)(PyObject *, int);
ssizeargfunc = Signature("Tz", "O")        # typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t);
intintargfunc = Signature("Tii", "O")      # typedef PyObject *(*intintargfunc)(PyObject *, int, int);
ssizessizeargfunc = Signature("Tzz", "O")  # typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t);
intobjargproc = Signature("TiO", 'r')      # typedef int(*intobjargproc)(PyObject *, int, PyObject *);
ssizeobjargproc = Signature("TzO", 'r')    # typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *);
intintobjargproc = Signature("TiiO", 'r')  # typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *);
ssizessizeobjargproc = Signature("TzzO", 'r')  # typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *);

intintargproc = Signature("Tii", 'r')
ssizessizeargproc = Signature("Tzz", 'r')
objargfunc = Signature("TO", "O")
objobjargproc = Signature("TOO", 'r')      # typedef int (*objobjargproc)(PyObject *, PyObject *, PyObject *);
readbufferproc = Signature("TzP", "z")     # typedef Py_ssize_t (*readbufferproc)(PyObject *, Py_ssize_t, void **);
writebufferproc = Signature("TzP", "z")    # typedef Py_ssize_t (*writebufferproc)(PyObject *, Py_ssize_t, void **);
segcountproc = Signature("TZ", "z")        # typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
charbufferproc = Signature("TzS", "z")     # typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
objargproc = Signature("TO", 'r')          # typedef int (*objobjproc)(PyObject *, PyObject *);
                                           # typedef int (*visitproc)(PyObject *, void *);
                                           # typedef int (*traverseproc)(PyObject *, visitproc, void *);

destructor = Signature("T", "v")           # typedef void (*destructor)(PyObject *);
# printfunc = Signature("TFi", 'r')        # typedef int (*printfunc)(PyObject *, FILE *, int);
                                           # typedef PyObject *(*getattrfunc)(PyObject *, char *);
getattrofunc = Signature("TO", "O")        # typedef PyObject *(*getattrofunc)(PyObject *, PyObject *);
                                           # typedef int (*setattrfunc)(PyObject *, char *, PyObject *);
setattrofunc = Signature("TOO", 'r')       # typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *);
delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i")             # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O")             # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h")             # typedef Py_hash_t (*hashfunc)(PyObject *);
                                           # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
richcmpfunc = Signature("OOi", "O")        # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O")          # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O")         # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O")       # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
descrsetfunc = Signature("TOO", 'r')       # typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
descrdelfunc = Signature("TO", 'r')
initproc = Signature("T*", 'r')            # typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
                                           # typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
                                           # typedef PyObject *(*allocfunc)(struct _typeobject *, int);

getbufferproc = Signature("TBi", "r")      # typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
releasebufferproc = Signature("TB", "v")   # typedef void (*releasebufferproc)(PyObject *, Py_buffer *);

#------------------------------------------------------------------------------------------
#
#  Signatures for accessor methods of properties.
#
#------------------------------------------------------------------------------------------

property_accessor_signatures = {
    '__get__': Signature("T", "O"),
    '__set__': Signature("TO", 'r'),
    '__del__': Signature("T", 'r')
}
#------------------------------------------------------------------------------------------
#
#  Descriptor tables for the slots of the various type object
#  substructures, in the order they appear in the structure.
#
#------------------------------------------------------------------------------------------

PyNumberMethods = (
    MethodSlot(binaryfunc, "nb_add", "__add__"),
    MethodSlot(binaryfunc, "nb_subtract", "__sub__"),
    MethodSlot(binaryfunc, "nb_multiply", "__mul__"),
    MethodSlot(binaryfunc, "nb_divide", "__div__", py3 = False),
    MethodSlot(binaryfunc, "nb_remainder", "__mod__"),
    MethodSlot(binaryfunc, "nb_divmod", "__divmod__"),
    MethodSlot(ternaryfunc, "nb_power", "__pow__"),
    MethodSlot(unaryfunc, "nb_negative", "__neg__"),
    MethodSlot(unaryfunc, "nb_positive", "__pos__"),
    MethodSlot(unaryfunc, "nb_absolute", "__abs__"),
    # Renamed nb_bool/__bool__ in Py3.
    MethodSlot(inquiry, "nb_nonzero", "__nonzero__", py3 = ("nb_bool", "__bool__")),
    MethodSlot(unaryfunc, "nb_invert", "__invert__"),
    MethodSlot(binaryfunc, "nb_lshift", "__lshift__"),
    MethodSlot(binaryfunc, "nb_rshift", "__rshift__"),
    MethodSlot(binaryfunc, "nb_and", "__and__"),
    MethodSlot(binaryfunc, "nb_xor", "__xor__"),
    MethodSlot(binaryfunc, "nb_or", "__or__"),
    EmptySlot("nb_coerce", py3 = False),
    MethodSlot(unaryfunc, "nb_int", "__int__", fallback="__long__"),
    MethodSlot(unaryfunc, "nb_long", "__long__", fallback="__int__", py3 = "<RESERVED>"),
    MethodSlot(unaryfunc, "nb_float", "__float__"),
    MethodSlot(unaryfunc, "nb_oct", "__oct__", py3 = False),
    MethodSlot(unaryfunc, "nb_hex", "__hex__", py3 = False),

    # Added in release 2.0
    MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
    MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
    MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"),
    MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", py3 = False),
    MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"),
    MethodSlot(ibinaryfunc, "nb_inplace_power", "__ipow__"), # actually ternaryfunc!!!
    MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"),
    MethodSlot(ibinaryfunc, "nb_inplace_rshift", "__irshift__"),
    MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
    MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
    MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),

    # Added in release 2.2
    # The following require the Py_TPFLAGS_HAVE_CLASS flag
    MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
    MethodSlot(binaryfunc, "nb_true_divide", "__truediv__"),
    MethodSlot(ibinaryfunc, "nb_inplace_floor_divide", "__ifloordiv__"),
    MethodSlot(ibinaryfunc, "nb_inplace_true_divide", "__itruediv__"),

    # Added in release 2.5
    MethodSlot(unaryfunc, "nb_index", "__index__", ifdef = "PY_VERSION_HEX >= 0x02050000")
)

PySequenceMethods = (
    MethodSlot(lenfunc, "sq_length", "__len__"),
    EmptySlot("sq_concat"), # nb_add used instead
    EmptySlot("sq_repeat"), # nb_multiply used instead
    SyntheticSlot("sq_item", ["__getitem__"], "0"),    #EmptySlot("sq_item"),   # mp_subscript used instead
    MethodSlot(ssizessizeargfunc, "sq_slice", "__getslice__"),
    EmptySlot("sq_ass_item"), # mp_ass_subscript used instead
    SyntheticSlot("sq_ass_slice", ["__setslice__", "__delslice__"], "0"),
    MethodSlot(cmpfunc, "sq_contains", "__contains__"),
    EmptySlot("sq_inplace_concat"), # nb_inplace_add used instead
    EmptySlot("sq_inplace_repeat"), # nb_inplace_multiply used instead
)

PyMappingMethods = (
    MethodSlot(lenfunc, "mp_length", "__len__"),
    MethodSlot(objargfunc, "mp_subscript", "__getitem__"),
    SyntheticSlot("mp_ass_subscript", ["__setitem__", "__delitem__"], "0"),
)

PyBufferProcs = (
    MethodSlot(readbufferproc, "bf_getreadbuffer", "__getreadbuffer__", py3 = False),
    MethodSlot(writebufferproc, "bf_getwritebuffer", "__getwritebuffer__", py3 = False),
    MethodSlot(segcountproc, "bf_getsegcount", "__getsegcount__", py3 = False),
    MethodSlot(charbufferproc, "bf_getcharbuffer", "__getcharbuffer__", py3 = False),

    MethodSlot(getbufferproc, "bf_getbuffer", "__getbuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000"),
    MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000")
)
#------------------------------------------------------------------------------------------
#
#  The main slot table. This table contains descriptors for all the
#  top-level type slots, beginning with tp_dealloc, in the order they
#  appear in the type object.
#
#------------------------------------------------------------------------------------------

slot_table = (
    ConstructorSlot("tp_dealloc", '__dealloc__'),
    EmptySlot("tp_print"), #MethodSlot(printfunc, "tp_print", "__print__"),
    EmptySlot("tp_getattr"),
    EmptySlot("tp_setattr"),
    MethodSlot(cmpfunc, "tp_compare", "__cmp__", py3 = '<RESERVED>'),
    MethodSlot(reprfunc, "tp_repr", "__repr__"),
    SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
    SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
    SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
    MethodSlot(hashfunc, "tp_hash", "__hash__", inherited=False),    # Py3 checks for __richcmp__
    MethodSlot(callfunc, "tp_call", "__call__"),
    MethodSlot(reprfunc, "tp_str", "__str__"),
    SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
    SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
    SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
    TypeFlagsSlot("tp_flags"),
    DocStringSlot("tp_doc"),
    GCDependentSlot("tp_traverse"),
    GCClearReferencesSlot("tp_clear"),
    # Later -- synthesize a method to split into separate ops?
    MethodSlot(richcmpfunc, "tp_richcompare", "__richcmp__", inherited=False),  # Py3 checks for __hash__
    EmptySlot("tp_weaklistoffset"),
    MethodSlot(getiterfunc, "tp_iter", "__iter__"),
    MethodSlot(iternextfunc, "tp_iternext", "__next__"),
    MethodTableSlot("tp_methods"),
    MemberTableSlot("tp_members"),
    GetSetSlot("tp_getset"),
    BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
    EmptySlot("tp_dict"),
    SyntheticSlot("tp_descr_get", ["__get__"], "0"),
    SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
    EmptySlot("tp_dictoffset"),
    MethodSlot(initproc, "tp_init", "__init__"),
    EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
    InternalMethodSlot("tp_new"),
    EmptySlot("tp_free"),
    EmptySlot("tp_is_gc"),
    EmptySlot("tp_bases"),
    EmptySlot("tp_mro"),
    EmptySlot("tp_cache"),
    EmptySlot("tp_subclasses"),
    EmptySlot("tp_weaklist"),
    EmptySlot("tp_del"),
    EmptySlot("tp_version_tag", ifdef="PY_VERSION_HEX >= 0x02060000"),
    EmptySlot("tp_finalize", ifdef="PY_VERSION_HEX >= 0x030400a1"),
)

#------------------------------------------------------------------------------------------
#
#  Descriptors for special methods which don't appear directly
#  in the type object or its substructures. These methods are
#  called from slot functions synthesized by Cython.
#
#------------------------------------------------------------------------------------------

MethodSlot(initproc, "", "__cinit__")
MethodSlot(destructor, "", "__dealloc__")
MethodSlot(objobjargproc, "", "__setitem__")
MethodSlot(objargproc, "", "__delitem__")
MethodSlot(ssizessizeobjargproc, "", "__setslice__")
MethodSlot(ssizessizeargproc, "", "__delslice__")
MethodSlot(getattrofunc, "", "__getattr__")
MethodSlot(setattrofunc, "", "__setattr__")
MethodSlot(delattrofunc, "", "__delattr__")
MethodSlot(descrgetfunc, "", "__get__")
MethodSlot(descrsetfunc, "", "__set__")
MethodSlot(descrdelfunc, "", "__delete__")

# Method flags for python-exposed methods.

method_noargs = "METH_NOARGS"
method_onearg = "METH_O"
method_varargs = "METH_VARARGS"
method_keywords = "METH_KEYWORDS"
method_coexist = "METH_COEXIST"
| bsd-3-clause |
avi-psvm-dutta/eSim | src/kicadtoNgspice/KicadtoNgspice.py | 1 | 30713 | #===============================================================================
#
# FILE: kicadtoNgspice.py
#
# USAGE: ---
#
# DESCRIPTION: This defines all the configuration used in the application.
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Fahim Khan, fahim.elex@gmail.com
# ORGANIZATION: eSim team at FOSSEE, IIT Bombay.
# CREATED: Wednesday 04 March 2015
# REVISION: ---
#===============================================================================
import sys
import os
from PyQt4 import QtGui,QtCore
from Processing import PrcocessNetlist
import Analysis
import Source
import Model
import DeviceModel
import SubcircuitTab
import Convert
import TrackWidget
from xml.etree import ElementTree as ET
class MainWindow(QtGui.QWidget):
"""
This class create KicadtoNgspice window.
And Call Convert function if convert button is pressed.
The convert function takes all the value entered by user and create a final netlist "*.cir.out".
This final netlist is compatible with NgSpice.
"""
def __init__(self,clarg1,clarg2=None):
    """Read the KiCad netlist named by *clarg1*, preprocess it into
    schematic/option/source/model information, and open the converter
    window unless unknown or duplicate models are detected.
    """
    QtGui.QWidget.__init__(self)
    print "=================================="
    print "Kicad to Ngspice netlist converter "
    print "=================================="
    global kicadNetlist,schematicInfo
    global infoline,optionInfo
    self.kicadFile = clarg1
    self.clarg1=clarg1
    self.clarg2=clarg2

    #Object of Processing
    obj_proc = PrcocessNetlist()

    # Read the netlist
    kicadNetlist = obj_proc.readNetlist(self.kicadFile)

    # Construct parameter information
    param = obj_proc.readParamInfo(kicadNetlist)

    # Replace parameter with values
    netlist,infoline = obj_proc.preprocessNetlist(kicadNetlist,param)
    print "NETLIST ",netlist
    print "INFOLINE",infoline

    # Separate option and schematic information
    optionInfo, schematicInfo = obj_proc.separateNetlistInfo(netlist)
    print "OPTIONINFO",optionInfo
    print "SCHEMATICINFO",schematicInfo

    #List for storing source and its value
    global sourcelist, sourcelisttrack
    sourcelist=[]
    sourcelisttrack=[]
    schematicInfo,sourcelist = obj_proc.insertSpecialSourceParam(schematicInfo,sourcelist)
    print "SOURCELIST",sourcelist
    print "SCHEMATICINFO",schematicInfo

    #List storing model detail
    global modelList,outputOption,unknownModelList,multipleModelList
    modelList = []
    outputOption = []
    schematicInfo,outputOption,modelList,unknownModelList,multipleModelList = obj_proc.convertICintoBasicBlocks(schematicInfo,outputOption,modelList)
    print "Unknown Model List",unknownModelList
    print "Multiple Model List",multipleModelList
    print "Model List",modelList

    """
    args=[clarg1,clarg2]
    app = QtGui.QApplication(args)
    kingWindow = MainWindow()
    sys.exit(app.exec_())
    """

    #Create object of track widget
    self.obj_track = TrackWidget.TrackWidget()

    """
    Checking if any unknown model is used in schematic which is not recognized by NgSpice.
    Also if the two model of same name is present under modelParamXML directory
    """
    if unknownModelList:
        # Models NgSpice does not know: report and do NOT open the main window.
        print "Unknown Model List is : ",unknownModelList
        self.msg = QtGui.QErrorMessage()
        self.content = "Your schematic contain unknown model "+', '.join(unknownModelList)
        self.msg.showMessage(self.content)
        self.msg.setWindowTitle("Unknown Models")
    elif multipleModelList:
        # Duplicate model definitions found in modelParamXML: report instead.
        print "Multiple Model List is : ",multipleModelList
        self.msg = QtGui.QErrorMessage()
        self.mcontent = "Look like you have duplicate model in modelParamXML directory "+', '.join(multipleModelList[0])
        self.msg.showMessage(self.mcontent)
        self.msg.setWindowTitle("Multiple Models")
    else:
        self.createMainWindow()

    print "Init Schematic Info",schematicInfo
    # NOTE(review): this local is never used again within this method.
    initial_schematicInfo = schematicInfo
def createMainWindow(self):
    """
    Create the main window of the Kicad to Ngspice converter:
    the tabbed convert widget on top and a Convert button below it.
    """
    self.vbox = QtGui.QVBoxLayout(self)
    self.hbox=QtGui.QHBoxLayout(self)
    self.hbox.addStretch(1)
    self.convertbtn = QtGui.QPushButton("Convert")
    self.convertbtn.clicked.connect(self.callConvert)
    #self.cancelbtn = QtGui.QPushButton("Cancel")
    #self.cancelbtn.clicked.connect(self.closeCancel)
    self.hbox.addWidget(self.convertbtn)
    self.vbox.addWidget(self.createcreateConvertWidget())
    self.vbox.addLayout(self.hbox)
    #self.grid.addWidget(self.cancelbtn,1,1)
    #self.setWindowState(QtCore.Qt.WindowMaximized)
    self.setLayout(self.vbox)
    self.setWindowTitle("Kicad To NgSpice Converter")
    self.show()
"""
def closeCancel(self):
self.obj_track.modelTrack[:]=[]
self.obj_track.subcircuitList[:]=[]
self.close()
"""
def createcreateConvertWidget(self):
    """Build and return the tabbed convert widget.

    Creates one scrollable tab per configuration area (Analysis, Source
    Details, NgSpice Model, Device Modeling, Subcircuits) and stores the
    tab content objects in module-level globals so callConvert can read
    the user-entered values later.
    """
    global obj_analysis
    self.convertWindow = QtGui.QWidget()

    self.analysisTab = QtGui.QScrollArea()
    obj_analysis=Analysis.Analysis(self.clarg1)
    self.analysisTab.setWidget(obj_analysis)
    #self.analysisTabLayout = QtGui.QVBoxLayout(self.analysisTab.widget())
    self.analysisTab.setWidgetResizable(True)

    global obj_source
    self.sourceTab = QtGui.QScrollArea()
    obj_source=Source.Source(sourcelist,sourcelisttrack,self.clarg1)
    self.sourceTab.setWidget(obj_source)
    #self.sourceTabLayout = QtGui.QVBoxLayout(self.sourceTab.widget())
    self.sourceTab.setWidgetResizable(True)

    global obj_model
    self.modelTab = QtGui.QScrollArea()
    obj_model=Model.Model(schematicInfo,modelList,self.clarg1)
    self.modelTab.setWidget(obj_model)
    #self.modelTabLayout = QtGui.QVBoxLayout(self.modelTab.widget())
    self.modelTab.setWidgetResizable(True)

    global obj_devicemodel
    self.deviceModelTab = QtGui.QScrollArea()
    obj_devicemodel=DeviceModel.DeviceModel(schematicInfo,self.clarg1)
    self.deviceModelTab.setWidget(obj_devicemodel)
    self.deviceModelTab.setWidgetResizable(True)

    global obj_subcircuitTab
    self.subcircuitTab = QtGui.QScrollArea()
    obj_subcircuitTab = SubcircuitTab.SubcircuitTab(schematicInfo,self.clarg1)
    self.subcircuitTab.setWidget(obj_subcircuitTab)
    self.subcircuitTab.setWidgetResizable(True)

    self.tabWidget = QtGui.QTabWidget()
    #self.tabWidget.TabShape(QtGui.QTabWidget.Rounded)
    self.tabWidget.addTab(self.analysisTab,"Analysis")
    self.tabWidget.addTab(self.sourceTab,"Source Details")
    self.tabWidget.addTab(self.modelTab,"NgSpice Model")
    self.tabWidget.addTab(self.deviceModelTab,"Device Modeling")
    self.tabWidget.addTab(self.subcircuitTab,"Subcircuits")

    self.mainLayout = QtGui.QVBoxLayout()
    self.mainLayout.addWidget(self.tabWidget)
    #self.mainLayout.addStretch(1)
    self.convertWindow.setLayout(self.mainLayout)
    self.convertWindow.show()
    return self.convertWindow
def callConvert(self):
"""
Calling Convert Class Constructor
"""
global schematicInfo
global analysisoutput
global kicad
store_schematicInfo = list(schematicInfo)
(projpath,filename)=os.path.split(self.kicadFile)
project_name=os.path.basename(projpath)
print "PROJ PATH---",projpath
check=1
try:
fr=open(os.path.join(projpath,project_name+"_Previous_Values.xml"),'r')
temp_tree=ET.parse(fr)
temp_root=temp_tree.getroot()
except:
check=0
fw=open(os.path.join(projpath,project_name+"_Previous_Values.xml"),'w')
if check==0:
attr_parent=ET.Element("KicadtoNgspice")
if check==1:
attr_parent=temp_root
for child in attr_parent:
if child.tag=="analysis":
attr_parent.remove(child)
attr_analysis=ET.SubElement(attr_parent,"analysis")
attr_ac=ET.SubElement(attr_analysis,"ac")
if obj_analysis.Lin.isChecked():
ET.SubElement(attr_ac,"field1",name="Lin").text="true"
ET.SubElement(attr_ac,"field2",name="Dec").text="false"
ET.SubElement(attr_ac,"field3",name="Oct").text="false"
elif obj_analysis.Dec.isChecked():
ET.SubElement(attr_ac,"field1",name="Lin").text="false"
ET.SubElement(attr_ac,"field2",name="Dec").text="true"
ET.SubElement(attr_ac,"field3",name="Oct").text="false"
if obj_analysis.Oct.isChecked():
ET.SubElement(attr_ac,"field1",name="Lin").text="false"
ET.SubElement(attr_ac,"field2",name="Dec").text="false"
ET.SubElement(attr_ac,"field3",name="Oct").text="true"
else:
pass
ET.SubElement(attr_ac,"field4",name="Start Frequency").text= str(obj_analysis.ac_entry_var[0].text())
ET.SubElement(attr_ac,"field5",name="Stop Frequency").text= str(obj_analysis.ac_entry_var[1].text())
ET.SubElement(attr_ac,"field6",name="No. of points").text= str(obj_analysis.ac_entry_var[2].text())
ET.SubElement(attr_ac,"field7",name="Start Fre Combo").text= obj_analysis.ac_parameter[0]
ET.SubElement(attr_ac,"field8",name="Stop Fre Combo").text= obj_analysis.ac_parameter[1]
attr_dc=ET.SubElement(attr_analysis,"dc")
ET.SubElement(attr_dc,"field1",name="Source Name").text= str(obj_analysis.dc_entry_var[0].text())
ET.SubElement(attr_dc,"field2",name="Start").text= str(obj_analysis.dc_entry_var[1].text())
ET.SubElement(attr_dc,"field3",name="Increment").text= str(obj_analysis.dc_entry_var[2].text())
ET.SubElement(attr_dc,"field4",name="Stop").text= str(obj_analysis.dc_entry_var[3].text())
ET.SubElement(attr_dc,"field5",name="Operating Point").text=str(self.obj_track.op_check[-1])
print "OBJ_ANALYSIS.CHECK -----",self.obj_track.op_check[-1]
ET.SubElement(attr_dc,"field6",name="Start Combo").text= obj_analysis.dc_parameter[0]
ET.SubElement(attr_dc,"field7",name="Increment Combo").text=obj_analysis.dc_parameter[1]
ET.SubElement(attr_dc,"field8",name="Stop Combo").text= obj_analysis.dc_parameter[2]
attr_tran=ET.SubElement(attr_analysis,"tran")
ET.SubElement(attr_tran,"field1",name="Start Time").text= str(obj_analysis.tran_entry_var[0].text())
ET.SubElement(attr_tran,"field2",name="Step Time").text= str(obj_analysis.tran_entry_var[1].text())
ET.SubElement(attr_tran,"field3",name="Stop Time").text= str(obj_analysis.tran_entry_var[2].text())
ET.SubElement(attr_tran,"field4",name="Start Combo").text= obj_analysis.tran_parameter[0]
ET.SubElement(attr_tran,"field5",name="Step Combo").text= obj_analysis.tran_parameter[1]
ET.SubElement(attr_tran,"field6",name="Stop Combo").text= obj_analysis.tran_parameter[2]
print "TRAN PARAMETER 2-----",obj_analysis.tran_parameter[2]
#tree=ET.ElementTree(attr_analysis)
#tree.write(f)
if check==0:
attr_source=ET.SubElement(attr_parent,"source")
if check==1:
for child in attr_parent:
if child.tag=="source":
attr_source=child
count=1
grand_child_count=1
#global tmp_check
#tmp_check=0
for i in store_schematicInfo:
tmp_check=0
words=i.split(' ')
wordv=words[0]
for child in attr_source:
if child.tag==wordv and child.text==words[len(words)-1]:
tmp_check=1
for grand_child in child:
grand_child.text=str(obj_source.entry_var[grand_child_count].text())
grand_child_count=grand_child_count+1
grand_child_count=grand_child_count+1
if tmp_check==0:
words=i.split(' ')
wordv=words[0]
if wordv[0]=="v":
attr_var=ET.SubElement(attr_source,words[0],name="Source type")
attr_var.text=words[len(words)-1]
#ET.SubElement(attr_ac,"field1",name="Lin").text="true"
if words[len(words)-1]=="ac":
#attr_ac=ET.SubElement(attr_var,"ac")
ET.SubElement(attr_var,"field1",name="Amplitude").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var, "field2", name = "Phase").text = str(obj_source.entry_var[count].text())
count=count+2
elif words[len(words)-1]=="dc":
#attr_dc=ET.SubElement(attr_var,"dc")
ET.SubElement(attr_var,"field1",name="Value").text=str(obj_source.entry_var[count].text())
count=count+2
elif words[len(words)-1]=="sine":
#attr_sine=ET.SubElement(attr_var,"sine")
ET.SubElement(attr_var,"field1",name="Offset Value").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field2",name="Amplitude").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field3",name="Frequency").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field4",name="Delay Time").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field5",name="Damping Factor").text=str(obj_source.entry_var[count].text())
count=count+2
elif words[len(words)-1]=="pulse":
#attr_pulse=ET.SubElement(attr_var,"pulse")
ET.SubElement(attr_var,"field1",name="Initial Value").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field2",name="Pulse Value").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field3",name="Delay Time").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field4",name="Rise Time").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field5",name="Fall Time").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field5",name="Pulse width").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field5",name="Period").text=str(obj_source.entry_var[count].text())
count=count+2
elif words[len(words)-1]=="pwl":
#attr_pwl=ET.SubElement(attr_var,"pwl")
ET.SubElement(attr_var,"field1",name="Enter in pwl format").text=str(obj_source.entry_var[count].text())
count=count+2
elif words[len(words)-1]=="exp":
#attr_exp=ET.SubElement(attr_var,"exp")
ET.SubElement(attr_var,"field1",name="Initial Value").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field2",name="Pulsed Value").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field3",name="Rise Delay Time").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field4",name="Rise Time Constant").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field5",name="Fall TIme").text=str(obj_source.entry_var[count].text())
count=count+1
ET.SubElement(attr_var,"field6",name="Fall Time Constant").text=str(obj_source.entry_var[count].text())
count=count+2
else:
pass
#tree=ET.ElementTree(attr_source)
#tree.write(f1)
if check==0:
attr_model=ET.SubElement(attr_parent,"model")
if check==1:
for child in attr_parent:
if child.tag=="model":
attr_model=child
i=0
#tmp_check is a variable to check for duplicates in the xml file
tmp_check=0
#tmp_i is the iterator in case duplicates are there; then in that case we need to replace only the child node and not create a new parent node
for line in modelList:
print "i for each line in model List------",i
tmp_check=0
for rand_itr in obj_model.obj_trac.modelTrack:
if rand_itr[2]==line[2] and rand_itr[3]==line[3]:
start=rand_itr[7]
end=rand_itr[8]
i=start
for child in attr_model:
if child.text==line[2] and child.tag==line[3]:
for grand_child in child:
if i<=end:
grand_child.text=str(obj_model.obj_trac.model_entry_var[i].text())
print "STR OF MODEL----",str(obj_model.obj_trac.model_entry_var[i].text())
i=i+1
print "i incremented to ",i
else:
pass
tmp_check=1
if tmp_check==0:
attr_ui=ET.SubElement(attr_model,line[3],name="type")
attr_ui.text=line[2]
for key,value in line[7].iteritems():
if hasattr(value, '__iter__') and i<=end:
for item in value:
ET.SubElement(attr_ui,"field"+str(i+1),name=item).text=str(obj_model.obj_trac.model_entry_var[i].text())
print "STR OF MODEL----",str(obj_model.obj_trac.model_entry_var[i].text())
i=i+1
print "i incremented to ",i
else:
ET.SubElement(attr_ui,"field"+str(i+1),name=value).text=str(obj_model.obj_trac.model_entry_var[i].text())
print "STR OF MODEL----",str(obj_model.obj_trac.model_entry_var[i].text())
i=i+1
print "i incremented to ",i
#################################################################################################################
if check==0:
attr_devicemodel=ET.SubElement(attr_parent,"devicemodel")
if check==1:
for child in attr_parent:
if child.tag=="devicemodel":
del child[:]
attr_devicemodel=child
#print "Device model dict",obj_devicemodel.devicemodel_dict_beg
#print "Device model dict end",obj_devicemodel.devicemodel_dict_end
##########################
for i in obj_devicemodel.devicemodel_dict_beg:
attr_var=ET.SubElement(attr_devicemodel,i)
it=obj_devicemodel.devicemodel_dict_beg[i]
end=obj_devicemodel.devicemodel_dict_end[i]
while it<=end:
ET.SubElement(attr_var,"field").text=str(obj_devicemodel.entry_var[it].text())
it=it+1
#####################################
"""keys=obj_devicemodel.devicemodel_dict.keys()
n=len(keys)
for i in range(n):
thisKey=keys[i]
nextKey=keys[(i+1)%n]
nextValue=obj_devicemodel.devicemodel_dict[nextKey]
attr_var=ET.SubElement(attr_devicemodel,thisKey)
it=obj_devicemodel.devicemodel_dict[thisKey]
while it<=nextValue:
ET.SubElement(attr_var,"field").text=obj_devicemodel.entry_var[it]"""
###################################################################################################################
if check==0:
attr_subcircuit=ET.SubElement(attr_parent,"subcircuit")
if check==1:
for child in attr_parent:
if child.tag=="subcircuit":
del child[:]
attr_subcircuit=child
##########################
for i in obj_subcircuitTab.subcircuit_dict_beg:
attr_var=ET.SubElement(attr_subcircuit,i)
it=obj_subcircuitTab.subcircuit_dict_beg[i]
end=obj_subcircuitTab.subcircuit_dict_end[i]
while it<=end:
ET.SubElement(attr_var,"field").text=str(obj_subcircuitTab.entry_var[it].text())
it=it+1
tree=ET.ElementTree(attr_parent)
tree.write(fw)
self.obj_convert = Convert.Convert(self.obj_track.sourcelisttrack["ITEMS"],
self.obj_track.source_entry_var["ITEMS"],
store_schematicInfo,self.clarg1)
try:
#Adding Source Value to Schematic Info
store_schematicInfo = self.obj_convert.addSourceParameter()
#Adding Model Value to store_schematicInfo
store_schematicInfo = self.obj_convert.addModelParameter(store_schematicInfo)
#Adding Device Library to SchematicInfo
store_schematicInfo = self.obj_convert.addDeviceLibrary(store_schematicInfo,self.kicadFile)
#Adding Subcircuit Library to SchematicInfo
store_schematicInfo = self.obj_convert.addSubcircuit(store_schematicInfo, self.kicadFile)
analysisoutput = self.obj_convert.analysisInsertor(self.obj_track.AC_entry_var["ITEMS"],
self.obj_track.DC_entry_var["ITEMS"],
self.obj_track.TRAN_entry_var["ITEMS"],
self.obj_track.set_CheckBox["ITEMS"],
self.obj_track.AC_Parameter["ITEMS"],
self.obj_track.DC_Parameter["ITEMS"],
self.obj_track.TRAN_Parameter["ITEMS"],
self.obj_track.AC_type["ITEMS"],
self.obj_track.op_check)
#print "SchematicInfo after adding Model Details",schematicInfo
print "Analysis OutPut------>",analysisoutput
#Calling netlist file generation function
self.createNetlistFile(store_schematicInfo)
self.msg = "The Kicad to Ngspice Conversion completed successfully!!!!!!"
QtGui.QMessageBox.information(self, "Information", self.msg, QtGui.QMessageBox.Ok)
#self.obj_track.subcircuitList[:]=[]
#self.obj_track.modelTrack[:]=[]
#self.close()
except Exception as e:
print "Exception Message: ",e
print "There was error while converting kicad to ngspice"
self.close()
# Generate .sub file from .cir.out file if it is a subcircuit
subPath = os.path.splitext(self.kicadFile)[0]
if self.clarg2 == "sub":
self.createSubFile(subPath)
def createNetlistFile(self,store_schematicInfo):
    """Write the final ngspice netlist (<kicadFile>.cir.out).

    Merges the circuit lines in *store_schematicInfo* with the directives
    read from the project's ``analysis`` file, sorts the directives into
    analysis / output / initial-condition / simulator buckets, and writes
    everything plus a fixed ``.control`` section to ``<kicadFile>.out``.

    Exits the process (sys.exit) if the analysis file is missing or
    unreadable.

    # NOTE(review): relies on module-level globals `optionInfo`, `infoline`
    # and `outputOption` defined elsewhere in this file — confirm they are
    # populated before this is called.
    """
    print "Creating Final netlist"
    #print "INFOLINE",infoline
    #print "OPTIONINFO",optionInfo
    #print "Device MODEL LIST ",devicemodelList
    #print "SUBCKT ",subcktList
    #print "OUTPUTOPTION",outputOption
    #print "KicadfIle",kicadFile
    # Copy so the shared optionInfo list is not mutated (avoids writing
    # optionInfo twice in the final netlist).
    store_optionInfo = list(optionInfo)
    # Locate the per-project "analysis" file next to the kicad file.
    (projpath,filename) = os.path.split(self.kicadFile)
    analysisFileLoc = os.path.join(projpath,"analysis")
    #print "Analysis File Location",analysisFileLoc
    if os.path.exists(analysisFileLoc):
        try:
            f = open(analysisFileLoc)
            #Read data
            data = f.read()
            # Close the file
            f.close()
        except :
            print "Error While opening Project Analysis file. Please check it"
            sys.exit()
    else:
        print analysisFileLoc + " does not exist"
        sys.exit()
    # Collect every dot-directive line of the analysis file.
    analysisData=data.splitlines()
    for eachline in analysisData:
        eachline=eachline.strip()
        if len(eachline)>1:
            if eachline[0]=='.':
                store_optionInfo.append(eachline)
            else:
                pass
    #print "Option Info",optionInfo
    # Bucket the directives by kind for ordered emission below.
    analysisOption = []
    initialCondOption=[]
    simulatorOption =[]
    #includeOption=[] #Don't know why to use it
    #model = [] #Don't know why to use it
    for eachline in store_optionInfo:
        words=eachline.split()
        option=words[0]
        if (option=='.ac' or option=='.dc' or option=='.disto' or option=='.noise' or
            option=='.op' or option=='.pz' or option=='.sens' or option=='.tf' or
            option=='.tran'):
            analysisOption.append(eachline+'\n')
        elif (option=='.save' or option=='.print' or option=='.plot' or option=='.four'):
            # Leading dot is stripped so these become interactive commands.
            eachline=eachline.strip('.')
            outputOption.append(eachline+'\n')
        elif (option=='.nodeset' or option=='.ic'):
            initialCondOption.append(eachline+'\n')
        elif option=='.option':
            simulatorOption.append(eachline+'\n')
        #elif (option=='.include' or option=='.lib'):
        #    includeOption.append(eachline+'\n')
        #elif (option=='.model'):
        #    model.append(eachline+'\n')
        elif option=='.end':
            # .end is re-added explicitly at the bottom of the file.
            continue;
    #Start creating final netlist cir.out file
    outfile = self.kicadFile+".out"
    out=open(outfile,"w")
    out.writelines(infoline)
    out.writelines('\n')
    # Emission order: simulator options, initial conditions, circuit, analyses.
    sections=[simulatorOption, initialCondOption, store_schematicInfo, analysisOption]
    print "SECTIONS",sections
    for section in sections:
        if len(section) == 0:
            continue
        else:
            for line in section:
                out.writelines('\n')
                out.writelines(line)
    # Fixed control block: run the simulation and dump all node voltages
    # and branch currents to text files for plotting.
    out.writelines('\n* Control Statements \n')
    out.writelines('.control\n')
    out.writelines('run\n')
    #out.writelines(outputOption)
    out.writelines('print allv > plot_data_v.txt\n')
    out.writelines('print alli > plot_data_i.txt\n')
    out.writelines('.endc\n')
    out.writelines('.end\n')
    out.close()
def createSubFile(self,subPath):
    """Convert the project's .cir.out netlist into a reusable .sub subcircuit.

    Reads ``<subPath>.cir.out``, extracts the port list from the ``* u``
    comment line, drops analysis/.control/.end directives, and writes the
    remaining lines wrapped in ``.subckt``/``.ends`` to ``<subPath>.sub``.

    # NOTE(review): if the .cir.out file is missing or fails to open, `f`
    # is never bound and f.read() below raises NameError instead of a
    # clean exit — confirm intended behavior.
    """
    self.project = subPath
    self.projName = os.path.basename(self.project)
    if os.path.exists(self.project+".cir.out"):
        try:
            f = open(self.project+".cir.out")
        except :
            print("Error in opening .cir.out file.")
    else:
        print self.projName + ".cir.out does not exist. Please create a spice netlist."
    # Read the data from file
    data=f.read()
    # Close the file
    f.close()
    newNetlist=[]
    # Use an explicit iterator so the .control branch below can consume
    # lines ahead of the for-loop.
    netlist=iter(data.splitlines())
    for eachline in netlist:
        eachline=eachline.strip()
        if len(eachline)<1:
            continue
        words=eachline.split()
        # NOTE(review): eachline[2] raises IndexError for lines shorter
        # than 3 characters — presumably all surviving lines are longer.
        if eachline[2] == 'u':
            # A "* u... port" comment line carries the subcircuit ports.
            if words[len(words)-1] == "port":
                subcktInfo = ".subckt "+self.projName+" "
                for i in range(2,len(words)-1):
                    subcktInfo+=words[i]+" "
            continue
        if words[0] == ".end" or words[0] == ".ac" or words[0] == ".dc" or words[0] == ".tran" or words[0] == '.disto' or words[0] == '.noise' or words[0] == '.op' or words[0] == '.pz' or words[0] == '.sens' or words[0] == '.tf':
            # Analysis directives make no sense inside a subcircuit.
            continue
        elif words[0] == ".control":
            # Skip the whole .control ... .endc section.
            while words[0] != ".endc":
                eachline=netlist.next()
                eachline=eachline.strip()
                if len(eachline)<1:
                    continue
                words=eachline.split()
        else:
            newNetlist.append(eachline)
    outfile=self.project+".sub"
    out=open(outfile,"w")
    out.writelines("* Subcircuit " + self.projName)
    out.writelines('\n')
    # NOTE(review): subcktInfo is only bound if a "port" line was found
    # above; otherwise this raises NameError.
    out.writelines(subcktInfo)
    out.writelines('\n')
    # Insert a newline after every netlist entry (back-to-front so the
    # indices stay valid while inserting).
    for i in range(len(newNetlist),0,-1):
        newNetlist.insert(i,'\n')
    out.writelines(newNetlist)
    out.writelines('\n')
    out.writelines('.ends ' + self.projName)
    # NOTE(review): `out` is never closed; flush relies on interpreter GC.
    print "The subcircuit has been written in "+self.projName+".sub"
| gpl-3.0 |
ric2b/Vivaldi-browser | chromium/third_party/blink/web_tests/external/wpt/tools/third_party/pluggy/testing/test_pluginmanager.py | 43 | 8644 | import pytest
import types
from pluggy import (PluginValidationError,
HookCallError, HookimplMarker, HookspecMarker)
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def test_plugin_double_register(pm):
    """Registering the same plugin object again, or reusing its name, raises."""
    plugin = 42
    pm.register(plugin, name="abc")
    for clashing_name in ("abc", "def"):
        with pytest.raises(ValueError):
            pm.register(plugin, name=clashing_name)
def test_pm(pm):
    """Full register/lookup/unregister round trip on the plugin manager."""
    class Plugin(object):
        pass

    first, second = Plugin(), Plugin()
    pm.register(first)
    assert pm.is_registered(first)
    pm.register(second, "hello")
    assert pm.is_registered(second)
    registered = pm.get_plugins()
    assert first in registered
    assert second in registered
    assert pm.get_plugin('hello') == second
    # unregister returns the removed plugin and forgets it.
    assert pm.unregister(first) == first
    assert not pm.is_registered(first)
    listed = pm.list_name_plugin()
    assert len(listed) == 1
    assert listed == [("hello", second)]
def test_has_plugin(pm):
    """has_plugin reports presence by registered name."""
    class Plugin(object):
        pass

    instance = Plugin()
    pm.register(instance, 'hello')
    assert pm.is_registered(instance)
    assert pm.has_plugin('hello')
def test_register_dynamic_attr(he_pm):
class A(object):
def __getattr__(self, name):
if name[0] != "_":
return 42
raise AttributeError()
a = A()
he_pm.register(a)
assert not he_pm.get_hookcallers(a)
def test_pm_name(pm):
class A(object):
pass
a1 = A()
name = pm.register(a1, name="hello")
assert name == "hello"
pm.unregister(a1)
assert pm.get_plugin(a1) is None
assert not pm.is_registered(a1)
assert not pm.get_plugins()
name2 = pm.register(a1, name="hello")
assert name2 == name
pm.unregister(name="hello")
assert pm.get_plugin(a1) is None
assert not pm.is_registered(a1)
assert not pm.get_plugins()
def test_set_blocked(pm):
    """Blocking a name unregisters its plugin and rejects future registration."""
    class Plugin(object):
        pass

    plugin = Plugin()
    assigned_name = pm.register(plugin)
    assert pm.is_registered(plugin)
    assert not pm.is_blocked(assigned_name)
    # Blocking an in-use name removes the plugin.
    pm.set_blocked(assigned_name)
    assert pm.is_blocked(assigned_name)
    assert not pm.is_registered(plugin)
    # Blocking an unused name pre-emptively rejects registration under it.
    pm.set_blocked("somename")
    assert pm.is_blocked("somename")
    assert not pm.register(Plugin(), "somename")
    # Unregistering a blocked name leaves the block in place.
    pm.unregister(name="somename")
    assert pm.is_blocked("somename")
def test_register_mismatch_method(he_pm):
class hello(object):
@hookimpl
def he_method_notexists(self):
pass
he_pm.register(hello())
with pytest.raises(PluginValidationError):
he_pm.check_pending()
def test_register_mismatch_arg(he_pm):
    """A hookimpl taking an argument absent from the spec fails validation."""
    class Plugin(object):
        @hookimpl
        def he_method1(self, qlwkje):
            pass

    with pytest.raises(PluginValidationError):
        he_pm.register(Plugin())
def test_register(pm):
    """Plugins register, appear in get_plugins, and unregister independently."""
    class MyPlugin(object):
        pass

    first = MyPlugin()
    pm.register(first)
    assert first in pm.get_plugins()
    second = MyPlugin()
    pm.register(second)
    assert {first, second}.issubset(pm.get_plugins())
    assert pm.is_registered(first)
    assert pm.is_registered(second)
    # Removing one plugin must not disturb the other.
    pm.unregister(first)
    assert not pm.is_registered(first)
    assert first not in pm.get_plugins()
def test_register_unknown_hooks(pm):
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
return arg + 1
pname = pm.register(Plugin1())
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
# assert not pm._unverified_hooks
assert pm.hook.he_method1(arg=1) == [2]
assert len(pm.get_hookcallers(pm.get_plugin(pname))) == 1
def test_register_historic(pm):
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
pm.hook.he_method1.call_historic(kwargs=dict(arg=1))
out = []
class Plugin(object):
@hookimpl
def he_method1(self, arg):
out.append(arg)
pm.register(Plugin())
assert out == [1]
class Plugin2(object):
@hookimpl
def he_method1(self, arg):
out.append(arg * 10)
pm.register(Plugin2())
assert out == [1, 10]
pm.hook.he_method1.call_historic(kwargs=dict(arg=12))
assert out == [1, 10, 120, 12]
def test_with_result_memorized(pm):
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
he_method1 = pm.hook.he_method1
he_method1.call_historic(lambda res: out.append(res), dict(arg=1))
out = []
class Plugin(object):
@hookimpl
def he_method1(self, arg):
return arg * 10
pm.register(Plugin())
assert out == [10]
def test_with_callbacks_immediately_executed(pm):
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
return arg * 10
class Plugin2(object):
@hookimpl
def he_method1(self, arg):
return arg * 20
class Plugin3(object):
@hookimpl
def he_method1(self, arg):
return arg * 30
out = []
pm.register(Plugin1())
pm.register(Plugin2())
he_method1 = pm.hook.he_method1
he_method1.call_historic(lambda res: out.append(res), dict(arg=1))
assert out == [20, 10]
pm.register(Plugin3())
assert out == [20, 10, 30]
def test_register_historic_incompat_hookwrapper(pm):
class Hooks(object):
@hookspec(historic=True)
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
out = []
class Plugin(object):
@hookimpl(hookwrapper=True)
def he_method1(self, arg):
out.append(arg)
with pytest.raises(PluginValidationError):
pm.register(Plugin())
def test_call_extra(pm):
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
def he_method1(arg):
return arg * 10
out = pm.hook.he_method1.call_extra([he_method1], dict(arg=1))
assert out == [10]
def test_call_with_too_few_args(pm):
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
0 / 0
pm.register(Plugin1())
with pytest.raises(HookCallError):
with pytest.warns(UserWarning):
pm.hook.he_method1()
def test_subset_hook_caller(pm):
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
out = []
class Plugin1(object):
@hookimpl
def he_method1(self, arg):
out.append(arg)
class Plugin2(object):
@hookimpl
def he_method1(self, arg):
out.append(arg * 10)
class PluginNo(object):
pass
plugin1, plugin2, plugin3 = Plugin1(), Plugin2(), PluginNo()
pm.register(plugin1)
pm.register(plugin2)
pm.register(plugin3)
pm.hook.he_method1(arg=1)
assert out == [10, 1]
out[:] = []
hc = pm.subset_hook_caller("he_method1", [plugin1])
hc(arg=2)
assert out == [20]
out[:] = []
hc = pm.subset_hook_caller("he_method1", [plugin2])
hc(arg=2)
assert out == [2]
out[:] = []
pm.unregister(plugin1)
hc(arg=2)
assert out == []
out[:] = []
pm.hook.he_method1(arg=1)
assert out == [10]
def test_multicall_deprecated(pm):
class P1(object):
@hookimpl
def m(self, __multicall__, x):
pass
pytest.deprecated_call(pm.register, P1())
def test_add_hookspecs_nohooks(pm):
with pytest.raises(ValueError):
pm.add_hookspecs(10)
def test_reject_prefixed_module(pm):
"""Verify that a module type attribute that contains the project
prefix in its name (in this case `'example_*'` isn't collected
when registering a module which imports it.
"""
pm._implprefix = 'example'
conftest = types.ModuleType("conftest")
src = ("""
def example_hook():
pass
""")
exec(src, conftest.__dict__)
conftest.example_blah = types.ModuleType("example_blah")
name = pm.register(conftest)
assert name == 'conftest'
assert getattr(pm.hook, 'example_blah', None) is None
assert getattr(pm.hook, 'example_hook', None) # conftest.example_hook should be collected
assert pm.parse_hookimpl_opts(conftest, 'example_blah') is None
assert pm.parse_hookimpl_opts(conftest, 'example_hook') == {}
| bsd-3-clause |
40223149/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/__init__.py | 603 | 6082 | ## pygame - Python Game Library
## Copyright (C) 2000-2001 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
'''Top-level Pygame module.
Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the Python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import sys
class MissingModule:
    """Placeholder for an optional pygame submodule that failed to import.

    Instances remember the module *name* and the original import error
    *info*.  Any attribute access raises NotImplementedError with a clear
    message, and the instance is falsy so ``if pygame.font:`` style
    feature checks work.

    If *urgent* is true the warning is emitted immediately at construction
    time; otherwise it is emitted lazily on first attribute access.
    """

    def __init__(self, name, info='', urgent=0):
        self.name = name
        self.info = str(info)
        self.urgent = urgent
        if urgent:
            self.warn()

    def __getattr__(self, var):
        # First non-urgent access warns once, then every access fails loudly.
        if not self.urgent:
            self.warn()
            self.urgent = 1
        MissingPygameModule = "%s module not available" % self.name
        raise NotImplementedError(MissingPygameModule)

    def __bool__(self):
        # The module is missing, so it must test false.  The original only
        # defined the Python 2 hook __nonzero__, which Python 3 ignores,
        # making instances truthy and defeating the feature check above.
        return False

    # Python 2 spelling of the truth-value hook, kept for compatibility.
    __nonzero__ = __bool__

    def warn(self):
        # 'import' for urgent (construction-time) failures, 'use' for lazy ones.
        # (Local renamed from `type`, which shadowed the builtin.)
        if self.urgent:
            kind = 'import'
        else:
            kind = 'use'
        message = '%s %s: %s' % (kind, self.name, self.info)
        try:
            import warnings
            # Point the warning at the caller's caller so it names user code.
            if self.urgent:
                level = 4
            else:
                level = 3
            warnings.warn(message, RuntimeWarning, level)
        except ImportError:
            print(message)
#we need to import like this, each at a time. the cleanest way to import
#our modules is with the import command (not the __import__ function)
#first, the "required" modules
#from pygame.array import * #brython fix me
from pygame.base import *
from pygame.constants import *
from pygame.version import *
from pygame.rect import Rect
import pygame.color
Color = pygame.color.Color
__version__ = ver
#added by earney
from . import time
from . import display
from . import constants
from . import event
from . import font
from . import mixer
from . import sprite
from .surface import Surface
from . import image
from . import mouse
from . import transform
#next, the "standard" modules
#we still allow them to be missing for stripped down pygame distributions
'''
try: import pygame.cdrom
except (ImportError,IOError), msg:cdrom=MissingModule("cdrom", msg, 1)
try: import pygame.cursors
except (ImportError,IOError), msg:cursors=MissingModule("cursors", msg, 1)
try: import pygame.display
except (ImportError,IOError), msg:display=MissingModule("display", msg, 1)
try: import pygame.draw
except (ImportError,IOError), msg:draw=MissingModule("draw", msg, 1)
try: import pygame.event
except (ImportError,IOError), msg:event=MissingModule("event", msg, 1)
try: import pygame.image
except (ImportError,IOError), msg:image=MissingModule("image", msg, 1)
try: import pygame.joystick
except (ImportError,IOError), msg:joystick=MissingModule("joystick", msg, 1)
try: import pygame.key
except (ImportError,IOError), msg:key=MissingModule("key", msg, 1)
try: import pygame.mouse
except (ImportError,IOError), msg:mouse=MissingModule("mouse", msg, 1)
try: import pygame.sprite
except (ImportError,IOError), msg:sprite=MissingModule("sprite", msg, 1)
try: from pygame.surface import Surface
except (ImportError,IOError):Surface = lambda:Missing_Function
try: from pygame.overlay import Overlay
except (ImportError,IOError):Overlay = lambda:Missing_Function
try: import pygame.time
except (ImportError,IOError), msg:time=MissingModule("time", msg, 1)
try: import pygame.transform
except (ImportError,IOError), msg:transform=MissingModule("transform", msg, 1)
#lastly, the "optional" pygame modules
try:
import pygame.font
import pygame.sysfont
pygame.font.SysFont = pygame.sysfont.SysFont
pygame.font.get_fonts = pygame.sysfont.get_fonts
pygame.font.match_font = pygame.sysfont.match_font
except (ImportError,IOError), msg:font=MissingModule("font", msg, 0)
try: import pygame.mixer
except (ImportError,IOError), msg:mixer=MissingModule("mixer", msg, 0)
#try: import pygame.movie
#except (ImportError,IOError), msg:movie=MissingModule("movie", msg, 0)
#try: import pygame.movieext
#except (ImportError,IOError), msg:movieext=MissingModule("movieext", msg, 0)
try: import pygame.surfarray
except (ImportError,IOError), msg:surfarray=MissingModule("surfarray", msg, 0)
try: import pygame.sndarray
except (ImportError,IOError), msg:sndarray=MissingModule("sndarray", msg, 0)
#try: import pygame.fastevent
#except (ImportError,IOError), msg:fastevent=MissingModule("fastevent", msg, 0)
#there's also a couple "internal" modules not needed
#by users, but putting them here helps "dependency finder"
#programs get everything they need (like py2exe)
try: import pygame.imageext; del pygame.imageext
except (ImportError,IOError):pass
try: import pygame.mixer_music; del pygame.mixer_music
except (ImportError,IOError):pass
def packager_imports():
"""
Some additional things that py2app/py2exe will want to see
"""
import OpenGL.GL
'''
#make Rects pickleable
# Register Rect with copyreg so instances pickle as (x, y, w, h) and are
# rebuilt through __rect_constructor on load.
import copyreg
def __rect_constructor(x,y,w,h):
    # Pickle-time factory: rebuild a Rect from its four stored components.
    return Rect(x,y,w,h)
def __rect_reduce(r):
    # Reduce a Rect to (constructor, args) as required by the pickle protocol.
    assert type(r) == Rect
    return __rect_constructor, (r.x, r.y, r.w, r.h)
copyreg.pickle(Rect, __rect_reduce, __rect_constructor)
#cleanup namespace
# Drop helper module names so they do not leak from `import pygame`.
del pygame, os, sys, #TODO rwobject, surflock, MissingModule, copy_reg
| gpl-3.0 |
ueshin/apache-spark | python/run-tests.py | 15 | 13614 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
    """Print *text* to stdout wrapped in ANSI red escape codes."""
    red, reset = '\033[31m', '\033[0m'
    print('%s%s%s' % (red, text, reset))
# Shared run state: SKIPPED_TESTS is replaced in __main__ with a
# multiprocessing Manager dict so workers can record skipped-test lines.
SKIPPED_TESTS = None
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
# Serializes failure reporting so interleaved workers don't corrupt the log.
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
    build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
    if os.path.isdir(build_dir):
        SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
        break
else:
    # for/else: no candidate Scala build directory existed.
    raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
    """Run one PySpark test goal in a subprocess and report the outcome.

    Builds an isolated environment (per-run temp dir, metastore dir, JVM
    temp overrides), launches ``bin/pyspark`` with *test_name*, and on
    failure dumps the captured output and terminates the whole runner via
    os._exit.  On success, skipped-test lines are recorded in the shared
    SKIPPED_TESTS dict.
    """
    env = dict(os.environ)
    env.update({
        'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
        'SPARK_TESTING': '1',
        'SPARK_PREPEND_CLASSES': '1',
        'PYSPARK_PYTHON': which(pyspark_python),
        'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
        # Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
        'PYARROW_IGNORE_TIMEZONE': '1',
    })
    # Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
    # recognized by the tempfile module to override the default system temp directory.
    tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    while os.path.isdir(tmp_dir):
        tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    os.mkdir(tmp_dir)
    env["TMPDIR"] = tmp_dir
    # Separate unique directory for the Hive metastore of this run.
    metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
    while os.path.isdir(metastore_dir):
        metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
    os.mkdir(metastore_dir)
    # Also override the JVM's temp directory by setting driver and executor options.
    java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
    spark_args = [
        "--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
        "--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
        "--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
        "pyspark-shell"
    ]
    env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
    LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
    start_time = time.time()
    try:
        # Capture both streams in one temp file so output stays interleaved.
        per_test_output = tempfile.TemporaryFile()
        retcode = subprocess.Popen(
            [os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
            stderr=per_test_output, stdout=per_test_output, env=env).wait()
        shutil.rmtree(tmp_dir, ignore_errors=True)
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)
    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            with FAILURE_REPORTING_LOCK:
                # Append the full captured output to the shared log file.
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                for line in per_test_output:
                    decoded_line = line.decode("utf-8", "replace")
                    # Lines starting with digits are progress noise; skip them.
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception("Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        skipped_counts = 0
        try:
            per_test_output.seek(0)
            # Here expects skipped test output from unittest when verbosity level is
            # 2 (or --verbose option is enabled).
            decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
            skipped_tests = list(filter(
                lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
                decoded_lines))
            skipped_counts = len(skipped_tests)
            if skipped_counts > 0:
                key = (pyspark_python, test_name)
                assert SKIPPED_TESTS is not None
                SKIPPED_TESTS[key] = skipped_tests
            per_test_output.close()
        except:
            import traceback
            print_red("\nGot an exception while trying to store "
                      "skipped test output:\n%s" % traceback.format_exc())
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
        if skipped_counts != 0:
            LOGGER.info(
                "Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
                duration, skipped_counts)
        else:
            LOGGER.info(
                "Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
    """Return the Python executables to test with, aborting if none exist.

    Prefers python3.6 and pypy3 when present; otherwise falls back to
    whatever `python3` resolves to, prepended to the list.
    """
    candidates = ["python3.6", "pypy3"]
    found = [exe for exe in candidates if which(exe)]
    if "python3.6" in found:
        return found
    fallback = which("python3")
    if not fallback:
        LOGGER.error("No python3 executable found. Exiting!")
        os._exit(1)
    found.insert(0, fallback)
    return found
def parse_opts():
    """Parse and validate command-line options for the test runner.

    Returns the parsed argparse namespace.  Calls parser.error (exiting)
    on unknown arguments or a parallelism below 1.  Note the
    --python-executables default is computed eagerly at call time.
    """
    parser = ArgumentParser(
        prog="run-tests"
    )
    parser.add_argument(
        "--python-executables", type=str, default=','.join(get_default_python_executables()),
        help="A comma-separated list of Python executables to test against (default: %(default)s)"
    )
    parser.add_argument(
        "--modules", type=str,
        default=",".join(sorted(python_modules.keys())),
        help="A comma-separated list of Python modules to test (default: %(default)s)"
    )
    parser.add_argument(
        "-p", "--parallelism", type=int, default=4,
        help="The number of suites to test in parallel (default %(default)d)"
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="Enable additional debug logging"
    )
    group = parser.add_argument_group("Developer Options")
    group.add_argument(
        "--testnames", type=str,
        default=None,
        help=(
            "A comma-separated list of specific modules, classes and functions of doctest "
            "or unittest to test. "
            "For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
            "'pyspark.sql.tests FooTests' to run the specific class of unittests, "
            "'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
            "'--modules' option is ignored if they are given.")
    )
    # parse_known_args so we can report unsupported arguments ourselves.
    args, unknown = parser.parse_known_args()
    if unknown:
        parser.error("Unsupported arguments: %s" % ' '.join(unknown))
    if args.parallelism < 1:
        parser.error("Parallelism cannot be less than 1")
    return args
def _check_coverage(python_exec):
    """Exit with an error unless *python_exec* can import `coverage`.

    Called when COVERAGE_PROCESS_START is set, since coverage collection
    would silently produce nothing without the package installed.
    """
    try:
        subprocess_check_output(
            [python_exec, "-c", "import coverage"],
            # DEVNULL instead of open(os.devnull, 'w'): the original leaked
            # the file handle it opened for stderr.
            stderr=subprocess.DEVNULL)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; a non-zero exit raises CalledProcessError here.
        print_red("Coverage is not installed in Python executable '%s' "
                  "but 'COVERAGE_PROCESS_START' environment variable is set, "
                  "exiting." % python_exec)
        sys.exit(-1)
def main():
    """Entry point: build the test task queue and run it with worker threads.

    Resolves the Python executables and modules (or explicit test names)
    from the command line, enqueues one task per (executable, test goal)
    with heavy suites prioritized first, then drains the queue with
    opts.parallelism daemon worker threads and prints a skipped-test
    summary at the end.
    """
    opts = parse_opts()
    if opts.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    # Explicit --testnames overrides module selection entirely.
    should_test_modules = opts.testnames is None
    logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
    LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)
    python_execs = opts.python_executables.split(',')
    LOGGER.info("Will test against the following Python executables: %s", python_execs)
    if should_test_modules:
        modules_to_test = []
        for module_name in opts.modules.split(','):
            if module_name in python_modules:
                modules_to_test.append(python_modules[module_name])
            else:
                print("Error: unrecognized module '%s'. Supported modules: %s" %
                      (module_name, ", ".join(python_modules)))
                sys.exit(-1)
        LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
    else:
        testnames_to_test = opts.testnames.split(',')
        LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
    # Priority queue: lower number runs first, so long suites start early.
    task_queue = Queue.PriorityQueue()
    for python_exec in python_execs:
        # Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
        # environmental variable is set.
        if "COVERAGE_PROCESS_START" in os.environ:
            _check_coverage(python_exec)
        python_implementation = subprocess_check_output(
            [python_exec, "-c", "import platform; print(platform.python_implementation())"],
            universal_newlines=True).strip()
        LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
        LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
            [python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
        if should_test_modules:
            for module in modules_to_test:
                if python_implementation not in module.excluded_python_implementations:
                    for test_goal in module.python_test_goals:
                        # Known-slow suites get priority 0 so they start first.
                        heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
                                       'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
                                       'pyspark.pandas.tests']
                        if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
                            priority = 0
                        else:
                            priority = 100
                        task_queue.put((priority, (python_exec, test_goal)))
        else:
            for test_goal in testnames_to_test:
                task_queue.put((0, (python_exec, test_goal)))
    # Create the target directory before starting tasks to avoid races.
    target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)
    def process_queue(task_queue):
        # Worker loop: drain tasks until the queue is empty, always marking
        # tasks done so task_queue.join() below can return.
        while True:
            try:
                (priority, (python_exec, test_goal)) = task_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                run_individual_python_test(target_dir, test_goal, python_exec)
            finally:
                task_queue.task_done()
    start_time = time.time()
    for _ in range(opts.parallelism):
        worker = Thread(target=process_queue, args=(task_queue,))
        worker.daemon = True
        worker.start()
    try:
        task_queue.join()
    except (KeyboardInterrupt, SystemExit):
        print_red("Exiting due to interrupt")
        sys.exit(-1)
    total_duration = time.time() - start_time
    LOGGER.info("Tests passed in %i seconds", total_duration)
    # Summarize skipped tests collected by the workers.
    for key, lines in sorted(SKIPPED_TESTS.items()):
        pyspark_python, test_name = key
        LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
        for line in lines:
            LOGGER.info("    %s" % line.rstrip())
if __name__ == "__main__":
    # SKIPPED_TESTS must be a Manager-backed dict so worker threads can
    # record skipped-test output before main() prints the summary.
    SKIPPED_TESTS = Manager().dict()
    main()
| apache-2.0 |
junhuac/MQUIC | src/tools/perf/benchmarks/gpu_times.py | 8 | 2505 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from telemetry import benchmark
from telemetry.timeline import tracing_category_filter
from telemetry.web_perf.metrics import gpu_timeline
from telemetry.web_perf import timeline_based_measurement
import page_sets
# Trace categories captured for GPU timing; joined into the tracing category
# filter by _GPUTimes.CreateTimelineBasedMeasurementOptions below.
TOPLEVEL_CATEGORIES = ['disabled-by-default-gpu.device',
                       'disabled-by-default-gpu.service']
class _GPUTimes(perf_benchmark.PerfBenchmark):
  """Shared base: timeline-based measurement over the GPU trace categories,
  reporting the legacy GPU timeline metric."""
  def CreateTimelineBasedMeasurementOptions(self):
    category_filter = tracing_category_filter.TracingCategoryFilter(
        ','.join(TOPLEVEL_CATEGORIES))
    measurement_options = timeline_based_measurement.Options(
        overhead_level=category_filter)
    measurement_options.SetLegacyTimelineBasedMetrics(
        [gpu_timeline.GPUTimelineMetric()])
    return measurement_options
@benchmark.Disabled('all')  # http://crbug.com/453131, http://crbug.com/527543
class GPUTimesKeyMobileSites(_GPUTimes):
  """Measures GPU timeline metric on key mobile sites."""
  # Story set driving the benchmark.
  page_set = page_sets.KeyMobileSitesSmoothPageSet
  @classmethod
  def Name(cls):
    # Benchmark id used on the command line and in result dashboards.
    return 'gpu_times.key_mobile_sites_smooth'
@benchmark.Disabled('all')  # http://crbug.com/453131, http://crbug.com/527543
class GPUTimesGpuRasterizationKeyMobileSites(_GPUTimes):
  """Measures GPU timeline metric on key mobile sites with GPU rasterization.
  """
  # Same story set as the non-rasterization variant above.
  page_set = page_sets.KeyMobileSitesSmoothPageSet
  def SetExtraBrowserOptions(self, options):
    # Enable GPU rasterization via the shared silk flag helper.
    silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
  @classmethod
  def Name(cls):
    return 'gpu_times.gpu_rasterization.key_mobile_sites_smooth'
@benchmark.Disabled('all')  # http://crbug.com/453131, http://crbug.com/517476
class GPUTimesTop25Sites(_GPUTimes):
  """Measures GPU timeline metric for the top 25 sites."""
  # Story set driving the benchmark.
  page_set = page_sets.Top25SmoothPageSet
  @classmethod
  def Name(cls):
    return 'gpu_times.top_25_smooth'
@benchmark.Disabled('all')  # http://crbug.com/453131, http://crbug.com/517476
class GPUTimesGpuRasterizationTop25Sites(_GPUTimes):
  """Measures GPU timeline metric for the top 25 sites with GPU rasterization.
  """
  page_set = page_sets.Top25SmoothPageSet
  def SetExtraBrowserOptions(self, options):
    # Enable GPU rasterization via the shared silk flag helper.
    silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
  @classmethod
  def Name(cls):
    return 'gpu_times.gpu_rasterization.top_25_smooth'
| mit |
trevor/calendarserver | txweb2/dav/test/test_acl.py | 1 | 15231 | ##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
import os
from twisted.cred.portal import Portal
from txweb2 import responsecode
from txweb2.auth import basic
from txweb2.stream import MemoryStream
from txweb2.dav.util import davXMLFromStream
from txweb2.dav.auth import TwistedPasswordProperty, IPrincipal, DavRealm, TwistedPropertyChecker, AuthenticationWrapper
from txweb2.dav.fileop import rmdir
from txweb2.test.test_server import SimpleRequest
from txweb2.dav.test.util import Site, serialize
from txweb2.dav.test.test_resource import \
TestDAVPrincipalResource, TestPrincipalsCollection
from txdav.xml import element
import txweb2.dav.test.util
class ACL(txweb2.dav.test.util.TestCase):
    """
    RFC 3744 (WebDAV ACL) tests.
    """
    def createDocumentRoot(self):
        """
        Build a temporary docroot wrapped in an authenticating site.
        Child files ("none", "read", ...) and directories ("nobind",
        "bind", "unbind") are created with the ACL their name describes,
        so each test can probe one privilege at a time.
        """
        docroot = self.mktemp()
        os.mkdir(docroot)
        userResource = TestDAVPrincipalResource("/principals/users/user01")
        userResource.writeDeadProperty(TwistedPasswordProperty("user01"))
        principalCollection = TestPrincipalsCollection(
            "/principals/",
            children={"users": TestPrincipalsCollection(
                "/principals/users/",
                children={"user01": userResource})})
        rootResource = self.resource_class(
            docroot, principalCollections=(principalCollection,))
        portal = Portal(DavRealm())
        portal.registerChecker(TwistedPropertyChecker())
        credentialFactories = (basic.BasicCredentialFactory(""),)
        loginInterfaces = (IPrincipal,)
        # NOTE(review): the same factory tuple is passed twice; presumably
        # AuthenticationWrapper takes separate factory sequences for two
        # transport flavors -- confirm against its signature.
        self.site = Site(AuthenticationWrapper(
            rootResource,
            portal,
            credentialFactories,
            credentialFactories,
            loginInterfaces
        ))
        rootResource.setAccessControlList(self.grant(element.All()))
        # Files whose ACL grants exactly the privilege the file is named for.
        for name, acl in (
            ("none" , self.grant()),
            ("read" , self.grant(element.Read())),
            ("read-write" , self.grant(element.Read(), element.Write())),
            ("unlock" , self.grant(element.Unlock())),
            ("all" , self.grant(element.All())),
        ):
            filename = os.path.join(docroot, name)
            if not os.path.isfile(filename):
                file(filename, "w").close()
            resource = self.resource_class(filename)
            resource.setAccessControlList(acl)
        # Directories exercising the bind/unbind (create/remove child) privileges.
        for name, acl in (
            ("nobind" , self.grant()),
            ("bind" , self.grant(element.Bind())),
            ("unbind" , self.grant(element.Bind(), element.Unbind())),
        ):
            dirname = os.path.join(docroot, name)
            if not os.path.isdir(dirname):
                os.mkdir(dirname)
            resource = self.resource_class(dirname)
            resource.setAccessControlList(acl)
        return docroot
    def restore(self):
        # Get rid of whatever messed up state the test has now so that we'll
        # get a fresh docroot. This isn't very cool; tests should be doing
        # less so that they don't need a fresh copy of this state.
        if hasattr(self, "_docroot"):
            rmdir(self._docroot)
            del self._docroot
    def test_COPY_MOVE_source(self):
        """
        Verify source access controls during COPY and MOVE.
        """
        def work():
            dst_path = os.path.join(self.docroot, "copy_dst")
            dst_uri = "/" + os.path.basename(dst_path)
            # MOVE needs unbind on the source collection; COPY only needs
            # read on the resource itself.
            for src, status in (
                ("nobind", responsecode.FORBIDDEN),
                ("bind", responsecode.FORBIDDEN),
                ("unbind", responsecode.CREATED),
            ):
                src_path = os.path.join(self.docroot, "src_" + src)
                src_uri = "/" + os.path.basename(src_path)
                if not os.path.isdir(src_path):
                    os.mkdir(src_path)
                src_resource = self.resource_class(src_path)
                src_resource.setAccessControlList({
                    "nobind": self.grant(),
                    "bind" : self.grant(element.Bind()),
                    "unbind": self.grant(element.Bind(), element.Unbind())
                }[src])
                for name, acl in (
                    ("none" , self.grant()),
                    ("read" , self.grant(element.Read())),
                    ("read-write" , self.grant(element.Read(), element.Write())),
                    ("unlock" , self.grant(element.Unlock())),
                    ("all" , self.grant(element.All())),
                ):
                    filename = os.path.join(src_path, name)
                    if not os.path.isfile(filename):
                        file(filename, "w").close()
                    self.resource_class(filename).setAccessControlList(acl)
                for method in ("COPY", "MOVE"):
                    for name, code in (
                        ("none" , {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]),
                        ("read" , {"COPY": responsecode.CREATED, "MOVE": status}[method]),
                        ("read-write" , {"COPY": responsecode.CREATED, "MOVE": status}[method]),
                        ("unlock" , {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]),
                        ("all" , {"COPY": responsecode.CREATED, "MOVE": status}[method]),
                    ):
                        path = os.path.join(src_path, name)
                        uri = src_uri + "/" + name
                        request = SimpleRequest(self.site, method, uri)
                        request.headers.setHeader("destination", dst_uri)
                        _add_auth_header(request)
                        # code/path bound as defaults so each closure keeps
                        # the loop iteration's values.
                        def test(response, code=code, path=path):
                            if os.path.isfile(dst_path):
                                os.remove(dst_path)
                            if response.code != code:
                                return self.oops(request, response, code, method, name)
                        yield (request, test)
        return serialize(self.send, work())
    def test_COPY_MOVE_dest(self):
        """
        Verify destination access controls during COPY and MOVE.
        """
        def work():
            src_path = os.path.join(self.docroot, "read")
            uri = "/" + os.path.basename(src_path)
            for method in ("COPY", "MOVE"):
                # Creating the destination child requires bind on its parent.
                for name, code in (
                    ("nobind" , responsecode.FORBIDDEN),
                    ("bind" , responsecode.CREATED),
                    ("unbind" , responsecode.CREATED),
                ):
                    dst_parent_path = os.path.join(self.docroot, name)
                    dst_path = os.path.join(dst_parent_path, "dst")
                    request = SimpleRequest(self.site, method, uri)
                    request.headers.setHeader("destination", "/" + name + "/dst")
                    _add_auth_header(request)
                    def test(response, code=code, dst_path=dst_path):
                        if os.path.isfile(dst_path):
                            os.remove(dst_path)
                        if response.code != code:
                            return self.oops(request, response, code, method, name)
                    yield (request, test)
            self.restore()
        return serialize(self.send, work())
    def test_DELETE(self):
        """
        Verify access controls during DELETE.
        """
        def work():
            # Removing a child requires unbind on the containing collection.
            for name, code in (
                ("nobind" , responsecode.FORBIDDEN),
                ("bind" , responsecode.FORBIDDEN),
                ("unbind" , responsecode.NO_CONTENT),
            ):
                collection_path = os.path.join(self.docroot, name)
                path = os.path.join(collection_path, "dst")
                file(path, "w").close()
                request = SimpleRequest(self.site, "DELETE", "/" + name + "/dst")
                _add_auth_header(request)
                def test(response, code=code, path=path):
                    if response.code != code:
                        return self.oops(request, response, code, "DELETE", name)
                yield (request, test)
        return serialize(self.send, work())
    def test_UNLOCK(self):
        """
        Verify access controls during UNLOCK of unowned lock.
        """
        raise NotImplementedError()
    test_UNLOCK.todo = "access controls on UNLOCK unimplemented"
    def test_MKCOL_PUT(self):
        """
        Verify access controls during MKCOL.
        """
        # NOTE(review): the `return` below executes during the first pass of
        # this loop, so only "MKCOL" is ever exercised and "PUT" never runs
        # -- looks like a bug; confirm intent.
        for method in ("MKCOL", "PUT"):
            def work():
                for name, code in (
                    ("nobind" , responsecode.FORBIDDEN),
                    ("bind" , responsecode.CREATED),
                    ("unbind" , responsecode.CREATED),
                ):
                    collection_path = os.path.join(self.docroot, name)
                    path = os.path.join(collection_path, "dst")
                    if os.path.isfile(path):
                        os.remove(path)
                    elif os.path.isdir(path):
                        os.rmdir(path)
                    request = SimpleRequest(self.site, method, "/" + name + "/dst")
                    _add_auth_header(request)
                    def test(response, code=code, path=path):
                        if response.code != code:
                            return self.oops(request, response, code, method, name)
                    yield (request, test)
            return serialize(self.send, work())
    def test_PUT_exists(self):
        """
        Verify access controls during PUT of existing file.
        """
        def work():
            # Overwriting an existing resource requires write on it.
            for name, code in (
                ("none" , responsecode.FORBIDDEN),
                ("read" , responsecode.FORBIDDEN),
                ("read-write" , responsecode.NO_CONTENT),
                ("unlock" , responsecode.FORBIDDEN),
                ("all" , responsecode.NO_CONTENT),
            ):
                path = os.path.join(self.docroot, name)
                request = SimpleRequest(self.site, "PUT", "/" + name)
                _add_auth_header(request)
                def test(response, code=code, path=path):
                    if response.code != code:
                        return self.oops(request, response, code, "PUT", name)
                yield (request, test)
        return serialize(self.send, work())
    def test_PROPFIND(self):
        """
        Verify access controls during PROPFIND.
        """
        raise NotImplementedError()
    test_PROPFIND.todo = "access controls on PROPFIND unimplemented"
    def test_PROPPATCH(self):
        """
        Verify access controls during PROPPATCH.
        """
        def work():
            # Property writes require write on the target resource.
            for name, code in (
                ("none" , responsecode.FORBIDDEN),
                ("read" , responsecode.FORBIDDEN),
                ("read-write" , responsecode.MULTI_STATUS),
                ("unlock" , responsecode.FORBIDDEN),
                ("all" , responsecode.MULTI_STATUS),
            ):
                path = os.path.join(self.docroot, name)
                request = SimpleRequest(self.site, "PROPPATCH", "/" + name)
                request.stream = MemoryStream(
                    element.WebDAVDocument(element.PropertyUpdate()).toxml()
                )
                _add_auth_header(request)
                def test(response, code=code, path=path):
                    if response.code != code:
                        return self.oops(request, response, code, "PROPPATCH", name)
                yield (request, test)
        return serialize(self.send, work())
    def test_GET_REPORT(self):
        """
        Verify access controls during GET and REPORT.
        """
        def work():
            for method in ("GET", "REPORT"):
                if method == "GET":
                    ok = responsecode.OK
                elif method == "REPORT":
                    ok = responsecode.MULTI_STATUS
                else:
                    raise AssertionError("We shouldn't be here. (method = %r)" % (method,))
                # Both methods require read on the target resource.
                for name, code in (
                    ("none" , responsecode.FORBIDDEN),
                    ("read" , ok),
                    ("read-write" , ok),
                    ("unlock" , responsecode.FORBIDDEN),
                    ("all" , ok),
                ):
                    path = os.path.join(self.docroot, name)
                    request = SimpleRequest(self.site, method, "/" + name)
                    if method == "REPORT":
                        request.stream = MemoryStream(element.PrincipalPropertySearch().toxml())
                    _add_auth_header(request)
                    def test(response, code=code, path=path):
                        if response.code != code:
                            return self.oops(request, response, code, method, name)
                    yield (request, test)
        return serialize(self.send, work())
    def oops(self, request, response, code, method, name):
        """
        Fail the test with a message including the response body and the
        ACL of the resource that produced the unexpected status code.
        """
        def gotResponseData(doc):
            if doc is None:
                doc_xml = None
            else:
                doc_xml = doc.toxml()
            def fail(acl):
                self.fail("Incorrect status code %s (!= %s) for %s of resource %s with %s ACL: %s\nACL: %s"
                          % (response.code, code, method, request.uri, name, doc_xml, acl.toxml()))
            def getACL(resource):
                return resource.accessControlList(request)
            d = request.locateResource(request.uri)
            d.addCallback(getACL)
            d.addCallback(fail)
            return d
        d = davXMLFromStream(response.stream)
        d.addCallback(gotResponseData)
        return d
def _add_auth_header(request):
    """Attach HTTP basic credentials for the test principal user01."""
    credentials = "user01:user01".encode("base64")
    request.headers.setHeader("authorization", ("basic", credentials))
| apache-2.0 |
ConnorGBrewster/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/python_api.py | 32 | 24587 | import math
import sys
import py
from six.moves import zip, filterfalse
from more_itertools.more import always_iterable
from _pytest.compat import isclass
from _pytest.compat import Mapping, Sequence
from _pytest.compat import STRING_TYPES
from _pytest.outcomes import fail
import _pytest._code
# Passed as the base_type of always_iterable() in raises() so that a single
# exception class (or, in the deprecated form, a string) is treated as one
# item rather than being iterated over.
BASE_TYPE = (type, STRING_TYPES)
def _cmp_raises_type_error(self, other):
"""__cmp__ implementation which raises TypeError. Used
by Approx base classes to implement only == and != and raise a
TypeError for other comparisons.
Needed in Python 2 only, Python 3 all it takes is not implementing the
other operators at all.
"""
__tracebackhide__ = True
raise TypeError(
"Comparison operators other than == and != not supported by approx objects"
)
# builtin pytest.approx helper
class ApproxBase(object):
    """
    Shared machinery for approximate comparison of numbers or collections
    of numbers.
    """

    # Ask numpy to defer to our `__eq__` instead of broadcasting its own
    # element-wise comparison over us.
    __array_ufunc__ = None
    __array_priority__ = 100

    def __init__(self, expected, rel=None, abs=None, nan_ok=False):
        self.expected = expected
        self.abs = abs
        self.rel = rel
        self.nan_ok = nan_ok

    def __repr__(self):
        raise NotImplementedError

    def __eq__(self, actual):
        pairs = self._yield_comparisons(actual)
        return all(a == self._approx_scalar(x) for a, x in pairs)

    __hash__ = None

    def __ne__(self, actual):
        return not (actual == self)

    if sys.version_info[0] == 2:
        __cmp__ = _cmp_raises_type_error

    def _approx_scalar(self, x):
        # Wrap one expected number with the tolerances of this comparison.
        return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)

    def _yield_comparisons(self, actual):
        """Yield the (actual, expected) number pairs compared by ``__eq__``."""
        raise NotImplementedError
class ApproxNumpy(ApproxBase):
    """
    Perform approximate comparisons for numpy arrays.

    ``self.expected`` is always a numpy array here; ``actual`` may be an
    array of the same shape or a scalar that is compared against every
    element.
    """

    def __repr__(self):
        # It might be nice to rewrite this function to account for the
        # shape of the array...
        import numpy as np

        return "approx({!r})".format(
            list(self._approx_scalar(x) for x in np.asarray(self.expected))
        )

    if sys.version_info[0] == 2:
        __cmp__ = _cmp_raises_type_error

    def __eq__(self, actual):
        import numpy as np

        # self.expected is supposed to always be an array here.
        if not np.isscalar(actual):
            try:
                actual = np.asarray(actual)
            except Exception:
                # Narrowed from a bare `except:` so that BaseException
                # (KeyboardInterrupt, SystemExit) still propagates.
                raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual))
        # Arrays of different shapes can never be equal.
        if not np.isscalar(actual) and actual.shape != self.expected.shape:
            return False
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        import numpy as np

        # `actual` can either be a numpy array or a scalar, it is treated in
        # `__eq__` before being passed to `ApproxBase.__eq__`, which is the
        # only method that calls this one.
        #
        # `.item()` extracts a plain Python scalar; it is the documented
        # replacement for `np.asscalar`, which was deprecated in numpy 1.16
        # and removed in 1.23 (so the old code raises AttributeError there).
        if np.isscalar(actual):
            for i in np.ndindex(self.expected.shape):
                yield actual, self.expected[i].item()
        else:
            for i in np.ndindex(self.expected.shape):
                yield actual[i].item(), self.expected[i].item()
class ApproxMapping(ApproxBase):
    """
    Approximate comparison for mappings whose values are numbers; the keys
    may be anything and are compared exactly.
    """

    def __repr__(self):
        approximated = {k: self._approx_scalar(v) for k, v in self.expected.items()}
        return "approx({!r})".format(approximated)

    def __eq__(self, actual):
        # The key sets must match exactly before any values are compared.
        if set(actual.keys()) == set(self.expected.keys()):
            return ApproxBase.__eq__(self, actual)
        return False

    def _yield_comparisons(self, actual):
        for key in self.expected.keys():
            yield actual[key], self.expected[key]
class ApproxSequence(ApproxBase):
    """
    Approximate comparison for sequences of numbers.
    """

    def __repr__(self):
        # Preserve the container type in the repr for tuples, lists and
        # sets; everything else is shown as a list.
        container = type(self.expected)
        if container not in (tuple, list, set):
            container = list
        return "approx({!r})".format(
            container(self._approx_scalar(x) for x in self.expected)
        )

    def __eq__(self, actual):
        # Sequences of different lengths can never match.
        if len(actual) != len(self.expected):
            return False
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        return zip(actual, self.expected)
class ApproxScalar(ApproxBase):
    """
    Perform approximate comparisons for single numbers only.
    """

    # Tolerances used when the user does not pass rel= / abs=.
    DEFAULT_ABSOLUTE_TOLERANCE = 1e-12
    DEFAULT_RELATIVE_TOLERANCE = 1e-6

    def __repr__(self):
        """
        Return a string communicating both the expected value and the tolerance
        for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode
        plus/minus symbol if this is python3 (it's too hard to get right for
        python2).
        """
        if isinstance(self.expected, complex):
            return str(self.expected)

        # Infinities aren't compared using tolerances, so don't show a
        # tolerance.
        if math.isinf(self.expected):
            return str(self.expected)

        # If a sensible tolerance can't be calculated, self.tolerance will
        # raise a ValueError. In this case, display '???'.
        try:
            vetted_tolerance = "{:.1e}".format(self.tolerance)
        except ValueError:
            vetted_tolerance = "???"

        if sys.version_info[0] == 2:
            return "{} +- {}".format(self.expected, vetted_tolerance)
        else:
            return u"{} \u00b1 {}".format(self.expected, vetted_tolerance)

    def __eq__(self, actual):
        """
        Return true if the given value is equal to the expected value within
        the pre-specified tolerance.
        """
        if _is_numpy_array(actual):
            # Delegate array-vs-scalar comparisons to ApproxNumpy.  The
            # tolerances are passed by keyword: ApproxBase.__init__ takes
            # (expected, rel, abs, nan_ok), so the previous positional call
            # ApproxNumpy(actual, self.abs, self.rel, ...) silently swapped
            # the relative and absolute tolerances.
            return (
                ApproxNumpy(actual, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
                == self.expected
            )

        # Short-circuit exact equality.
        if actual == self.expected:
            return True

        # Allow the user to control whether NaNs are considered equal to each
        # other or not. The abs() calls are for compatibility with complex
        # numbers.
        if math.isnan(abs(self.expected)):
            return self.nan_ok and math.isnan(abs(actual))

        # Infinity shouldn't be approximately equal to anything but itself, but
        # if there's a relative tolerance, it will be infinite and infinity
        # will seem approximately equal to everything. The equal-to-itself
        # case would have been short circuited above, so here we can just
        # return false if the expected value is infinite. The abs() call is
        # for compatibility with complex numbers.
        if math.isinf(abs(self.expected)):
            return False

        # Return true if the two numbers are within the tolerance.
        return abs(self.expected - actual) <= self.tolerance

    __hash__ = None

    @property
    def tolerance(self):
        """
        Return the tolerance for the comparison. This could be either an
        absolute tolerance or a relative tolerance, depending on what the user
        specified or which would be larger.

        Raises ValueError for negative or NaN tolerances.
        """

        def set_default(x, default):
            return x if x is not None else default

        # Figure out what the absolute tolerance should be. ``self.abs`` is
        # either None or a value specified by the user.
        absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
        if absolute_tolerance < 0:
            raise ValueError(
                "absolute tolerance can't be negative: {}".format(absolute_tolerance)
            )
        if math.isnan(absolute_tolerance):
            raise ValueError("absolute tolerance can't be NaN.")

        # If the user specified an absolute tolerance but not a relative one,
        # just return the absolute tolerance.
        if self.rel is None:
            if self.abs is not None:
                return absolute_tolerance

        # Figure out what the relative tolerance should be. ``self.rel`` is
        # either None or a value specified by the user. This is done after
        # we've made sure the user didn't ask for an absolute tolerance only,
        # because we don't want to raise errors about the relative tolerance if
        # we aren't even going to use it.
        relative_tolerance = set_default(
            self.rel, self.DEFAULT_RELATIVE_TOLERANCE
        ) * abs(
            self.expected
        )
        if relative_tolerance < 0:
            # Bug fix: report the offending *relative* tolerance; the original
            # message interpolated absolute_tolerance here.
            raise ValueError(
                "relative tolerance can't be negative: {}".format(relative_tolerance)
            )
        if math.isnan(relative_tolerance):
            raise ValueError("relative tolerance can't be NaN.")

        # Return the larger of the relative and absolute tolerances.
        return max(relative_tolerance, absolute_tolerance)
class ApproxDecimal(ApproxScalar):
    """Approximate comparison for ``decimal.Decimal`` values."""
    # Class-body import: runs once at class creation and, as a side effect,
    # also leaves ``Decimal`` behind as a class attribute
    # (``ApproxDecimal.Decimal``).
    from decimal import Decimal
    # Decimal defaults mirror the float defaults in ApproxScalar.
    DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
    DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
def approx(expected, rel=None, abs=None, nan_ok=False):
    """
    Assert that two numbers (or two sets of numbers) are equal to each other
    within some tolerance.

    Due to the `intricacies of floating-point arithmetic`__, numbers that we
    would intuitively expect to be equal are not always so::

        >>> 0.1 + 0.2 == 0.3
        False

    __ https://docs.python.org/3/tutorial/floatingpoint.html

    This problem is commonly encountered when writing tests, e.g. when making
    sure that floating-point values are what you expect them to be. One way to
    deal with this problem is to assert that two floating-point numbers are
    equal to within some appropriate tolerance::

        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
        True

    However, comparisons like this are tedious to write and difficult to
    understand. Furthermore, absolute comparisons like the one above are
    usually discouraged because there's no tolerance that works well for all
    situations. ``1e-6`` is good for numbers around ``1``, but too small for
    very big numbers and too big for very small ones. It's better to express
    the tolerance as a fraction of the expected value, but relative comparisons
    like that are even more difficult to write correctly and concisely.

    The ``approx`` class performs floating-point comparisons using a syntax
    that's as intuitive as possible::

        >>> from pytest import approx
        >>> 0.1 + 0.2 == approx(0.3)
        True

    The same syntax also works for sequences of numbers::

        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
        True

    Dictionary *values*::

        >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
        True

    ``numpy`` arrays::

        >>> import numpy as np # doctest: +SKIP
        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
        True

    And for a ``numpy`` array against a scalar::

        >>> import numpy as np # doctest: +SKIP
        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
        True

    By default, ``approx`` considers numbers within a relative tolerance of
    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
    This treatment would lead to surprising results if the expected value was
    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
    To handle this case less surprisingly, ``approx`` also considers numbers
    within an absolute tolerance of ``1e-12`` of its expected value to be
    equal. Infinity and NaN are special cases. Infinity is only considered
    equal to itself, regardless of the relative tolerance. NaN is not
    considered equal to anything by default, but you can make it be equal to
    itself by setting the ``nan_ok`` argument to True. (This is meant to
    facilitate comparing arrays that use NaN to mean "no data".)

    Both the relative and absolute tolerances can be changed by passing
    arguments to the ``approx`` constructor::

        >>> 1.0001 == approx(1)
        False
        >>> 1.0001 == approx(1, rel=1e-3)
        True
        >>> 1.0001 == approx(1, abs=1e-3)
        True

    If you specify ``abs`` but not ``rel``, the comparison will not consider
    the relative tolerance at all. In other words, two numbers that are within
    the default relative tolerance of ``1e-6`` will still be considered unequal
    if they exceed the specified absolute tolerance. If you specify both
    ``abs`` and ``rel``, the numbers will be considered equal if either
    tolerance is met::

        >>> 1 + 1e-8 == approx(1)
        True
        >>> 1 + 1e-8 == approx(1, abs=1e-12)
        False
        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
        True

    If you're thinking about using ``approx``, then you might want to know how
    it compares to other good ways of comparing floating-point numbers. All of
    these algorithms are based on relative and absolute tolerances and should
    agree for the most part, but they do have meaningful differences:

    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
      tolerance is met. Because the relative tolerance is calculated w.r.t.
      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
      ``b`` is a "reference value"). You have to specify an absolute tolerance
      if you want to compare to ``0.0`` because there is no tolerance by
      default. Only available in python>=3.5. `More information...`__

      __ https://docs.python.org/3/library/math.html#math.isclose

    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
      between ``a`` and ``b`` is less that the sum of the relative tolerance
      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
      is only calculated w.r.t. ``b``, this test is asymmetric and you can
      think of ``b`` as the reference value. Support for comparing sequences
      is provided by ``numpy.allclose``. `More information...`__

      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html

    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
      are within an absolute tolerance of ``1e-7``. No relative tolerance is
      considered and the absolute tolerance cannot be changed, so this function
      is not appropriate for very large or very small numbers. Also, it's only
      available in subclasses of ``unittest.TestCase`` and it's ugly because it
      doesn't follow PEP8. `More information...`__

      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual

    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
      Because the relative tolerance is only calculated w.r.t. ``b``, this test
      is asymmetric and you can think of ``b`` as the reference value. In the
      special case that you explicitly specify an absolute tolerance but not a
      relative tolerance, only the absolute tolerance is considered.

    .. warning::

       .. versionchanged:: 3.2

       In order to avoid inconsistent behavior, ``TypeError`` is
       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
       The example below illustrates the problem::

           assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
           assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)

       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
       to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to
       comparison. This is because the call hierarchy of rich comparisons
       follows a fixed behavior. `More information...`__

       __ https://docs.python.org/3/reference/datamodel.html#object.__ge__
    """
    # Local import: Decimal is only needed for the isinstance dispatch below.
    from decimal import Decimal

    # Delegate the comparison to a class that knows how to deal with the type
    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
    #
    # This architecture is really driven by the need to support numpy arrays.
    # The only way to override `==` for arrays without requiring that approx be
    # the left operand is to inherit the approx object from `numpy.ndarray`.
    # But that can't be a general solution, because it requires (1) numpy to be
    # installed and (2) the expected value to be a numpy array. So the general
    # solution is to delegate each type of expected value to a different class.
    #
    # This has the advantage that it made it easy to support mapping types
    # (i.e. dict). The old code accepted mapping types, but would only compare
    # their keys, which is probably not what most people would expect.
    if _is_numpy_array(expected):
        cls = ApproxNumpy
    elif isinstance(expected, Mapping):
        cls = ApproxMapping
    # Strings are Sequences too, so they are excluded explicitly: a string
    # is never treated as a sequence of numbers.
    elif isinstance(expected, Sequence) and not isinstance(expected, STRING_TYPES):
        cls = ApproxSequence
    elif isinstance(expected, Decimal):
        cls = ApproxDecimal
    else:
        cls = ApproxScalar
    return cls(expected, rel, abs, nan_ok)
def _is_numpy_array(obj):
"""
Return true if the given object is a numpy array. Make a special effort to
avoid importing numpy unless it's really necessary.
"""
import inspect
for cls in inspect.getmro(type(obj)):
if cls.__module__ == "numpy":
try:
import numpy as np
return isinstance(obj, np.ndarray)
except ImportError:
pass
return False
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
r"""
Assert that a code block/function call raises ``expected_exception``
and raise a failure exception otherwise.
:arg message: if specified, provides a custom failure message if the
exception is not raised
:arg match: if specified, asserts that the exception matches a text or regex
This helper produces a ``ExceptionInfo()`` object (see below).
You may use this function as a context manager::
>>> with raises(ZeroDivisionError):
... 1/0
.. versionchanged:: 2.10
In the context manager form you may use the keyword argument
``message`` to specify a custom failure message::
>>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
... pass
Traceback (most recent call last):
...
Failed: Expecting ZeroDivisionError
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> value = 15
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type == ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type == ValueError
Since version ``3.1`` you can use the keyword argument ``match`` to assert that the
exception matches a text or regex::
>>> with raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
**Legacy forms**
The forms below are fully supported but are discouraged for new code because the
context manager form is regarded as more readable and less error-prone.
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
It is also possible to pass a string to be evaluated at runtime::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
The string will be evaluated using the same ``locals()`` and ``globals()``
at the moment of the ``raises`` call.
.. currentmodule:: _pytest._code
Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`.
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)):
msg = (
"exceptions must be old-style classes or"
" derived from BaseException, not %s"
)
raise TypeError(msg % type(exc))
message = "DID NOT RAISE {}".format(expected_exception)
match_expr = None
if not args:
if "message" in kwargs:
message = kwargs.pop("message")
if "match" in kwargs:
match_expr = kwargs.pop("match")
if kwargs:
msg = "Unexpected keyword arguments passed to pytest.raises: "
msg += ", ".join(kwargs.keys())
raise TypeError(msg)
return RaisesContext(expected_exception, message, match_expr)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
# print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't mean f_globals == f_locals something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo()
fail(message)
raises.Exception = fail.Exception
class RaisesContext(object):
    """Context manager behind ``pytest.raises(...)``.

    Hands an *uninitialized* ``ExceptionInfo`` to the ``with`` block and
    fills it in on exit, failing the test if nothing was raised.
    """

    def __init__(self, expected_exception, message, match_expr):
        # Remember the contract that __exit__ will enforce.
        self.expected_exception = expected_exception
        self.message = message
        self.match_expr = match_expr
        self.excinfo = None

    def __enter__(self):
        # Allocate without running __init__; it is populated later, once
        # the exception (if any) has actually propagated.
        self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
        return self.excinfo

    def __exit__(self, *exc_details):
        __tracebackhide__ = True
        if exc_details[0] is None:
            # The managed block completed without raising anything.
            fail(self.message)
        self.excinfo.__init__(exc_details)
        suppress = issubclass(self.excinfo.type, self.expected_exception)
        if sys.version_info[0] == 2 and suppress:
            sys.exc_clear()
        if self.match_expr and suppress:
            self.excinfo.match(self.match_expr)
        # Returning True swallows the expected exception; anything else
        # propagates to the caller unchanged.
        return suppress
| mpl-2.0 |
tensorflow/agents | tf_agents/metrics/batched_py_metric_test.py | 1 | 4425 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.metrics.batched_py_metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.metrics import batched_py_metric
from tf_agents.metrics import py_metrics
from tf_agents.trajectories import trajectory
from tf_agents.utils import nest_utils
class BatchedPyMetricTest(tf.test.TestCase):
  """Checks BatchedPyMetric(AverageReturnMetric) over batch_size=2 steps."""

  def setUp(self):
    super(BatchedPyMetricTest, self).setUp()

    # Trajectory argument order:
    # (observation, action, policy_info, reward, discount).
    def _batched(step_fn, rewards):
      return nest_utils.stack_nested_arrays(
          [step_fn((), (), (), reward, 1.) for reward in rewards])

    self._ts0 = _batched(trajectory.boundary, [0., 0.])
    self._ts1 = _batched(trajectory.first, [1., 2.])
    self._ts2 = _batched(trajectory.last, [3., 4.])
    self._ts3 = _batched(trajectory.boundary, [0., 0.])
    self._ts4 = _batched(trajectory.first, [5., 6.])
    self._ts5 = _batched(trajectory.last, [7., 8.])

  def _make_metric(self):
    # A fresh batched average-return metric.
    return batched_py_metric.BatchedPyMetric(py_metrics.AverageReturnMetric)

  def _result_after(self, steps):
    # Feed ``steps`` in order into a fresh metric and report its result.
    metric = self._make_metric()
    for step in steps:
      metric(step)
    return metric.result()

  def testMetricIsComputedCorrectlyNoSteps(self):
    self.assertEqual(self._result_after([]), 0)

  def testMetricIsComputedCorrectlyPartialEpisode(self):
    # No episode has terminated yet, so the average return is still 0.
    self.assertEqual(self._result_after([self._ts0, self._ts1]), 0)

  def testMetricIsComputedCorrectlyOneEpisode(self):
    # Episode returns are 1+3=4 and 2+4=6; their mean is 5.
    self.assertEqual(
        self._result_after([self._ts0, self._ts1, self._ts2]), 5)

  def testMetricIsComputedCorrectlyOneAndPartialEpisode(self):
    # The unfinished second episode must not affect the average.
    self.assertEqual(
        self._result_after(
            [self._ts0, self._ts1, self._ts2, self._ts3, self._ts4]), 5)

  def testMetricIsComputedCorrectlyTwoEpisodes(self):
    # Second episode pair returns 5+7=12 and 6+8=14; overall mean is 9.
    self.assertEqual(
        self._result_after([self._ts0, self._ts1, self._ts2,
                            self._ts3, self._ts4, self._ts5]), 9)

  def testReset(self):
    # After reset() only the second episode pair (12 and 14) counts.
    metric = self._make_metric()
    for step in (self._ts0, self._ts1, self._ts2):
      metric(step)
    metric.reset()
    for step in (self._ts3, self._ts4, self._ts5):
      metric(step)
    self.assertEqual(metric.result(), 13)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
supergis/QGIS | python/plugins/processing/algs/grass/GrassAlgorithmProvider.py | 9 | 4599 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GrassAlgorithmProvider.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingLog import ProcessingLog
from GrassUtils import GrassUtils
from GrassAlgorithm import GrassAlgorithm
from nviz import nviz
from processing.tools.system import isMac, isWindows
# Root of the processing plugin (two directory levels above this module);
# used below to locate bundled resources such as the GRASS icon.
pluginPath = os.path.normpath(os.path.join(
    os.path.split(os.path.dirname(__file__))[0], os.pardir))
class GrassAlgorithmProvider(AlgorithmProvider):
    """Processing provider that exposes GRASS commands as algorithms."""

    def __init__(self):
        AlgorithmProvider.__init__(self)
        self.createAlgsList()  # Preloading algorithms to speed up

    def initializeSettings(self):
        AlgorithmProvider.initializeSettings(self)
        group = self.getDescription()
        if isWindows() or isMac():
            # On these platforms the GRASS/Msys locations cannot be
            # derived automatically, so expose them as folder settings.
            ProcessingConfig.addSetting(Setting(group,
                GrassUtils.GRASS_FOLDER, self.tr('GRASS folder'),
                GrassUtils.grassPath(), valuetype=Setting.FOLDER))
            ProcessingConfig.addSetting(Setting(group,
                GrassUtils.GRASS_WIN_SHELL, self.tr('Msys folder'),
                GrassUtils.grassWinShell(), valuetype=Setting.FOLDER))
        ProcessingConfig.addSetting(Setting(group,
            GrassUtils.GRASS_LOG_COMMANDS,
            self.tr('Log execution commands'), False))
        ProcessingConfig.addSetting(Setting(group,
            GrassUtils.GRASS_LOG_CONSOLE,
            self.tr('Log console output'), False))

    def unload(self):
        AlgorithmProvider.unload(self)
        if isWindows() or isMac():
            ProcessingConfig.removeSetting(GrassUtils.GRASS_FOLDER)
            ProcessingConfig.removeSetting(GrassUtils.GRASS_WIN_SHELL)
        ProcessingConfig.removeSetting(GrassUtils.GRASS_LOG_COMMANDS)
        ProcessingConfig.removeSetting(GrassUtils.GRASS_LOG_CONSOLE)

    def createAlgsList(self):
        """Instantiate one GrassAlgorithm per description file, plus nviz."""
        self.preloadedAlgs = []
        folder = GrassUtils.grassDescriptionPath()
        for fileName in os.listdir(folder):
            if not fileName.endswith('txt'):
                continue
            try:
                alg = GrassAlgorithm(os.path.join(folder, fileName))
                if alg.name.strip() != '':
                    self.preloadedAlgs.append(alg)
                else:
                    ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
                        self.tr('Could not open GRASS algorithm: %s' % fileName))
            except Exception:
                ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
                    self.tr('Could not open GRASS algorithm: %s' % fileName))
        self.preloadedAlgs.append(nviz())

    def _loadAlgorithms(self):
        self.algs = self.preloadedAlgs

    def getDescription(self):
        return self.tr('GRASS commands')

    def getName(self):
        return 'grass'

    def getIcon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'grass.png'))

    def getSupportedOutputVectorLayerExtensions(self):
        return ['shp']

    def getSupportedOutputRasterLayerExtensions(self):
        return ['tif']
| gpl-2.0 |
TeamBliss-Devices/android_kernel_samsung_jflte | build-all.py | 37 | 10510 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
#
# Modify for supporting of the Samsung JF targets.
#
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
import tarfile
# Build metadata and defaults.
version = 'build-all.py, version 0.01'
build_dir = '../../output/all-kernels'
make_command = ["zImage", "modules"]
# NOTE(review): this aliases (does not copy) the live process environment,
# so the update() below mutates os.environ for this process too.
make_env = os.environ
# NOTE(review): PWD may be absent (pwd would be None), which would make the
# string concatenation below raise TypeError — confirm invocation context.
pwd = os.environ.get("PWD")
make_env.update({
    'ARCH': 'arm',
    'CROSS_COMPILE': pwd + '/../prebuilts/gcc/linux-x86/arm/arm-eabi-4.8/bin/arm-eabi-',
    'KCONFIG_NOTIMESTAMP': 'true' })
# Replaced with the parsed OptionParser options in main(); the empty dict is
# only a placeholder (attribute access like all_options.verbose would fail
# before main() runs).
all_options = {}
def error(msg):
    """Report a build problem on stderr without aborting the run."""
    text = "error: %s\n" % msg
    sys.stderr.write(text)
def fail(msg):
    """Report *msg* on stderr and abort the build with exit status 1."""
    # Same output as error(msg), inlined so the exit immediately follows.
    sys.stderr.write("error: %s\n" % msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    required = ('MAINTAINERS', 'arch/arm/mach-msm/Kconfig')
    # Both marker files must be present for this to look like an MSM tree.
    if not all(os.path.isfile(name) for name in required):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError:
            # BUG FIX: the original compared exc.errno against errno.EEXIST,
            # but the errno module was never imported, so any makedirs
            # failure raised a NameError instead of being classified.
            # Treat "directory exists after all" (lost creation race) as
            # success and re-raise every other failure.
            if not os.path.isdir(build_dir):
                raise
def update_config(file, str):
    """Append the config fragment *str* (e.g. 'CONFIG_FOO=y') to *file*.

    NOTE(review): the parameter names shadow the ``file``/``str`` builtins.
    """
    print 'Updating %s with \'%s\'\n' % (file, str)
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    pattern = 'arch/arm/configs/jf_???_defconfig'
    # Map e.g. arch/arm/configs/jf_att_defconfig -> {'jf_att': <path>};
    # the trailing '_defconfig' suffix is 10 characters long.
    return dict((os.path.basename(path)[:-10], path)
                for path in glob.glob(pattern))
class Builder:
    """Runs one subprocess, teeing its output to a log file.

    Progress is echoed to stdout either verbatim (--verbose) or as one
    dot per line of output, with a newline inserted every 64 dots.
    """
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')
    def run(self, args):
        """Execute *args* under make_env; return the process exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # Read raw chunks rather than iterating lines so output appears
        # promptly (unbuffered).
        # for line in proc.stdout:
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            # Always capture the full output in the log file.
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per newline in this chunk; wrap every 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
failed_targets = []
def build(target):
    """Configure, build, sign and tar the kernel image for one target.

    NOTE(review): ``signing`` is only assigned for the targets listed in
    the if/elif chain below; an unknown target that builds successfully
    would hit an UnboundLocalError at the signclient invocation — verify
    the target list stays in sync with scan_configs().
    """
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    zImage_name = '%s/arch/arm/boot/zImage' % (dest_dir)
    bootImage_name = '%s/arch/arm/boot/boot.img' % (dest_dir)
    signedImage_name = '%s/arch/arm/boot/signed_boot.img' % (dest_dir)
    tarball_name = '%s/%s.tar' % (build_dir, target)
    # Per-carrier model string passed to the Samsung signing client.
    if target == 'jf_att':
        signing = "SGH-I337_NA_ATT_C"
    elif target == 'jactive_att':
        signing = "SGH-I537_NA_ATT_C"
    elif target == 'jf_tmo':
        signing = "SGH-M919_NA_TMB_C"
    elif target == 'jf_vzw':
        signing = "SCH-I545_NA_VZW_C"
    elif target == 'jf_spr':
        signing = "SPH-L720_NA_SPR_C"
    elif target == 'jf_cri':
        signing = "SCH-R970C_NA_CRI_C"
    elif target == 'jf_usc':
        signing = "SCH-R970_NA_USC_C"
    elif target == 'jf_eur':
        signing = "GT-I9505_EUR_XX_C"
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    # target[:-4] strips the carrier suffix (e.g. 'jf_att' -> 'jf').
    defconfig = 'arch/arm/configs/%s_defconfig' % target[:-4]
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    devnull = open('/dev/null', 'r')
    # Generate the merged configuration out-of-tree (O=dest_dir).
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        'VARIANT_DEFCONFIG=%s_defconfig' % target,
        'DEBUG_DEFCONFIG=%seng_defconfig' % target[:-4],
#        'SELINUX_DEFCONFIG=%sselinux_defconfig' % target[:-4],
#        'SELINUX_LOG_DEFCONFIG=%sselinux_log_defconfig' % target[:-4],
        '%s_defconfig' % target[:-4]], env=make_env, stdin=devnull)
    devnull.close()
    if not all_options.updateconfigs:
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))
        if result == 0:
            # Sign the image and pack it as boot.img inside a tarball.
            os.rename(zImage_name, bootImage_name)
            os.system('java -jar ../../buildscript/tools/signclient.jar -runtype ss_openssl_all -model %s -input %s -output %s' %(signing,bootImage_name,signedImage_name))
            tar = tarfile.open(tarball_name, "w")
            tar.add(signedImage_name, arcname='boot.img')
            tar.close()
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build every requested target, then report accumulated failures."""
    print "Building %d target(s)" % len(targets)
    for target in targets:
        # Optionally append a config option to the defconfig first.
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    # Non-empty only when --keep-going collected failures in build().
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Parse command-line options and build the requested targets."""
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] jf_att jf_vzw jf_tmo jf_spr ... -- List specific targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
            ' '.join(make_command))
    parser.add_option('-i', '--ignore-errors', action='store_true',
            dest="ignore",
            help="Ignore errors from commands")
    (options, args) = parser.parse_args()
    # Make the parsed options visible to build()/Builder via the module
    # global.
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    # Forward job-control and error-handling flags to make.
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    if options.ignore:
        make_command.append("-i")
        make_command.append("-k")
    if args == ['all']:
        build_many(configs, configs.keys())
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
# Script entry point.
if __name__ == "__main__":
    main()
| gpl-2.0 |
MediaKraken/MediaKraken_Deployment | source/testing/test_common/test_common_network_iscp.py | 1 | 1193 | """
Copyright (C) 2016 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import sys
sys.path.append('.')
from common import common_network_iscp
# the EISCP auto discover only works on 2011 models or higher
def test_com_net_eiscp_discovery():
    """
    Smoke test for Onkyo eISCP device discovery against address 10.0.0.1.

    NOTE(review): the return value is not asserted, so this only verifies
    that discovery runs without raising; per the comment above, only 2011
    or newer models answer eISCP auto-discovery.
    """
    common_network_iscp.com_net_eiscp_discovery('10.0.0.1')
# def MK_EISCP_Connection(ip_addr):
# def MK_EISCP_Disconnect(eiscp_device):
# def MK_EISCP_Command(eiscp_device, eiscp_command):
# def MK_EISCP_Command_RAW(eiscp_device, eiscp_raw_command):
| gpl-3.0 |
makinacorpus/geonode | src/GeoNodePy/geonode/maps/tests.py | 1 | 47766 | from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User, AnonymousUser, Group
import geonode.maps.models
import geonode.maps.views
from geonode.maps.models import Map, Layer, User
from geonode.maps.utils import get_valid_user, GeoNodeException
from mock import Mock, patch
import json
import os
import base64
# --- Test doubles -------------------------------------------------------
# The suite must run without live GeoServer/GeoNetwork services, so the
# service gateways on the Layer manager and the CSW factory are replaced
# with Mock objects at import time.
_gs_resource = Mock()
_gs_resource.native_bbox = [1, 2, 3, 4]
Layer.objects.geonetwork = Mock()
Layer.objects.gs_catalog = Mock()
Layer.objects.gs_catalog.get_resource.return_value = _gs_resource
geonode.maps.models.get_csw = Mock()
# CSW metadata records come back with an empty keyword list and a single
# canned online-distribution link.
geonode.maps.models.get_csw.return_value.records.get.return_value.identification.keywords = { 'list': [] }
_csw_resource = Mock()
_csw_resource.protocol = "WWW:LINK-1.0-http--link"
_csw_resource.url = "http://example.com/"
_csw_resource.description = "example link"
geonode.maps.models.get_csw.return_value.records.get.return_value.distribution.online = [_csw_resource]
class MapTest(TestCase):
"""Tests geonode.maps app/module
"""
    # Django fixtures loaded for every test in this class.
    fixtures = ['test_data.json', 'map_data.json']
    # Flipped (per instance) in setUp() when a live GeoServer is
    # advertised through the GEOSERVER environment variable.
    GEOSERVER = False
    def setUp(self):
        # If Geoserver and GeoNetwork are not running
        # avoid running tests that call those views.
        # NOTE(review): this sets an *instance* attribute shadowing the
        # class attribute GEOSERVER above.
        if "GEOSERVER" in os.environ:
            self.GEOSERVER = True
    # Expected title/abstract of the default map from the fixtures; used
    # by the fetch/serialization tests below.
    default_abstract = "This is a demonstration of GeoNode, an application \
for assembling and publishing web based maps. After adding layers to the map, \
use the Save Map button above to contribute your map to the GeoNode \
community."
    default_title = "GeoNode Default Map"
    # maps/models.py tests
    # maps.models.Layer
    # ------------------------------------------------------------------
    # The methods below are intentionally empty placeholder tests (they
    # pass unconditionally) outlining coverage still to be written for
    # the Layer model.
    # ------------------------------------------------------------------
    def test_layer_save_to_geoserver(self):
        pass
    def test_layer_save_to_geonetwork(self):
        pass
    def test_post_save_layer(self):
        pass
    def test_layer_verify(self):
        pass
    def test_layer_download_links(self):
        pass
    def test_layer_maps(self):
        pass
    def test_layer_metadata(self):
        pass
    def test_layer_metadata_csw(self):
        pass
    def test_layer_attribute_names(self):
        pass
    def test_layer_display_type(self):
        pass
    def test_layer_delete_from_geoserver(self):
        pass
    def test_layer_delete_from_geonetwork(self):
        pass
    def test_delete_layer(self):
        pass
    def test_layer_resource(self):
        pass
    def test_layer_get_metadata_links(self):
        pass
    def test_layer_set_metadata_links(self):
        pass
    def test_layer_get_default_style(self):
        pass
    def test_layer_set_default_style(self):
        pass
    def test_layer_get_styles(self):
        pass
    def test_layer_set_styles(self):
        pass
    def test_layer_service_type(self):
        pass
    def test_layer_publishing(self):
        pass
    def test_layer_poc_role(self):
        pass
    def test_layer_metadata_author_role(self):
        pass
    def test_layer_set_poc(self):
        pass
    def test_layer_get_poc(self):
        pass
    def test_layer_set_metadata_author(self):
        pass
    def test_layer_get_metadata_author(self):
        pass
    def test_layer_populate_from_gs(self):
        pass
    def test_layer_autopopulate(self):
        pass
    def test_layer_populate_from_gn(self):
        pass
    def test_layer_keyword_list(self):
        pass
    def test_layer_set_bbox(self):
        pass
    def test_layer_get_absolute_url(self):
        pass
def test_layer_set_default_permissions(self):
"""Verify that Layer.set_default_permissions is behaving as expected
"""
# Get a Layer object to work with
layer = Layer.objects.all()[0]
# Should we set some 'current' permissions to do further testing?
# Save the layers Current Permissions
current_perms = layer.get_all_level_info()
# Set the default permissions
layer.set_default_permissions()
# Test that LEVEL_READ is set for ANONYMOUS_USERS and AUTHENTICATED_USERS
self.assertEqual(layer.get_gen_level(geonode.core.models.ANONYMOUS_USERS), layer.LEVEL_READ)
self.assertEqual(layer.get_gen_level(geonode.core.models.AUTHENTICATED_USERS), layer.LEVEL_READ)
# Test that the previous Permissions were set to LEVEL_NONE
for username in current_perms['users'].keys():
user = User.objects.get(username=username)
self.assertEqual(layer.get_user_level(user), layer.LEVEL_NONE)
# Test that the owner was assigned LEVEL_ADMIN
if layer.owner:
self.assertEqual(layer.owner, layer.LEVEL_ADMIN)
    # maps.models.Map
    # Placeholder tests (unconditional pass) for the Map model.
    def test_map_center(self):
        pass
    def test_map_layers(self):
        pass
    def test_map_local_layers(self):
        pass
viewer_config_alternative = """
{
"defaultSourceType": "gx_wmssource",
"about": {
"title": "Title2",
"abstract": "Abstract2"
},
"sources": {
"capra": {
"url":"http://localhost:8001/geoserver/wms"
}
},
"map": {
"projection":"EPSG:900913",
"units":"m",
"maxResolution":156543.0339,
"maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34],
"center":[-9428760.8688778,1436891.8972581],
"layers":[{
"source":"capra",
"buffer":0,
"wms":"capra",
"name":"base:nic_admin"
}],
"zoom":7
}
}
"""
    def test_map_json(self):
        """PUT /maps/[id]/data -> overwriting an existing map requires login."""
        c = Client()
        # Test that saving a map when not logged in gives 401
        response = c.put("/maps/1/data",data=MapTest.viewer_config,content_type="text/json")
        self.assertEqual(response.status_code,401)
        log = c.login(username="bobby", password="bob")
        # Authenticated update with the alternative config must succeed
        # (204 No Content) and persist the new title/abstract/layer set.
        response = c.put("/maps/1/data",data=MapTest.viewer_config_alternative,content_type="text/json")
        self.assertEqual(response.status_code,204)
        map = Map.objects.get(id=1)
        self.assertEquals(map.title, "Title2")
        self.assertEquals(map.abstract, "Abstract2")
        self.assertEquals(map.layer_set.all().count(), 1)
    # Placeholder tests (unconditional pass) for Map serialization,
    # MapLayer config handling and the maps/views.py helpers.
    def test_map_viewer_json(self):
        pass
    def test_map_update_from_viewer(self):
        pass
    def test_map_get_absolute_url(self):
        pass
    def test_map_set_default_permissions(self):
        pass
    # maps.models.MapLayerManager
    def test_mlm_from_viewer_config(self):
        pass
    # maps.models.MapLayer
    def test_map_layer_from_viewer_config(self):
        pass
    def test_map_layer_source_config(self):
        pass
    def test_map_layer_layer_config(self):
        pass
    def test_map_layer_local_link(self):
        pass
    # maps/views.py tests
    def test_project_center(self):
        pass
    def test_baselayer(self):
        pass
    def test_bbox_to_wkt(self):
        pass
    def test_view_js(self):
        pass
    def test_view(self):
        pass
    # Maps Tests
# This is a valid map viewer config, based on the sample data provided
# by andreas in issue 566. -dwins
viewer_config = """
{
"defaultSourceType": "gx_wmssource",
"about": {
"title": "Title",
"abstract": "Abstract"
},
"sources": {
"capra": {
"url":"http://localhost:8001/geoserver/wms"
}
},
"map": {
"projection":"EPSG:900913",
"units":"m",
"maxResolution":156543.0339,
"maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34],
"center":[-9428760.8688778,1436891.8972581],
"layers":[{
"source":"capra",
"buffer":0,
"wms":"capra",
"name":"base:nic_admin"
}],
"zoom":7
}
}
"""
    # Placeholder tests (unconditional pass).
    def test_map_controller(self):
        pass
    def test_new_map(self):
        pass
    def test_map_save(self):
        """POST /maps -> Test saving a new map"""
        c = Client()
        # Test that saving a map when not logged in gives 401
        response = c.post("/maps/",data=MapTest.viewer_config,content_type="text/json")
        self.assertEqual(response.status_code,401)
        # Test successful new map creation
        log = c.login(username="bobby", password="bob")
        response = c.post("/maps/",data=MapTest.viewer_config,content_type="text/json")
        self.assertEquals(response.status_code,201)
        # The new map's id is the last path segment of the Location header.
        map_id = int(response['Location'].split('/')[-1])
        c.logout()
        # Fixtures contain one map (id=1), so the new one gets id=2.
        self.assertEquals(map_id,2)
        map = Map.objects.get(id=map_id)
        self.assertEquals(map.title, "Title")
        self.assertEquals(map.abstract, "Abstract")
        self.assertEquals(map.layer_set.all().count(), 1)
        # Test an invalid map creation request
        log = c.login(username="bobby", password="bob")
        response = c.post("/maps/",data="not a valid viewer config",content_type="text/json")
        self.assertEquals(response.status_code,400)
        c.logout()
    def test_map_fetch(self):
        """/maps/[id]/data -> Test fetching a map in JSON"""
        map = Map.objects.get(id="1")
        c = Client()
        response = c.get("/maps/%s/data" % map.id)
        self.assertEquals(response.status_code, 200)
        # The payload is the viewer configuration; compare against the
        # fixture defaults declared on this class.
        cfg = json.loads(response.content)
        self.assertEquals(cfg["about"]["abstract"], self.default_abstract)
        self.assertEquals(cfg["about"]["title"], self.default_title)
        # The default fixture map carries five layers.
        self.assertEquals(len(cfg["map"]["layers"]), 5)
    def test_map_to_json(self):
        """ Make some assertions about the data structure produced for serialization
            to a JSON map configuration"""
        map = Map.objects.get(id=1)
        cfg = map.viewer_json()
        self.assertEquals(cfg['about']['abstract'], MapTest.default_abstract)
        self.assertEquals(cfg['about']['title'], MapTest.default_title)
        # Only WMS-backed layers are expected to survive serialization.
        def is_wms_layer(x):
            return cfg['sources'][x['source']]['ptype'] == 'gxp_wmscsource'
        layernames = [x['name'] for x in cfg['map']['layers'] if is_wms_layer(x)]
        self.assertEquals(layernames, ['base:CA',])
    def test_newmap_to_json(self):
        """ Make some assertions about the data structure produced for serialization
            to a new JSON map configuration"""
        response = Client().get("/maps/new/data")
        cfg = json.loads(response.content)
        # New (unsaved) maps must advertise the CSW-backed WMS source type.
        self.assertEquals(cfg['defaultSourceType'], "gxp_wmscsource")
    def test_map_details(self):
        """/maps/1 -> Test accessing the detail view of a map"""
        map = Map.objects.get(id=1)
        c = Client()
        response = c.get("/maps/%s" % map.id)
        self.assertEquals(response.status_code,200)
    # Placeholder tests (unconditional pass) for map views and the batch
    # download/delete endpoints.
    def test_delete_map(self):
        pass
    def test_map_detail(self):
        pass
    def test_describe_map(self):
        pass
    def test_embed_map(self):
        pass
    # Batch Tests
    def test_map_download(self):
        pass
    def test_check_download(self):
        pass
    def test_batch_layer_download(self):
        pass
    def test_batch_delete(self):
        pass
# Permissions Tests
# Users
# - admin (pk=2)
# - bobby (pk=1)
# Inherited
# - LEVEL_NONE = _none
# Layer
# - LEVEL_READ = layer_read
# - LEVEL_WRITE = layer_readwrite
# - LEVEL_ADMIN = layer_admin
# Map
# - LEVEL_READ = map_read
# - LEVEL_WRITE = map_readwrite
# - LEVEL_ADMIN = map_admin
# FIXME: Add a comprehensive set of permissions specifications that allow us
# to test as many conditions as is possible/necessary
# If anonymous and/or authenticated are not specified,
# should set_layer_permissions remove any existing perms granted??
perm_spec = {"anonymous":"_none","authenticated":"_none","users":[["admin","layer_readwrite"],["group1","layer_readwrite"], ["group2","layer_readonly"]]}
    def test_set_layer_permissions(self):
        """Verify that the set_layer_permissions view is behaving as expected
        """
        # Get a layer to work with
        layer = Layer.objects.all()[0]
        # Save the Layers current permissions
        current_perms = layer.get_all_level_info()
        # FIXME Test a comprehensive set of permissions specifications
        # Set the Permissions
        geonode.maps.views.set_object_permissions(layer, self.perm_spec)
        # Test that the Permissions for ANONYMOUS_USERS and AUTHENTICATED_USERS were set correctly
        self.assertEqual(layer.get_gen_level(geonode.core.models.ANONYMOUS_USERS), layer.LEVEL_NONE)
        self.assertEqual(layer.get_gen_level(geonode.core.models.AUTHENTICATED_USERS), layer.LEVEL_NONE)
        # Test that previous permissions for users other than ones specified in
        # the perm_spec (and the layers owner) were removed
        users_and_groups = [n for (n, p) in self.perm_spec['users']]
        levels = layer.get_user_levels().exclude(user__username__in = users_and_groups + [layer.owner])
        self.assertEqual(len(levels), 0)
        # Test that previous permissions for groups other than ones specified in
        # the perm_spec (and the layers owner) were removed
        levels = layer.get_group_levels().exclude(group__name__in = users_and_groups)
        self.assertEqual(len(levels), 0)
        # Test that the users and groups permissions specified in the perm_spec were applied properly
        for name, level in self.perm_spec['users']:
            group = None
            user = None
            # perm_spec entries may name either a Group or a User; try the
            # Group first and fall back to the User lookup.
            try:
                group = Group.objects.get(name=name)
            except Group.DoesNotExist:
                user = geonode.maps.models.User.objects.get(username=name)
            if user:
                self.assertEqual(layer.get_user_level(user), level)
            else:
                self.assertEqual(layer.get_group_level(group), level)
    def test_view_layer_permissions(self):
        """Verify that the view_layer_permissions view is behaving as expected
        """
        # I'm not sure this view is actually being used anywhere (jj0hns0n 2011-04-13)
        pass
def test_ajax_layer_permissions(self):
"""Verify that the ajax_layer_permissions view is behaving as expected
"""
# Setup some layer names to work with
valid_layer_typename = Layer.objects.all()[0].typename
invalid_layer_typename = "n0ch@nc3"
c = Client()
# Test that an invalid layer.typename is handled for properly
response = c.post("/data/%s/ajax-permissions" % invalid_layer_typename,
data=json.dumps(self.perm_spec),
content_type="application/json")
self.assertEquals(response.status_code, 404)
# Test that POST is required
response = c.get("/data/%s/ajax-permissions" % valid_layer_typename)
self.assertEquals(response.status_code, 405)
# Test that a user is required to have maps.change_layer_permissions
# First test un-authenticated
response = c.post("/data/%s/ajax-permissions" % valid_layer_typename,
data=json.dumps(self.perm_spec),
content_type="application/json")
self.assertEquals(response.status_code, 401)
# Next Test with a user that does NOT have the proper perms
logged_in = c.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = c.post("/data/%s/ajax-permissions" % valid_layer_typename,
data=json.dumps(self.perm_spec),
content_type="application/json")
self.assertEquals(response.status_code, 401)
# Login as a user with the proper permission and test the endpoint
logged_in = c.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = c.post("/data/%s/ajax-permissions" % valid_layer_typename,
data=json.dumps(self.perm_spec),
content_type="application/json")
# Test that the method returns 200
self.assertEquals(response.status_code, 200)
# Test that the permissions specification is applied
# Should we do this here, or assume the tests in
# test_set_layer_permissions will handle for that?
def test_layer_acls(self):
    """ Verify that the layer_acls view is behaving as expected

    Covers HTTP basic-auth with the GeoServer service credentials, rejection
    of bad credentials, normal Django session auth, and group-inherited
    read-only access.
    """
    # Test that HTTP_AUTHORIZATION in request.META is working properly
    valid_uname_pw = "%s:%s" % (settings.GEOSERVER_CREDENTIALS[0],settings.GEOSERVER_CREDENTIALS[1])
    invalid_uname_pw = "%s:%s" % ("n0t", "v@l1d")
    valid_auth_headers = {
        'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
    }
    invalid_auth_headers = {
        'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(invalid_uname_pw),
    }
    # Test that requesting when supplying the GEOSERVER_CREDENTIALS returns the expected json
    expected_result = {'rw': [],'ro': [],'name': settings.GEOSERVER_CREDENTIALS[0],'is_superuser': True,'is_anonymous': False}
    c = Client()
    response = c.get('/data/acls', **valid_auth_headers)
    response_json = json.loads(response.content)
    self.assertEquals(expected_result, response_json)
    # Test that requesting when supplying invalid credentials returns the appropriate error code
    response = c.get('/data/acls', **invalid_auth_headers)
    self.assertEquals(response.status_code, 401)
    # Test logging in using Djangos normal auth system
    logged_in = c.login(username='admin', password='admin')
    # Basic check that the returned content is at least valid json
    response = c.get("/data/acls")
    response_json = json.loads(response.content)
    # Test group access rights: robert is not part of the perm_spec, but he
    # belong to group2 which is mentioned as read_only, so robert should have
    # read_only access
    layer = Layer.objects.all()[0]
    geonode.maps.views.set_object_permissions(layer, self.perm_spec)
    logged_in = c.login(username='robert', password='bob')
    response = c.get("/data/acls")
    response_json = json.loads(response.content)
    self.assertTrue(layer.typename in response_json['ro'])
def test_view_perms_context(self):
    # It seems that since view_layer_permissions and view_map_permissions
    # are no longer used, that this view is also no longer used since those
    # are the only 2 places it is ever called (jj0hns0n 2011-04-13)
    pass
def test_perms_info(self):
    """ Verify that the perms_info view is behaving as expected
    """
    # Test with a Layer object
    layer = Layer.objects.all()[0]
    layer_info = layer.get_all_level_info()
    info = geonode.maps.views._perms_info(layer, geonode.maps.views.LAYER_LEV_NAMES)
    # Test that ANONYMOUS_USERS and AUTHENTICATED_USERS are set properly
    # NOTE(review): these constants come from geonode.maps.models here but
    # from geonode.core.models in test_set_layer_permissions — confirm they
    # are re-exports of the same objects.
    self.assertEqual(info[geonode.maps.models.ANONYMOUS_USERS], layer.LEVEL_READ)
    self.assertEqual(info[geonode.maps.models.AUTHENTICATED_USERS], layer.LEVEL_READ)
    self.assertEqual(info['users'], sorted(layer_info['users'].items()))
    # TODO Much more to do here once jj0hns0n understands the ACL system better
    # Test with a Map object
    # TODO
def test_perms_info_json(self):
    # Should only need to verify that valid json is returned?
    pass
def test_fix_map_perms_for_editor(self):
    # I'm not sure this view is actually being used anywhere (jj0hns0n 2011-04-13)
    pass
def test_handle_perms_edit(self):
    # I'm not sure this view is actually being used anywhere (jj0hns0n 2011-04-13)
    pass
def test_get_basic_auth_info(self):
    # How do we test this? Perhaps as a part of test_layer_acls
    pass
def test_view_map_permissions(self):
    pass
def test_set_map_permissions(self):
    pass
def test_ajax_map_permissions(self):
    pass
def test_batch_permissions(self):
    # Batch-apply a permission spec to map pk=1 via the API, then check the
    # group level stuck.
    # NOTE(review): the spec grants u'layer_readwrite' but the assertion
    # expects u'map_readwrite' — presumably the endpoint translates levels
    # per object type; confirm.
    specs = {u'layers': [], u'maps': [u'1'], u'permissions': {u'users': [[u'group1', u'layer_readwrite']]}}
    c = Client()
    logged_in = c.login(username='admin', password='admin')
    response = c.post("/data/api/batch_permissions",
                      data=json.dumps(specs),
                      content_type="application/json")
    self.assertEquals(response.status_code, 200)
    map = Map.objects.get(pk=1)
    group = Group.objects.get(name='group1')
    self.assertEqual(map.get_group_level(group), u'map_readwrite')
# Data Tests
def test_data(self):
    '''/data/ -> Test accessing the data page'''
    c = Client()
    response = c.get('/data/')
    # NOTE(review): failUnlessEqual is the long-deprecated alias of
    # assertEqual; the rest of this class mostly uses assertEqual(s).
    self.failUnlessEqual(response.status_code, 200)
def test_browse_data(self):
    # Placeholder: not implemented yet.
    pass
def test_describe_data(self):
    '''/data/base:CA?describe -> Test accessing the description of a layer '''
    # NOTE(review): a second test_describe_data is defined later in this
    # class and shadows this one, so this version never runs. The two also
    # disagree on the expected user count (2 here vs 3 there).
    from django.contrib.auth.models import User
    self.assertEqual(2, User.objects.all().count())
    c = Client()
    response = c.get('/data/base:CA?describe')
    # Since we are not authenticated, we should not be able to access it
    self.failUnlessEqual(response.status_code, 302)
    # but if we log in ...
    c.login(username='bobby', password='bob')
    # ... all should be good
    if self.GEOSERVER:
        response = c.get('/data/base:CA?describe')
        self.failUnlessEqual(response.status_code, 200)
    else:
        # If Geoserver is not running, this should give a runtime error
        try:
            c.get('/data/base:CA?describe')
        except RuntimeError:
            pass
# Layer Tests
# Test layer upload endpoint
def test_upload_layer(self):
    """Anonymous requests to /data/upload redirect to login; authenticated
    requests get the upload form."""
    c = Client()
    # Test redirection to login form when not logged in
    response = c.get("/data/upload")
    self.assertEquals(response.status_code,302)
    # Test return of upload form when logged in
    c.login(username="bobby", password="bob")
    response = c.get("/data/upload")
    self.assertEquals(response.status_code,200)
# Placeholders for layer views not yet covered by tests.
def test_handle_layer_upload(self):
    pass
def test_update_layer(self):
    pass
def test_describe_layer(self):
    pass
def test_remove_layer(self):
    pass
def test_change_layer_default_style(self):
    pass
def test_layer_controller(self):
    pass
def test_extract_links(self):
    pass
# Search Tests
def test_search(self):
    '''/data/search/ -> Test accessing the data search page'''
    c = Client()
    response = c.get('/data/search/')
    self.failUnlessEqual(response.status_code, 200)
def test_search_page(self):
    pass
def test_build_search_result(self):
    pass
def test_metadata_search(self):
    """The search api view must forward q/start/limit query params to the
    _metadata_search helper (which is mocked out here)."""
    c = Client()
    #test around _metadata_search helper
    with patch.object(geonode.maps.views,'_metadata_search') as mock_ms:
        # Canned helper result; the uuid intentionally matches no record.
        result = {
            'rows' : [{
                'uuid' : 1214431 # does not exist
                }
            ]
        }
        mock_ms.return_value = result
        response = c.get("/data/search/api?q=foo&start=5&limit=10")
        # Verify positional args passed through: query, start, limit.
        call_args = geonode.maps.views._metadata_search.call_args
        self.assertEqual(call_args[0][0], "foo")
        self.assertEqual(call_args[0][1], 5)
        self.assertEqual(call_args[0][2], 10)
def test_search_result_detail(self):
    pass
def test_split_query(self):
    """_split_query keeps double-quoted phrases together and drops the
    surrounding quotes and extra whitespace."""
    query = 'alpha "beta gamma" delta '
    keywords = geonode.maps.views._split_query(query)
    self.assertEqual(keywords[0], "alpha")
    self.assertEqual(keywords[1], "beta gamma")
    self.assertEqual(keywords[2], "delta")
def test_search_api(self):
    '''/data/search/api -> Test accessing the data search api JSON'''
    if self.GEOSERVER:
        c = Client()
        response = c.get('/data/search/api')
        self.failUnlessEqual(response.status_code, 200)
def test_search_detail(self):
    '''
    /data/search/detail -> Test accessing the data search detail for a layer
    Disabled due to reliance on consistent UUIDs across loads.
    '''
    if self.GEOSERVER:
        layer = Layer.objects.all()[0]
        # save to geonetwork so we know the uuid is consistent between
        # django db and geonetwork
        layer.save_to_geonetwork()
        c = Client()
        response = c.get('/data/search/detail', {'uuid':layer.uuid})
        self.failUnlessEqual(response.status_code, 200)
def test_search_template(self):
    """Render the CSW transaction-insert template for a layer and check no
    field rendered as the literal string "None"."""
    from django.template import Context
    from django.template.loader import get_template
    layer = Layer.objects.all()[0]
    tpl = get_template("maps/csw/transaction_insert.xml")
    ctx = Context({
        'layer': layer,
    })
    md_doc = tpl.render(ctx)
    # NOTE(review): assert_ is a deprecated alias of assertTrue.
    self.assert_("None" not in md_doc, "None in " + md_doc)
def test_describe_data(self):
    '''/data/base:CA?describe -> Test accessing the description of a layer '''
    # NOTE(review): this redefinition shadows the earlier test_describe_data
    # in this class (which expected 2 users instead of 3); only this one runs.
    from django.contrib.auth.models import User
    self.assertEqual(3, User.objects.all().count())
    c = Client()
    response = c.get('/data/base:CA?describe')
    # Since we are not authenticated, we should not be able to access it
    self.failUnlessEqual(response.status_code, 302)
    # but if we log in ...
    c.login(username='bobby', password='bob')
    # ... all should be good
    if self.GEOSERVER:
        response = c.get('/data/base:CA?describe')
        self.failUnlessEqual(response.status_code, 200)
    else:
        # If Geoserver is not running, this should give a runtime error
        try:
            c.get('/data/base:CA?describe')
        except RuntimeError:
            pass
def test_layer_save(self):
    """Saving a layer propagates its keywords to the model and its resource."""
    # NOTE(review): _gs_resource is not defined in this part of the file —
    # presumably a module-level GeoServer resource fixture; confirm.
    lyr = Layer.objects.get(pk=1)
    lyr.keywords = "saving keywords"
    lyr.save()
    self.assertEqual(lyr.keyword_list(), ["saving", "keywords"])
    self.assertEqual(lyr.resource.keywords, ["saving", "keywords"])
    self.assertEqual(_gs_resource.keywords, ["saving", "keywords"])
def test_get_valid_user(self):
    """get_valid_user accepts a User object, a username string, or None (in
    which case it supplies some valid default), and rejects AnonymousUser."""
    # Verify it accepts an admin user
    adminuser = User.objects.get(is_superuser=True)
    valid_user = get_valid_user(adminuser)
    msg = ('Passed in a valid admin user "%s" but got "%s" in return'
           % (adminuser, valid_user))
    assert valid_user.id == adminuser.id, msg
    # Verify it returns a valid user after receiving None
    valid_user = get_valid_user(None)
    msg = ('Expected valid user after passing None, got "%s"' % valid_user)
    assert isinstance(valid_user, User), msg
    newuser = User.objects.create(username='arieluser')
    valid_user = get_valid_user(newuser)
    msg = ('Passed in a valid user "%s" but got "%s" in return'
           % (newuser, valid_user))
    assert valid_user.id == newuser.id, msg
    valid_user = get_valid_user('arieluser')
    msg = ('Passed in a valid user by username "%s" but got'
           ' "%s" in return' % ('arieluser', valid_user))
    assert valid_user.username == 'arieluser', msg
    nn = AnonymousUser()
    self.assertRaises(GeoNodeException, get_valid_user, nn)
def test_layer_generate_links(self):
    """Verify generating download/image links for a layer"""
    lyr = Layer.objects.get(pk=1)
    orig_bbox = lyr.resource.latlon_bbox
    # A degenerate bbox (zero height) used to trigger a division by zero.
    lyr.resource.latlon_bbox = ["1", "2", "3", "3"]
    try:
        lyr.download_links()
    except ZeroDivisionError:
        self.fail("Threw division error while generating download links")
    finally:
        # Always restore the original bbox so other tests see clean state.
        lyr.resource.latlon_bbox = orig_bbox
class ViewTest(TestCase):
    """Smoke tests for the new-map views with the GeoServer catalog mocked."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    # NOTE(review): declared after the methods; Django reads it as a class
    # attribute either way, but convention places fixtures first.
    fixtures = ['test_data.json', 'map_data.json']
    def test_new_map_without_layers(self):
        # Just exercise the view; no assertion is made on the response.
        client = Client()
        response = client.get("/maps/new")
    def test_new_map_with_layer(self):
        with patch('geonode.maps.models.Layer.objects.gs_catalog') as mock_gs:
            # Non-degenerate bbox: the view must render without error.
            mock_gs.get_resource.return_value.latlon_bbox = ["0", "1", "0", "1"]
            client = Client()
            layer = Layer.objects.all()[0]
            response = client.get("/maps/new?layer=" + layer.typename)
    def test_new_map_with_empty_bbox_layer(self):
        with patch('geonode.maps.models.Layer.objects.gs_catalog') as mock_gs:
            # Degenerate (all-zero) bbox must also be handled gracefully.
            mock_gs.get_resource.return_value.latlon_bbox = ["0", "0", "0", "0"]
            client = Client()
            layer = Layer.objects.all()[0]
            response = client.get("/maps/new?layer=" + layer.typename)
from geonode.maps.forms import JSONField, LayerUploadForm, NewLayerUploadForm
from django.core.files.uploadedfile import SimpleUploadedFile
class FormTest(TestCase):
    """Validation tests for the upload forms in geonode.maps.forms."""
    ## NOTE: we don't care about file content for many of these tests (the
    ## forms under test validate based only on file name, and leave actual
    ## content inspection to GeoServer) but Django's form validation will omit
    ## any files with empty bodies.
    ##
    ## That is, this leads to mysterious test failures:
    ## SimpleUploadedFile('foo', '')
    ##
    ## And this should be used instead to avoid that:
    ## SimpleUploadedFile('foo', ' ')
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testJSONField(self):
        """JSONField accepts a valid JSON document and rejects anything else."""
        from django.forms import ValidationError
        field = JSONField()
        # a valid JSON document should pass
        field.clean('{ "users": [] }')
        # text which is not JSON should fail
        self.assertRaises(ValidationError, lambda: field.clean('<users></users>'))
    def testShapefileValidation(self):
        """LayerUploadForm requires a complete shapefile set (.shp/.shx/.dbf,
        .prj optional) whose components share one base name; extensions may be
        any case."""
        cases = [
            # (expected_valid, form-field -> file name)
            # complete lowercase set
            (True, dict(base_file='foo.shp', shx_file='foo.shx',
                        dbf_file='foo.dbf', prj_file='foo.prj')),
            # complete uppercase set
            (True, dict(base_file='foo.SHP', shx_file='foo.SHX',
                        dbf_file='foo.DBF', prj_file='foo.PRJ')),
            # .prj is optional
            (True, dict(base_file='foo.SHP', shx_file='foo.shx',
                        dbf_file='foo.dbf')),
            # mixed-case components are accepted
            (True, dict(base_file='foo.SHP', shx_file='foo.shx',
                        dbf_file='foo.dbf', prj_file='foo.PRJ')),
            # mismatched base names are rejected
            (False, dict(base_file='foo.SHP', shx_file='bar.shx',
                         dbf_file='bar.dbf', prj_file='bar.PRJ')),
            # missing required .shx is rejected
            (False, dict(base_file='foo.shp', dbf_file='foo.dbf',
                         prj_file='foo.PRJ')),
            # wrong extensions are rejected
            (False, dict(base_file='foo.txt', shx_file='foo.shx',
                         dbf_file='foo.sld', prj_file='foo.prj')),
        ]
        for expected, names in cases:
            files = dict((field, SimpleUploadedFile(name, ' '))
                         for field, name in names.iteritems())
            self.assertEqual(LayerUploadForm(dict(), files).is_valid(), expected,
                             "unexpected validation result for %r" % names)
    def testGeoTiffValidation(self):
        """Every recognized GeoTIFF extension validates, in lower- and
        uppercase.

        Fixes a copy-paste slip in the original test, which checked
        'foo.TIF' and 'foo.GEOTIF' twice each and never exercised the
        'TIFF'/'GEOTIFF' spellings.
        """
        for ext in ('tif', 'TIF', 'tiff', 'TIFF',
                    'geotif', 'GEOTIF', 'geotiff', 'GEOTIFF'):
            files = dict(base_file=SimpleUploadedFile('foo.' + ext, ' '))
            self.assertTrue(LayerUploadForm(dict(), files).is_valid(),
                            "foo.%s should validate as a GeoTIFF upload" % ext)
    def testWriteFiles(self):
        """write_files() persists every uploaded component to one temp dir."""
        files = dict(
            base_file=SimpleUploadedFile('foo.shp', ' '),
            shx_file=SimpleUploadedFile('foo.shx', ' '),
            dbf_file=SimpleUploadedFile('foo.dbf', ' '),
            prj_file=SimpleUploadedFile('foo.prj', ' '))
        form = LayerUploadForm(dict(), files)
        self.assertTrue(form.is_valid())
        tempdir, base_file = form.write_files()
        self.assertEquals(set(os.listdir(tempdir)),
                          set(['foo.shp', 'foo.shx', 'foo.dbf', 'foo.prj']))
class UtilsTest(TestCase):
    """Tests for the helpers in geonode.maps.utils."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    fixtures = ['map_data.json']

    def test_layer_type(self):
        """layer_type() maps shapefile names to FeatureType and the GeoTIFF
        family to Coverage, case-insensitively; anything else raises."""
        from geonode.maps.utils import layer_type
        from geoserver.resource import FeatureType, Coverage
        for name in ('foo.shp', 'foo.SHP', 'foo.sHp'):
            self.assertEquals(layer_type(name), FeatureType.resource_type)
        for name in ('foo.tif', 'foo.TIF', 'foo.TiF',
                     'foo.geotif', 'foo.GEOTIF', 'foo.gEoTiF',
                     'foo.tiff', 'foo.TIFF', 'foo.TiFf',
                     'foo.geotiff', 'foo.GEOTIFF', 'foo.gEoTiFf'):
            self.assertEquals(layer_type(name), Coverage.resource_type)
        # basically anything else should produce a GeoNodeException
        self.assertRaises(GeoNodeException, lambda: layer_type('foo.gml'))

    # --- helpers for test_get_files (the original repeated this temp-dir
    # --- boilerplate verbatim for each of the eight scenarios) ------------
    def _write_empty_files(self, names):
        """Create an empty file per name in a fresh temp dir; return the dir."""
        import tempfile
        d = tempfile.mkdtemp()
        for f in names:
            # open and immediately close to create an empty file
            open(os.path.join(d, f), 'w').close()
        return d

    def _gotten_files(self, names, target):
        """Run get_files() on `target` inside a temp dir containing `names`,
        returning the result with paths made relative to the temp dir."""
        from geonode.maps.utils import get_files
        import shutil
        d = self._write_empty_files(names)
        try:
            gotten = get_files(os.path.join(d, target))
            return dict((k, v[len(d) + 1:]) for k, v in gotten.iteritems())
        finally:
            shutil.rmtree(d)

    def _assert_get_files_raises(self, names, target):
        """Assert get_files() rejects `target` inside a dir of `names`."""
        from geonode.maps.utils import get_files
        import shutil
        d = self._write_empty_files(names)
        try:
            self.assertRaises(GeoNodeException,
                              lambda: get_files(os.path.join(d, target)))
        finally:
            shutil.rmtree(d)

    def test_get_files(self):
        # A well-formed Shapefile has its components all picked up
        self.assertEquals(
            self._gotten_files(("foo.shp", "foo.shx", "foo.prj", "foo.dbf"), "foo.shp"),
            dict(base="foo.shp", shp="foo.shp", shx="foo.shx",
                 prj="foo.prj", dbf="foo.dbf"))
        # A Shapefile missing required components raises an exception
        self._assert_get_files_raises(("foo.shp", "foo.shx", "foo.prj"), "foo.shp")
        # An SLD alongside a valid shapefile gets picked up
        self.assertEquals(
            self._gotten_files(("foo.shp", "foo.shx", "foo.prj", "foo.dbf", "foo.sld"), "foo.shp"),
            dict(base="foo.shp", shp="foo.shp", shx="foo.shx",
                 prj="foo.prj", dbf="foo.dbf", sld="foo.sld"))
        # Capitalized extensions are ok
        self.assertEquals(
            self._gotten_files(("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF"), "foo.SHP"),
            dict(base="foo.SHP", shp="foo.SHP", shx="foo.SHX",
                 prj="foo.PRJ", dbf="foo.DBF"))
        # Mixed capital and lowercase extensions are ok
        self.assertEquals(
            self._gotten_files(("foo.SHP", "foo.shx", "foo.pRJ", "foo.DBF"), "foo.SHP"),
            dict(base="foo.SHP", shp="foo.SHP", shx="foo.shx",
                 prj="foo.pRJ", dbf="foo.DBF"))
        # Including both capital and lowercase sets raises an exception
        both = ("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF",
                "foo.shp", "foo.shx", "foo.prj", "foo.dbf")
        self._assert_get_files_raises(both, "foo.SHP")
        self._assert_get_files_raises(both, "foo.shp")
        # Both capital and lowercase PRJ (special-cased in the implementation)
        prjs = ("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF", "foo.prj")
        self._assert_get_files_raises(prjs, "foo.SHP")
        self._assert_get_files_raises(prjs, "foo.shp")
        # Both capital and lowercase SLD (special-cased in the implementation)
        slds = ("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF", "foo.SLD", "foo.sld")
        self._assert_get_files_raises(slds, "foo.SHP")
        self._assert_get_files_raises(slds, "foo.shp")

    def test_get_valid_name(self):
        """get_valid_name sanitizes names and de-duplicates against the db."""
        from geonode.maps.utils import get_valid_name
        self.assertEquals(get_valid_name("blug"), "blug")
        self.assertEquals(get_valid_name("<-->"), "_")
        self.assertEquals(get_valid_name("<ab>"), "_ab_")
        # "CA" exists in the fixture, so it gets a numeric suffix (stable
        # across calls since nothing named CA_1 is actually created).
        self.assertEquals(get_valid_name("CA"), "CA_1")
        self.assertEquals(get_valid_name("CA"), "CA_1")

    def test_get_valid_layer_name(self):
        """get_valid_layer_name accepts a name or Layer; with overwrite=True
        it returns the name unchanged, otherwise it sanitizes/uniquifies."""
        from geonode.maps.utils import get_valid_layer_name
        self.assertEquals(get_valid_layer_name("blug", False), "blug")
        self.assertEquals(get_valid_layer_name("blug", True), "blug")
        self.assertEquals(get_valid_layer_name("<ab>", False), "_ab_")
        self.assertEquals(get_valid_layer_name("<ab>", True), "<ab>")
        self.assertEquals(get_valid_layer_name("<-->", False), "_")
        self.assertEquals(get_valid_layer_name("<-->", True), "<-->")
        self.assertEquals(get_valid_layer_name("CA", False), "CA_1")
        self.assertEquals(get_valid_layer_name("CA", False), "CA_1")
        self.assertEquals(get_valid_layer_name("CA", True), "CA")
        self.assertEquals(get_valid_layer_name("CA", True), "CA")
        layer = Layer.objects.get(name="CA")
        self.assertEquals(get_valid_layer_name(layer, False), "CA_1")
        self.assertEquals(get_valid_layer_name(layer, True), "CA")
        # non-string, non-Layer input is rejected
        self.assertRaises(GeoNodeException, get_valid_layer_name, 12, False)
        self.assertRaises(GeoNodeException, get_valid_layer_name, 12, True)

    def test_cleanup(self):
        """cleanup() refuses to touch layers present in the Django db and
        tolerates missing or erroring GeoServer state."""
        from geonode.maps.utils import cleanup
        from geoserver.catalog import FailedRequestError
        from mock import patch
        # "CA" exists in the db, so cleanup must refuse
        self.assertRaises(GeoNodeException, cleanup, "CA", "1234")
        cleanup("FOO", "1234")
        def blowup(self):
            raise FailedRequestError()
        with patch('geonode.maps.models.Layer.objects.gs_catalog') as mock_catalog:
            mock_catalog.get_store.return_value = None
            cleanup("FOO", "1234")
        with patch('geonode.maps.models.Layer.objects.gs_catalog') as mock_catalog:
            mock_catalog.get_store.side_effect = blowup
            cleanup("FOO", "1234")
        with patch('geonode.maps.models.Layer.objects.gs_catalog') as mock_catalog:
            mock_catalog.get_layer.return_value = None
            cleanup("FOO", "1234")
        with patch('geonode.maps.models.Layer.objects.gs_catalog') as mock_catalog:
            mock_catalog.delete.side_effect = blowup
            cleanup("FOO", "1234")

    def test_check_geonode_is_up(self):
        """check_geonode_is_up raises GeoNodeException when either GeoServer
        or GeoNetwork is unreachable, and succeeds when both respond."""
        from contextlib import nested
        from geonode.maps.utils import check_geonode_is_up
        from mock import patch
        def blowup():
            raise Exception("BOOM")
        with patch('geonode.maps.models.Layer.objects.gs_catalog') as mock_gs:
            mock_gs.get_workspaces.side_effect = blowup
            self.assertRaises(GeoNodeException, check_geonode_is_up)
        with nested(
            patch('geonode.maps.models.Layer.objects.gs_catalog'),
            patch('geonode.maps.models.Layer.objects.geonetwork')
        ) as (mock_gs, mock_gn):
            mock_gn.login.side_effect = blowup
            self.assertRaises(GeoNodeException, check_geonode_is_up)
            # GeoServer must have been probed before GeoNetwork failed
            self.assertTrue(mock_gs.get_workspaces.called)
        with nested(
            patch('geonode.maps.models.Layer.objects.gs_catalog'),
            patch('geonode.maps.models.Layer.objects.geonetwork')
        ) as (mock_gs, mock_gn):
            # no assertion, this should just run without error
            check_geonode_is_up()

    def test_save(self):
        """End-to-end save() with GeoServer, GeoNetwork and WMS mocked out;
        verifies the metadata link attached to the GeoServer resource."""
        import shutil
        import tempfile
        from contextlib import nested
        from geonode.maps.utils import save
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.shp", "foo.shx", "foo.prj", "foo.dbf", "foo.sld", "foo.sld"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            class MockWMS(object):
                # Minimal stand-in for the module-level WMS client.
                def __init__(self):
                    self.contents = { 'geonode:a_layer': 'geonode:a_layer' }
                def __getitem__(self, idx):
                    return self.contents[idx]

            with nested(
                patch.object(geonode.maps.models, '_wms', new=MockWMS()),
                patch('geonode.maps.models.Layer.objects.gs_catalog'),
                patch('geonode.maps.models.Layer.objects.geonetwork')
            ) as (mock_wms, mock_gs, mock_gn):
                # Setup
                mock_gs.get_store.return_value.get_resources.return_value = []
                mock_resource = mock_gs.get_resource.return_value
                mock_resource.name = 'a_layer'
                mock_resource.title = 'a_layer'
                mock_resource.abstract = 'a_layer'
                mock_resource.store.name = "a_layer"
                mock_resource.store.resource_type = "dataStore"
                mock_resource.store.workspace.name = "geonode"
                mock_resource.native_bbox = ["0", "0", "0", "0"]
                mock_resource.projection = "EPSG:4326"
                mock_gn.url_for_uuid.return_value = "http://example.com/metadata"
                # Exercise
                base_file = os.path.join(d, 'foo.shp')
                owner = User.objects.get(username="admin")
                save('a_layer', base_file, owner)
                # Assertions
                (md_link,) = mock_resource.metadata_links
                md_mime, md_spec, md_url = md_link
                self.assertEquals(md_mime, "text/xml")
                self.assertEquals(md_spec, "TC211")
                self.assertEquals(md_url, "http://example.com/metadata")
        finally:
            if d is not None:
                shutil.rmtree(d)
| gpl-3.0 |
nanuxbe/django-modeltranslation | modeltranslation/management/commands/loaddata.py | 8 | 2572 | from django import VERSION
from django.core.management.commands.loaddata import Command as LoadDataCommand
# Because this command is used (instead of default loaddata), then settings have been imported
# and we can safely import MT modules
from modeltranslation import settings as mt_settings
from modeltranslation.utils import auto_populate
# Values accepted for the --populate MODE option.
ALLOWED = (None, False, 'all', 'default', 'required')
# Human-readable variant for help/error text ("0" stands in for False).
ALLOWED_FOR_PRINT = ', '.join(map(str, (0,) + ALLOWED[1:]))


def check_mode(option, opt_str, value, parser, namespace=None):
    """Shared optparse/argparse callback: validate and store the MODE value.

    '0' and any casing of 'false' are normalized to False; any other value
    must already be in ALLOWED or a ValueError is raised. The validated
    value is stored on `namespace` (argparse) or `parser.values` (optparse).
    """
    if value == '0' or value.lower() == 'false':
        value = False
    if value not in ALLOWED:
        raise ValueError("%s option can be only one of: %s" % (opt_str, ALLOWED_FOR_PRINT))
    target = namespace or parser.values
    setattr(target, option.dest, value)
class Command(LoadDataCommand):
    """loaddata replacement that can wrap fixture loading in modeltranslation's
    auto_populate() context, controlled by a --populate MODE option."""
    leave_locale_alone = mt_settings.LOADDATA_RETAIN_LOCALE # Django 1.6
    help = ('Using this option will cause fixtures to be loaded under auto-population MODE.' +
            'Allowed values are: %s' % ALLOWED_FOR_PRINT)
    # Option registration differs by Django version: optparse before 1.8,
    # argparse from 1.8 on. Both routes funnel into check_mode().
    if VERSION < (1, 8):
        from optparse import make_option
        option_list = LoadDataCommand.option_list + (
            make_option('--populate', action='callback', callback=check_mode, type='string',
                        dest='populate', metavar='MODE', help=help),
        )
    else:
        import argparse

        class CheckAction(argparse.Action):
            # argparse adapter around the shared check_mode validator.
            def __call__(self, parser, namespace, value, option_string=None):
                check_mode(self, option_string, value, parser, namespace)

        def add_arguments(self, parser):
            super(Command, self).add_arguments(parser)
            parser.add_argument('--populate', action=self.CheckAction, type=str, dest='populate',
                                metavar='MODE', help=self.help)

    def __init__(self):
        super(Command, self).__init__()
        # Pre-1.6 Django has no leave_locale_alone; remember the active
        # language here so handle() can restore it.
        if mt_settings.LOADDATA_RETAIN_LOCALE and VERSION < (1, 6):
            from django.utils import translation
            self.locale = translation.get_language()

    def handle(self, *fixture_labels, **options):
        # Restore the remembered locale (old-Django path only).
        if self.can_import_settings and hasattr(self, 'locale'):
            from django.utils import translation
            translation.activate(self.locale)
        # Load fixtures inside auto_populate() only when a MODE was given.
        mode = options.get('populate')
        if mode is not None:
            with auto_populate(mode):
                return super(Command, self).handle(*fixture_labels, **options)
        else:
            return super(Command, self).handle(*fixture_labels, **options)
| bsd-3-clause |
jnv/asciibooth | asciibooth/__main__.py | 1 | 3498 | from threading import Semaphore
from rx import Observable
from rx.concurrency import Scheduler
from time import sleep
from .input import observe_mouse, observe_keyboard, observe_getch
from . import ascii
from . import screen
from . import config
from . import raster
from . import output
from .statuses import status_generator
from .state import State, set_callback
from .camera import Camera
# Module-level singletons shared by the state-machine callbacks below.
CAMERA = Camera()
OUTPUT_CONVERTOR = ascii.Convertor(stream_size=config.CAPTURE_RESIZE, size=config.RENDER_CHARS, contrast=config.RENDER_CONTRAST)
RASTERIZER = raster.TextImage(config.RASTER_FONT)
STATUS_GEN = status_generator()
# Input-observer threads; populated by main() and stopped by quit().
THREADS = []
# Starts at 0 so main() blocks on acquire(); quit() releases it to exit.
QUIT_LOCK = Semaphore(0)
STATE = State()
@set_callback(STATE, 'onready')
def ready(e):
    """State callback: show the ready prompt and start the live preview."""
    screen.status('Ready.', clear=True)
    CAMERA.start_preview()
    return True

@set_callback(STATE, 'oncapturing')
def beforecapture(e):
    # Intentionally empty: nothing to do before a capture starts.
    pass

@set_callback(STATE, 'oncapture')
def capture_image(e):
    """State callback: grab a frame, render it as ASCII on screen, and either
    hand it to the output stage (e.output) or drop back to idle."""
    stream = CAMERA.capture()
    screen.status('Processing...')
    CAMERA.stop_preview()
    text = OUTPUT_CONVERTOR.convert(stream)
    screen.output(text, center=True)
    if e.output:
        STATE.trigger('output', text=text, share=e.share)
    else:
        STATE.idle()

@set_callback(STATE, 'onidle')
def idle(e):
    # Leave the rendered image on screen briefly before resetting.
    sleep(3)
    STATE.done()
@set_callback(STATE, 'onoutput')
def output_image(e):
    """State callback: rasterize the ASCII text, save it, and — when sharing
    was requested — print and tweet it. Print/tweet failures are logged and
    deliberately do not abort each other."""
    # NOTE(review): the `except ... as e` clauses below rebind the callback's
    # event parameter `e`; harmless here since `e` is no longer read after.
    text = e.text
    share = e.share
    image = RASTERIZER.draw_image(text, footer=output.footer_line())
    image = raster.save_image(image).getvalue()
    try:
        output.save_image(image)
        if share:
            try:
                screen.status('Printing...', newline=False)
                output.printjob(image)
            except Exception as e:
                print(e)
            try:
                screen.status('Sending to Twitter...')
                output.tweet(image, text=next(STATUS_GEN))
            except Exception as e:
                print(e)
    except Exception as e:
        print(e)
        # raise e
    # Always return to idle, even on failure.
    STATE.idle()
def quit():
    """Stop the camera and every input-observer thread, then release
    QUIT_LOCK so main() can return and the process can exit.

    NOTE: shadows the built-in quit(); kept for interface compatibility.
    """
    screen.status('Quitting.')
    CAMERA.stop()
    # XXX This is needed to release capture
    # Iterate over a snapshot: the original removed entries from THREADS
    # while iterating over it directly, which silently skips every other
    # thread (list mutation during iteration).
    for thread in list(THREADS):
        try:
            thread.stop()
        except Exception as e:
            print('Error stopping the thread: {}. Feel free to ignore that.'.format(e))
        THREADS.remove(thread)
    QUIT_LOCK.release()
# Single-character command -> handler. The capture variants differ in whether
# the result is saved locally (output) and/or printed+tweeted (share); capture
# handlers run on a fresh thread via Rx's to_async.
ACTIONS_MAP = {
    'c': Observable.to_async(lambda: STATE.capture(output=True,share=True), Scheduler.new_thread),
    't': Observable.to_async(lambda: STATE.capture(output=False,share=True), Scheduler.new_thread),
    's': Observable.to_async(lambda: STATE.capture(output=True,share=False), Scheduler.new_thread),
    'p': CAMERA.toggle_preview,
    'q': quit,
}
def run_action(key):
    """Look up and invoke the handler registered for `key` in ACTIONS_MAP;
    report unknown keys instead of raising."""
    action = ACTIONS_MAP.get(key)
    if action is None:
        print('unknown action: ', key)
        return
    # print('running action: ', key)
    action()
def main():
    """Wire up the input observers (mouse, keyboard, optional terminal getch),
    route their events to run_action, then block until quit() releases
    QUIT_LOCK."""
    mo, clicks = observe_mouse(config.INPUT_CAPTURE_MOUSE)
    # A mouse click acts as the 'c' (capture) command.
    clicks = clicks.map(lambda click: 'c')
    ko, strokes = observe_keyboard(config.INPUT_CAPTURE_KEYBOARD)
    THREADS.extend((mo, ko))
    obs = Observable.merge(clicks, strokes)
    if config.INPUT_READCH:
        gcho, ch = observe_getch()
        THREADS.append(gcho)
        obs = obs.merge(ch)
    obs.subscribe(Observable.to_async(run_action), lambda e: print(e))
    STATE.done()
    # Block on semaphore and keep the rest of threads do the thingy
    QUIT_LOCK.acquire()

try:
    main()
except Exception as e:
    # Tear down the camera and observer threads before propagating the error.
    quit()
    raise e
| cc0-1.0 |
yunti/cookiecutter-django | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/views.py | 140 | 1445 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from braces.views import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Display a single user's profile page; login required."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the logged-in user to their own detail page."""
    # Temporary (302) redirect so clients don't cache the target.
    permanent = False

    def get_redirect_url(self):
        # The target is always the requesting user's own profile.
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Allow a logged-in user to edit their own profile."""
    # Only the 'name' field is editable through this form.
    fields = ['name', ]

    # we already imported User in the view code above, remember?
    model = User

    # send the user back to their own page after a successful update
    def get_success_url(self):
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})

    def get_object(self):
        # Only get the User record for the user making the request
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """List all users; login required."""
    model = User
    # These next two lines tell the view to index lookups by username
    # NOTE(review): slug settings have no effect on a ListView — this looks
    # copy-pasted from the DetailView above; confirm before relying on it.
    slug_field = "username"
    slug_url_kwarg = "username"
| bsd-3-clause |
imchipwood/iSOBOT_ESP8266 | iSOBOT_Control/isobot.py | 1 | 14020 | __author__ = "chawood@pdx.edu"
import requests
import ping
import socket
import time
import re
# Code originally pulled from here
# http://web.cecs.pdx.edu/~mperkows/CLASS_479/Projects-2012/Mathias_Sunardi-iSobot_controller_report.pdf
# And subsequently changed entirely for use with ESP8266s over wifi
# iSOBOT IR command bytes, keyed by mnemonic. Suffix convention follows the
# original remote: P = punch, K = kick, G = guard, A/B = action banks.
# NOTE: the original dict literal defined five keys twice ('CMD_31K',
# 'CMD_A', 'CMD_B', 'CMD_2A', 'CMD_31B'); in a dict literal the later entry
# silently wins, so the earlier ones were dead. They are kept below as
# comments to preserve the documented byte values without shadowing bugs.
commands = {
    'sleep': 'sleep',
    'CMD_SLEEP': 'sleep',
    'CMD_STOP': 0x00,
    'CMD_RC': 0x07,
    'CMD_PM': 0x08,
    'CMD_SA': 0x09,
    'CMD_VC': 0x0a,
    'CMD_1P': 0x13,  # left punch
    'CMD_2P': 0x14,  # right punch
    'CMD_3P': 0x15,  # left side whack (arm outwards)
    'CMD_4P': 0x16,  # right side whack
    'CMD_11P': 0x17,  # left + right punch
    'CMD_12P': 0x18,  # right + left punch
    'CMD_13P': 0x19,  # left up-down chop
    'CMD_14P': 0x1a,  # right up-down chop
    'CMD_21P': 0x1b,  # both up-down chop
    'CMD_22P': 0x1c,  # both down-up chop
    'CMD_23P': 0x1d,  # right + left punch, both up-down chop, both whack
    'CMD_24P': 0x1e,  # look left, up-down chop
    'CMD_31P': 0x1f,  # look right, up-down chop
    'CMD_32P': 0x20,  # "c'mon, snap out of it" slap
    'CMD_34P': 0x21,  # both whack
    'CMD_1K': 0x22,  # left wide kick
    'CMD_2K': 0x23,  # right wide kick
    'CMD_3K': 0x24,  # left kick
    'CMD_4K': 0x25,  # right kick
    'CMD_11K': 0x26,  # left side kick
    'CMD_12K': 0x27,  # right side kick
    'CMD_13K': 0x28,  # left back kick
    'CMD_14K': 0x29,  # right back kick
    # 'CMD_31K': 0x2a,  # right high side kick -- duplicate key, was shadowed by the 0x30 entry below
    'CMD_42K': 0x2b,  # right soccer/low kick
    'CMD_21K': 0x2c,  # left + right high side kick
    'CMD_22K': 0x2d,  # right + left soccer/low kick
    'CMD_23K': 0x2e,  # combo kick low-left, high-side-right, left
    'CMD_24K': 0x2f,  # another left kick
    'CMD_31K': 0x30,  # right high kick
    'CMD_34K': 0x31,  # split
    'CMD_1G': 0x32,  # Block! "whoa buddy"
    'CMD_2G': 0x33,  # right arm block
    'CMD_3G': 0x34,  #
    'CMD_4G': 0x35,  # both arms block
    'CMD_11G': 0x36,  # dodge right (move left)
    'CMD_12G': 0x37,  # dodge left (move right)
    'CMD_13G': 0x38,  # headbutt
    'CMD_14G': 0x39,  # right arm to face
    'CMD_21G': 0x3a,  # taunt1
    'CMD_22G': 0x3b,  # hit & down
    'CMD_23G': 0x3c,  # dodge right, left, block left, head, fall down
    # 'CMD_A': 0x3d,  # duplicate key, was shadowed by the 0x8a entry below
    # 'CMD_B': 0x3e,  # duplicate key, was shadowed by the 0x8b entry below
    'CMD_1A': 0x3f,  # "Roger!" raise right arm
    # 'CMD_2A': 0x40,  # weird gesture -- duplicate key, was shadowed by the 0x41 entry below (0x40 is CMD_FART)
    'CMD_2A': 0x41,  # "All your base are belong to isobot"
    'CMD_3A': 0x42,  # "absolutely not!" flaps both arms
    'CMD_4A': 0x43,  # bow/crouch? and get back up
    'CMD_11A': 0x44,  # "Good morning!" raise both arms, stand on left foot
    'CMD_12A': 0x45,  # "Greetings I come in peace" wave right arm
    'CMD_13A': 0x46,  # "Y'all come back now, you hear!"
    'CMD_14A': 0x47,  # "Wassap!?" opens both arms sideways over and down
    'CMD_21A': 0x48,  # "Greetings human" raise left arm and bow
    'CMD_22A': 0x49,  # "It's an honor to meet you!" bow and shake right hand
    'CMD_23A': 0x4a,  # "Bye bye"
    'CMD_31A': 0x4b,  # "Bon voyage!"
    'CMD_32A': 0x4c,  # *clap* *clap* "Thanks! I'll be here all week" raise right arm
    'CMD_33A': 0x4d,  # "T-t-that's all robots!" raise left arm, stand on left foot
    'CMD_41A': 0x4e,  # "Domo arigato from isobot-o"
    'CMD_42A': 0x4f,
    'CMD_43A': 0x50,
    'CMD_111A': 0x51,
    'CMD_222A': 0x52,
    'CMD_333A': 0x53,
    'CMD_11B': 0x54,  # Walk forward + "Give me a bear hug"
    'CMD_12B': 0x55,
    'CMD_13B': 0x56,
    'CMD_14B': 0x57,
    # 'CMD_31B': 0x58,  # duplicate key, was shadowed by the 0x5c entry below
    'CMD_22B': 0x59,
    'CMD_23B': 0x5a,
    'CMD_24B': 0x5b,
    'CMD_31B': 0x5c,
    'CMD_32B': 0x5d,  # "woe is me ... what to do ... what to do" bow, shakes head
    'CMD_33B': 0x5e,  # "No no .... not again. ... No no"
    'CMD_234B': 0x5f,  # "Oh, I can't believe I did that"
    'CMD_41B': 0x60,  # "I throw myself into a mercy" (?)
    'CMD_42B': 0x61,  # "Oh, like a dagger through my heart"
    'CMD_43B': 0x62,  # Same as 44B but no voice
    'CMD_44B': 0x63,  # "Ouch, that hurts!"
    'CMD_112A': 0x65,  # points left "wahoo"
    'CMD_113A': 0x66,  # pose northwest "hoo-ah!"
    'CMD_114A': 0x67,  # points left "kapwingg"
    'CMD_124A': 0x6b,  # "iz nice. you like?"
    'CMD_131A': 0x6c,  # both arm wave left right left
    'CMD_132A': 0x6d,  # drunk
    'CMD_113B': 0x6e,  # "no please make it stop." "please i can't take it anymore" "no no" lying down and get up
    'CMD_114B': 0x6f,  # "yippe yippe" 3 times, goal post arms
    'CMD_121B': 0x70,  # "ho ho ho ... <something-something> isobot"
    'CMD_122B': 0x71,  # "yeehaaw" both arm wave left right
    'CMD_123B': 0x72,
    'CMD_124B': 0x73,  # stand on one foot, goal post arms, "wow that's amazing"
    'CMD_131B': 0x74,  # bow, arms over head and down
    'CMD_132B': 0x75,
    'CMD_133B': 0x76,
    'CMD_134B': 0x77,
    'CMD_141A': 0x78,
    'CMD_143A': 0x79,  # sit cross legged
    'CMD_144A': 0x7b,  # ... owl?
    'CMD_211B': 0x7c,
    'CMD_212B': 0x7d,  # "Ahh, let me get comfortable. I'm too sexy for my servos" lie down, flips over, gets up
    'CMD_213B': 0x7e,
    'CMD_221B': 0x80,  # balancing act + bleeps (+)
    'CMD_222B': 0x81,  # looks like a push up
    'CMD_223B': 0x82,
    'CMD_224B': 0x83,  # "You can count on me"
    'CMD_232B': 0x85,
    'CMD_233B': 0x86,
    'CMD_241B': 0x88,  # headstand
    'CMD_242B': 0x89,
    'CMD_A': 0x8a,  # flip forward back forward about 3 times
    'CMD_B': 0x8b,
    'CMD_AB': 0x8c,
    'CMD_AAA': 0x8d,
    'CMD_BBB': 0x8e,
    'CMD_BAB': 0x8f,  # "BANZAI" 3 times
    'CMD_ABB': 0x95,  # chicken
    'CMD_BBA': 0x97,  # dancing (+)
    'CMD_ABA': 0x98,  # giant robot motion
    'CMD_ABAB': 0x99,
    'CMD_AAAA': 0x9a,
    'CMD_FWRD': 0xb7,
    'CMD_BWRD': 0xb8,
    'CMD_FWLT': 0xb9,
    'CMD_FWRT': 0xba,
    'CMD_LEFT': 0xbb,
    'CMD_RGHT': 0xbc,
    'CMD_BKLT': 0xbd,
    'CMD_BKRT': 0xbe,
    'CMD_411A': 0xc7,
    'CMD_412A': 0xc8,
    'CMD_413A': 0xc9,
    'CMD_444B': 0xca,
    'CMD_444A': 0xcb,  # nothing
    'CMD_LVSoff': 0xd3,
    'CMD_HP': 0xd5,
    'CMD_NOIMP': 0xd6,
    'CMD_END': 0xd7,
    'MSG_NOIMP': 0x848080,
    'MSG_RUP': 0x878280,
    'MSG_RDW': 0x808280,
    'MSG_RRT': 0x8480f0,
    'MSG_RLT': 0x848080,
    'MSG_LUP': 0x84f080,
    'MSG_LDW': 0x841080,
    'MSG_LRT': 0xec8080,
    'MSG_LLT': 0x0c8080,
    # Bonus Commands
    'CMD_TURNON': 0x01,
    'CMD_ACTIVATED': 0x02,
    'CMD_READY': 0x03,
    'CMD_RC_CONFIRM': 0x04,
    'CMD_RC_PROMPT': 0x05,
    'CMD_MODE_PROMPT': 0x06,
    'CMD_IDLE_PROMPT': 0x0B,  #': 0x0C,= 0x0D,= 0x0E all the same
    'CMD_HUMMING_PROMPT': 0x0F,
    'CMD_COUGH_PROMPT': 0x10,
    'CMD_TIRED_PROMPT': 0x11,
    'CMD_SLEEP_PROMPT': 0x12,
    'CMD_FART': 0x40,  # 2A
    'CMD_SHOOT_RIGHT': 0x64,
    'CMD_SHOOT_RIGHT2': 0x68,
    'CMD_SHOOT2': 0x69,
    'CMD_BEEP': 0x6a,
    'CMD_BANZAI': 0x7F,  # "TAKARA TOMY"
    'CMD_CHEER1': 0x90,
    'CMD_CHEER2': 0x91,
    'CMD_DOG': 0x92,
    'CMD_CAR': 0x93,
    'CMD_EAGLE': 0x94,
    'CMD_ROOSTER': 0x95,
    'CMD_GORILLA': 0x96,
    'CMD_LOOKOUT': 0xA1,
    'CMD_STORY1': 0xA2,  # knight and princess
    'CMD_STORY2': 0xA3,  # ready to start day
    'CMD_GREET1': 0xA4,  # good morning
    'CMD_GREET2': 0xA5,  # do somthing fun
    'CMD_POOP': 0xA6,  # poops his pants
    'CMD_GOOUT': 0xA7,  # ready to go out dancing
    'CMD_HIBUDDY': 0xA8,  # .. bring a round of drinks
    'CMD_INTRODUCTION': 0xA9,
    'CMD_ATYOURSERVICE': 0xAA,
    'CMD_SMELLS': 0xAB,
    'CMD_THATWASCLOSE': 0xAC,
    'CMD_WANNAPICEOFME': 0xAD,
    'CMD_RUNFORYOURLIFE': 0xAE,
    'CMD_TONEWTODIE': 0xAF,
    # 0xB0 - nothing?
    'CMD_SWANLAKE': 0xB1,
    'CMD_DISCO': 0xB2,
    'CMD_MOONWALK': 0xB3,
    'CMD_REPEAT_PROMPT': 0xB4,
    'CMD_REPEAT_PROMPT2': 0xB5,
    'CMD_REPEAT_PROMPT3': 0xB6,
    # 0xB7-= 0xC4 single steps in different directions
    'CMD_HEADSMASH': 0xC5,
    'CMD_HEADHIT': 0xC6,
    # 0xCC-= 0xD2 - unknown (use param?)
    # after exercising one of these I am getting only beeps instead of voice/sounds
    # (looks like a tool to synchronize sound with moves)
    'CMD_HIBEEP': 0xD3,
    # 0xD4 - unknown (use param?)
    'CMD_BEND_BACK': 0xD8,  # same untill': 0xDB
    'CMD_SQUAT': 0xDB,  # also': 0xDC # doesn't work (both)
    'CMD_BEND_FORWARD': 0xDD,
    'CMD_HEAD_LEFT_60': 0xDE,
    'CMD_HEAD_LEFT_45': 0xDF,
    'CMD_HEAD_LEFT_30': 0xE0,
    'CMD_HEAD_RIGHT_30': 0xE1,
    'CMD_HEAD_RIGHT_45': 0xE2,
    'CMD_HEAD_RIGHT_60': 0xE3,
    # seems identical to A & B getups
    'CMD_GETUP_BELLY': 0xE4,  # sleep for 6 seconds for this one
    'CMD_GETUP_BACK': 0xE5,  # need to sleep for at least 13 seconds after this one - it's a long motion
    # E6 unknown
    'CMD_HEAD_SCAN_AND_BEND': 0xE7,
    'CMD_ARM_TEST': 0xE8,
    'CMD_FALL_AND_LEG_TEST': 0xE9,
    'CMD_THANKYOUSIR': 0xEA,
    'CMD_ILOVEYOU_SHORT': 0xEB,
    'CMD_3BEEPS': 0xEC,
    'CMD_FALL_DEAD': 0xED,
    'CMD_3BEEPS_AND_SLIDE': 0xEE,
    # 'EF-FF': unknown
}
class iSobotException(Exception):
    """Raised for invalid iSOBOT command parameters."""
    pass
class iSobot(object):
    """Driver for a TAKARA TOMY iSOBOT bridged over an ESP8266.

    Commands are encoded into hex strings (see makeCmd) and delivered to
    the ESP8266 via HTTP POST, which relays them to the robot over IR.
    """

    def __init__(self, url="192.168.4.1", channel=0, debug=False):
        """Create a driver for the robot reachable at *url*.

        channel selects remote-control channel A (0) or B (1).
        """
        print("{} - channel {}".format(url, channel))
        self._url = None
        self.url = url  # property setter also pings the host
        # NOTE(review): assert-based validation disappears under `python -O`;
        # kept for interface compatibility (callers may expect AssertionError).
        assert channel in [0, 1]
        self.channel = channel
        self.debug = debug

    @staticmethod
    def _ping(url):
        """Best-effort reachability check; only prints on failure."""
        try:
            print("{} - Checking connection ".format(url))
            # Can't ping a url with http:// or https:// on it for some reason
            testurl = url.replace("http://", "").replace("https://", "")
            ping.do_one(testurl, timeout=5, psize=5)
        except socket.error:
            print("{} - ERROR Can't connect".format(url))

    @property
    def url(self):
        return self._url

    @url.setter
    def url(self, url):
        # Ping first so connectivity problems surface as early as possible.
        self._ping(url)
        self._url = url

    def calcChecksum(self, header, byte0, byte1, byte2):
        """Return the 3-bit checksum: sum all bytes, then add the sum's
        3-bit groups together and keep the low 3 bits."""
        s = header + byte0 + byte1 + byte2
        s = (s & 7) + ((s >> 3) & 7) + ((s >> 6) & 7)
        return s & 7

    def makeCmd(self, cmdType=1, cmd1=0, cmd2=0):
        """Build an iSOBOT command string; returns hex(value) as a string.

        Layout (type 1, 3 bytes):
            [channel 1 bit][type 2 bits][checksum 3 bits][cmd1 8][param 8]
        Type 0 commands carry a second command byte:
            [header 8][cmd1 8][cmd2 8][param 8]

        The checksum (calcChecksum over header+cmd bytes+param, with the
        checksum field zeroed) fills the low 3 bits of the header byte.
        `param` is always 0x03 (purpose unknown).

        Example (channel B, type 1, CMD_FWRD=0xb7):
            header = 1:01:000 -> 0x28; sum = 0x28+0xb7+0x03 = 0xe2
            checksum = (010 + 100 + 011) & 7 = 1 -> header 0x29
            result = '0x29b703'
        """
        if cmdType not in [0, 1]:
            # Original message wrongly mentioned the channel; only the
            # command type is validated here.
            raise iSobotException("{} - Command type invalid. Valid types are '0' and '1'".format(self.url))
        # param is constant @ 0x03
        param = 0x03
        # Header carries channel, type and (initially zero) checksum.
        header = (self.channel << 5) + (cmdType << 3)
        checksum = self.calcChecksum(header, cmd1, cmd2, param)
        headersum = header + checksum
        if cmdType == 0:
            # two command bytes
            return hex((headersum << 24) + (cmd1 << 16) + (cmd2 << 8) + param)
        else:
            # one command byte
            return hex((headersum << 16) + (cmd1 << 8) + param)

    def sendCmd(self, cmd):
        """POST one formatted command string to the ESP8266 (best effort)."""
        # Build the URL outside the try so the except handler can always
        # reference it safely.
        url = "{}/cmd:{}".format(self._url, cmd)
        try:
            r = requests.post(url, data={'cmd': cmd}, timeout=5)
            if r.status_code == 200:
                if self.debug:
                    print("{} - HTTP Post success!".format(url.replace('\r', '')))
            else:
                print("{} - HTTP Post failed. Status, reason: {}, {}".format(self.url, r.status_code, r.reason))
        except requests.exceptions.ConnectionError as e:
            # Connection problems are logged and swallowed; other errors
            # (e.g. timeouts) propagate to the caller as before.
            print("{} - HTTP post failed: {}".format(url, e))

    # Repeat sending command
    # Default # of tries: 300. Some actions (e.g. Walk) require the command to be sent for a period of time.
    # e.g. sending the Walk FWRD command once, the robot will accept the command but not move forward
    def repeatCmd(self, cmd, rep=300):
        """Send *cmd* repeatedly, roughly 10 times per second."""
        for _ in range(rep):
            self.sendCmd(cmd)
            time.sleep(0.1)

    def formatType1Cmd(self, cmd):
        """Format the hex string produced by makeCmd for transmission."""
        # Remove leading 0x in hex string
        cmd = re.sub(r'0x', '', cmd)
        # The string must be 6 digits long
        cmd = cmd.zfill(6)
        # Needs carriage return
        cmd += '\r'
        if self.debug:
            print("{} - Command string: {}".format(self.url, cmd))
        return cmd

    def isobotDoType1(self, action, repeat=3):
        """Shorthand: build, format and repeatedly send a type-1 command."""
        try:
            cmd = self.formatType1Cmd(self.makeCmd(1, action))
            self.repeatCmd(cmd, repeat)
        except Exception:
            print("{} - FAILED: action {}, repeat {}".format(self.url, action, repeat))
            raise  # bare raise preserves the original traceback
        return True
if __name__ == "__main__":
    # Demo routine: drive the robot at the default ESP8266 address
    # (192.168.4.1, channel A).
    bot = iSobot()
    # Each entry is (command byte, repeat count) or ('sleep', seconds).
    cmds = [
        (commands['CMD_CHEER1'], 1),
        ('sleep', 8),
        (0x00, 2),
        (commands['CMD_FWRD'], 10),
        (commands['CMD_GORILLA'], 1),
        # (bot.CMD_MOONWALK),
        # (bot.CMD_42B),
        # (bot.CMD_1P),
        # (bot.CMD_3BEEPS),
        # (bot.CMD_2G),
        # (0x00),
        # (bot.CMD_21K),
        # (bot.CMD_22K),
    ]
    try:
        for cmd, repeat in cmds:
            if isinstance(cmd, str):
                # String entries are pauses, not robot commands.
                print('sleeping {}'.format(repeat))
                time.sleep(repeat)
            else:
                print("{:02x}".format(cmd))
                bot.isobotDoType1(cmd, repeat=repeat)
    except KeyboardInterrupt:
        print("keyboard interrupt, stopping")
        pass
    except Exception as e:
        raise e
| gpl-3.0 |
sdague/home-assistant | homeassistant/components/alexa/auth.py | 19 | 5572 | """Support for Alexa skill auth."""
import asyncio
from datetime import timedelta
import json
import logging
import aiohttp
import async_timeout
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, HTTP_OK
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.util import dt
_LOGGER = logging.getLogger(__name__)

# Login-with-Amazon token endpoint and the form-encoded content type it expects.
LWA_TOKEN_URI = "https://api.amazon.com/auth/o2/token"
LWA_HEADERS = {"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}

# Refresh the access token this many seconds before its real expiry.
PREEMPTIVE_REFRESH_TTL_IN_SECONDS = 300

# Key/version of the persisted token store and the field names inside it.
STORAGE_KEY = "alexa_auth"
STORAGE_VERSION = 1
STORAGE_EXPIRE_TIME = "expire_time"
STORAGE_ACCESS_TOKEN = "access_token"
STORAGE_REFRESH_TOKEN = "refresh_token"
class Auth:
    """Handle authentication to send events to Alexa."""

    def __init__(self, hass, client_id, client_secret):
        """Initialize the Auth class."""
        self.hass = hass
        self.client_id = client_id
        self.client_secret = client_secret
        # Cached token preferences; lazily loaded from the store.
        self._prefs = None
        self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
        # Serializes token loads/refreshes so concurrent callers don't race LWA.
        self._get_token_lock = asyncio.Lock()

    async def async_do_auth(self, accept_grant_code):
        """Do authentication with an AcceptGrant code."""
        # access token not retrieved yet for the first time, so this should
        # be an access token request
        lwa_params = {
            "grant_type": "authorization_code",
            "code": accept_grant_code,
            CONF_CLIENT_ID: self.client_id,
            CONF_CLIENT_SECRET: self.client_secret,
        }
        _LOGGER.debug(
            "Calling LWA to get the access token (first time), with: %s",
            json.dumps(lwa_params),
        )
        return await self._async_request_new_token(lwa_params)

    @callback
    def async_invalidate_access_token(self):
        """Invalidate access token."""
        self._prefs[STORAGE_ACCESS_TOKEN] = None

    async def async_get_access_token(self):
        """Perform access token or token refresh request.

        Returns the cached token when still valid, otherwise refreshes it
        via LWA. Returns None when no refresh token is available or the
        refresh fails.
        """
        async with self._get_token_lock:
            if self._prefs is None:
                await self.async_load_preferences()
            if self.is_token_valid():
                _LOGGER.debug("Token still valid, using it")
                return self._prefs[STORAGE_ACCESS_TOKEN]
            if self._prefs[STORAGE_REFRESH_TOKEN] is None:
                _LOGGER.debug("Token invalid and no refresh token available")
                return None
            lwa_params = {
                "grant_type": "refresh_token",
                "refresh_token": self._prefs[STORAGE_REFRESH_TOKEN],
                CONF_CLIENT_ID: self.client_id,
                CONF_CLIENT_SECRET: self.client_secret,
            }
            _LOGGER.debug("Calling LWA to refresh the access token")
            return await self._async_request_new_token(lwa_params)

    @callback
    def is_token_valid(self):
        """Check if a token is already loaded and if it is still valid."""
        if not self._prefs[STORAGE_ACCESS_TOKEN]:
            return False
        expire_time = dt.parse_datetime(self._prefs[STORAGE_EXPIRE_TIME])
        # Treat tokens inside the preemptive window as already expired so
        # they get refreshed before Alexa calls can fail.
        preemptive_expire_time = expire_time - timedelta(
            seconds=PREEMPTIVE_REFRESH_TTL_IN_SECONDS
        )
        return dt.utcnow() < preemptive_expire_time

    async def _async_request_new_token(self, lwa_params):
        # POST to LWA and persist the returned tokens; returns the new
        # access token, or None on any network/HTTP failure.
        try:
            session = aiohttp_client.async_get_clientsession(self.hass)
            with async_timeout.timeout(10):
                response = await session.post(
                    LWA_TOKEN_URI,
                    headers=LWA_HEADERS,
                    data=lwa_params,
                    allow_redirects=True,
                )
        except (asyncio.TimeoutError, aiohttp.ClientError):
            _LOGGER.error("Timeout calling LWA to get auth token")
            return None
        _LOGGER.debug("LWA response header: %s", response.headers)
        _LOGGER.debug("LWA response status: %s", response.status)
        if response.status != HTTP_OK:
            _LOGGER.error("Error calling LWA to get auth token")
            return None
        response_json = await response.json()
        _LOGGER.debug("LWA response body : %s", response_json)
        access_token = response_json["access_token"]
        refresh_token = response_json["refresh_token"]
        expires_in = response_json["expires_in"]
        expire_time = dt.utcnow() + timedelta(seconds=expires_in)
        await self._async_update_preferences(
            access_token, refresh_token, expire_time.isoformat()
        )
        return access_token

    async def async_load_preferences(self):
        """Load preferences with stored tokens."""
        self._prefs = await self._store.async_load()
        if self._prefs is None:
            # Nothing stored yet: start with an empty token record.
            self._prefs = {
                STORAGE_ACCESS_TOKEN: None,
                STORAGE_REFRESH_TOKEN: None,
                STORAGE_EXPIRE_TIME: None,
            }

    async def _async_update_preferences(self, access_token, refresh_token, expire_time):
        """Update user preferences."""
        if self._prefs is None:
            await self.async_load_preferences()
        # Only overwrite fields that were actually provided.
        if access_token is not None:
            self._prefs[STORAGE_ACCESS_TOKEN] = access_token
        if refresh_token is not None:
            self._prefs[STORAGE_REFRESH_TOKEN] = refresh_token
        if expire_time is not None:
            self._prefs[STORAGE_EXPIRE_TIME] = expire_time
        await self._store.async_save(self._prefs)
| apache-2.0 |
apark263/tensorflow | tensorflow/python/kernel_tests/compare_and_bitpack_op_test.py | 7 | 2496 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.compare_and_bitpack_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CompareAndBitpackTest(test.TestCase):
    """Tests for math_ops.compare_and_bitpack."""

    def _testCompareAndBitpack(self,
                               x, threshold,
                               truth,
                               expected_err_re=None):
        # Run the op and compare against the numpy ground truth, or check
        # that the expected error is raised.
        with test_util.use_gpu():
            ans = math_ops.compare_and_bitpack(x, threshold)
            if expected_err_re is None:
                tf_ans = self.evaluate(ans)
                self.assertShapeEqual(truth, ans)
                self.assertAllEqual(tf_ans, truth)
            else:
                with self.assertRaisesOpError(expected_err_re):
                    self.evaluate(ans)

    def _testBasic(self, dtype):
        rows = 371
        cols = 294
        x = np.random.randn(rows, cols * 8)
        # np.bool_ replaces the deprecated np.bool alias, which was removed
        # in NumPy 1.24; behavior is unchanged.
        if dtype == np.bool_:
            x = x > 0
        else:
            x = x.astype(dtype)
        threshold = dtype(0)
        # np.packbits flattens the tensor, so we reshape it back to the
        # expected dimensions.
        truth = np.packbits(x > threshold).reshape(rows, cols)
        self._testCompareAndBitpack(x, threshold, truth)

    def testBasicFloat32(self):
        self._testBasic(np.float32)

    def testBasicFloat64(self):
        self._testBasic(np.float64)

    def testBasicFloat16(self):
        self._testBasic(np.float16)

    def testBasicBool(self):
        self._testBasic(np.bool_)

    def testBasicInt8(self):
        self._testBasic(np.int8)

    def testBasicInt16(self):
        self._testBasic(np.int16)

    def testBasicInt32(self):
        self._testBasic(np.int32)

    def testBasicInt64(self):
        self._testBasic(np.int64)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    test.main()
| apache-2.0 |
gnuhub/intellij-community | python/lib/Lib/site-packages/django/template/context.py | 78 | 5383 | from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Cache of actual callables.
_standard_context_processors = None
# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('django.core.context_processors.csrf',)
class ContextPopException(Exception):
    "pop() has been called more times than push()"
    # Raised by BaseContext.pop() when only the base dictionary remains.
    pass
class BaseContext(object):
    """A stack of dictionaries. Name resolution walks the stack from the
    most recently pushed dictionary down to the base one; writes and
    deletes always hit the top dictionary."""

    def __init__(self, dict_=None):
        # A falsy argument (None or an empty mapping) starts a fresh base dict.
        self.dicts = [dict_ or {}]

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # Most recently pushed dictionary first.
        for layer in reversed(self.dicts):
            yield layer

    def push(self):
        """Push a new, empty scope onto the stack and return it."""
        layer = {}
        self.dicts.append(layer)
        return layer

    def pop(self):
        """Remove and return the top scope; the base scope cannot be popped."""
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        "Set a variable in the current context"
        self.dicts[-1][key] = value

    def __getitem__(self, key):
        "Get a variable's value, starting at the current context and going upward"
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        raise KeyError(key)

    def __delitem__(self, key):
        "Delete a variable from the current context"
        del self.dicts[-1][key]

    def has_key(self, key):
        return any(key in layer for layer in self.dicts)

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, otherwise=None):
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        return otherwise
class Context(BaseContext):
    "A stack container for variable context"

    def __init__(self, dict_=None, autoescape=True, current_app=None, use_l10n=None):
        super(Context, self).__init__(dict_)
        self.autoescape = autoescape
        self.use_l10n = use_l10n
        self.current_app = current_app
        self.render_context = RenderContext()

    def update(self, other_dict):
        "Pushes other_dict to the stack of dictionaries in the Context"
        if hasattr(other_dict, '__getitem__'):
            self.dicts.append(other_dict)
            return other_dict
        raise TypeError('other_dict must be a mapping (dictionary-like) object.')
class RenderContext(BaseContext):
    """
    A stack container for storing Template state.

    RenderContext gives template Nodes a safe place to keep state between
    invocations of their `render` method. Unlike the normal context stack,
    lookups here only consult the topmost dictionary: each template render
    pushes a fresh, empty scope, so 'template local' variables never leak
    into (or read from) the rendering of other templates.
    """

    def __iter__(self):
        # Only the most recent scope participates in iteration.
        for key in self.dicts[-1]:
            yield key

    def has_key(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        top = self.dicts[-1]
        if key in top:
            return top[key]
        return otherwise
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors():
    # Import, validate and cache the configured context-processor callables
    # on first use. NOTE: uses Python 2 ``except ImportError, e`` syntax —
    # this module targets Django's Python 2 era.
    from django.conf import settings
    global _standard_context_processors
    if _standard_context_processors is None:
        processors = []
        collect = []
        # The built-in CSRF processor always runs before user-configured ones.
        collect.extend(_builtin_context_processors)
        collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
        for path in collect:
            # Split "package.module.attr" into module path and attribute name.
            i = path.rfind('.')
            module, attr = path[:i], path[i+1:]
            try:
                mod = import_module(module)
            except ImportError, e:
                raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e))
            try:
                func = getattr(mod, attr)
            except AttributeError:
                raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
            processors.append(func)
        _standard_context_processors = tuple(processors)
    return _standard_context_processors
class RequestContext(Context):
    """
    This subclass of template.Context automatically populates itself using
    the processors defined in TEMPLATE_CONTEXT_PROCESSORS.
    Additional processors can be specified as a list of callables
    using the "processors" keyword argument.
    """
    def __init__(self, request, dict=None, processors=None, current_app=None, use_l10n=None):
        Context.__init__(self, dict, current_app=current_app, use_l10n=use_l10n)
        if processors is None:
            processors = ()
        else:
            processors = tuple(processors)
        # Each processor maps the request to a dict that is pushed onto the
        # context stack; later processors can shadow earlier keys.
        for processor in get_standard_processors() + processors:
            self.update(processor(request))
| apache-2.0 |
buckket/weltklang | lib/rfk/database/base.py | 3 | 14398 | import time
import hashlib
from datetime import timedelta
from passlib.hash import bcrypt
from sqlalchemy import *
from sqlalchemy.orm import relationship, backref, exc
from sqlalchemy.dialects.mysql import INTEGER as Integer
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.sql.expression import case
import re
import os
from flask.ext.login import AnonymousUserMixin
import rfk.database
from rfk.types import SET, ENUM
from rfk import exc as rexc
from rfk import CONFIG
from rfk.database import Base, UTCDateTime
from rfk.database.show import UserShow, Show
from rfk.helper import now, get_path
class Anonymous(AnonymousUserMixin):
    """Stand-in user object for unauthenticated visitors with fixed
    locale/timezone defaults and no permissions."""

    def __init__(self):
        super(Anonymous, self).__init__()
        self.locale = 'de'
        self.timezone = 'Europe/Berlin'

    def get_locale(self):
        """Return the default locale for anonymous users."""
        return self.locale

    def get_timezone(self):
        """Return the default timezone for anonymous users."""
        return self.timezone

    def has_permission(self, code=None, permission=None):
        # Anonymous visitors never hold any permission.
        return False
class User(Base):
    """Registered user account, backed by the ``users`` table."""
    __tablename__ = 'users'
    user = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    username = Column(String(50), unique=True)
    # bcrypt hash; legacy rows may still hold a sha1 hex digest (see
    # check_password, which migrates them on successful login).
    password = Column(String(64))
    mail = Column(String(255))
    country = Column(String(3))
    register_date = Column(UTCDateTime, default=now)
    last_login = Column(UTCDateTime, default=None)

    def get_id(self):
        # Flask-Login identity: the primary key as unicode (Python 2).
        return unicode(self.user)

    def is_anonymous(self):
        return False

    def is_active(self):
        return True

    def is_authenticated(self):
        return True

    def get_locale(self):
        return self.get_setting(code='locale')

    def get_timezone(self):
        return self.get_setting(code='timezone')

    @staticmethod
    def authenticate(username, password):
        """shorthand function for authentication a user
        returns the user object

        Keyword arguments:
        username -- username
        password -- unencrypted password
        """
        user = User.get_user(username=username)
        if user.check_password(password):
            return user
        else:
            raise rexc.base.InvalidPasswordException()

    @staticmethod
    def get_user(id=None, username=None):
        # Fetch a user by primary key or by username; raises
        # UserNotFoundException when no row matches.
        assert id or username
        try:
            if username is None:
                return User.query.filter(User.user == id).one()
            else:
                return User.query.filter(User.username == username).one()
        except exc.NoResultFound:
            raise rexc.base.UserNotFoundException

    @staticmethod
    def check_username(username):
        # Valid usernames: at least 3 characters of [0-9a-zA-Z_-].
        if re.match('^[0-9a-zA-Z_-]{3,}$', username) is None:
            return False
        else:
            return True

    @staticmethod
    def make_password(password):
        # Hash a cleartext password with bcrypt.
        return bcrypt.encrypt(password)

    @staticmethod
    def add_user(username, password):
        """Create and flush a new user; raises when the name is invalid
        or already taken."""
        if not User.check_username(username):
            raise rexc.base.InvalidUsernameException
        try:
            User.query.filter(User.username == username).one()
            raise rexc.base.UserNameTakenException()
        except exc.NoResultFound:
            user = User(username=username, password=User.make_password(password))
            rfk.database.session.add(user)
            rfk.database.session.flush()
            return user

    def check_password(self, password):
        """Verify a cleartext password; transparently upgrades legacy sha1
        hashes to bcrypt when they match."""
        try:
            return bcrypt.verify(password, self.password)
        except ValueError:
            # Stored hash is not bcrypt — fall back to the legacy sha1 scheme.
            if hashlib.sha1(password).hexdigest() == self.password:
                self.password = User.make_password(password)
                return True
            else:
                return False

    def add_permission(self, code=None, permission=None):
        # Grant a permission (given by code or object); returns False when
        # the user already holds it.
        assert code or permission
        if permission is None:
            permission = Permission.get_permission(code)
        try:
            UserPermission.query.filter(UserPermission.user == self,
                                        UserPermission.permission == permission) \
                .one()
            return False
        except exc.NoResultFound:
            self.permissions.append(UserPermission(permission))
            return True

    def has_permission(self, code=None, permission=None):
        # True when the user holds the permission (given by code or object).
        assert code or permission
        if permission is None:
            permission = Permission.get_permission(code)
        try:
            UserPermission.query.filter(UserPermission.user == self,
                                        UserPermission.permission == permission) \
                .one()
            return True
        except exc.NoResultFound:
            return False

    def get_setting(self, setting=None, code=None):
        # Return the user's value for a setting, or None when unset.
        assert setting or code
        if setting is None:
            setting = Setting.get_setting(code)
        try:
            us = UserSetting.query.filter(UserSetting.user == self,
                                          UserSetting.setting == setting).one()
            return us.get_value()
        except exc.NoResultFound:
            return None

    def set_setting(self, value, setting=None, code=None):
        # Store the user's value for a setting and flush the session.
        assert setting or code
        if setting is None:
            setting = Setting.get_setting(code)
        UserSetting.set_value(self, setting, value)
        rfk.database.session.flush()

    def get_total_streamtime(self):
        """Returns a timedelta Object with the users total time streamed"""
        try:
            return rfk.database.session.query(func.sum(Show.end - Show.begin)) \
                .join(UserShow).filter(UserShow.status == UserShow.STATUS.STREAMED,
                                       UserShow.user == self).first()[0]
        except TypeError:
            # No streamed shows: the aggregate comes back as None.
            return timedelta(seconds=0)

    def __repr__(self):
        return "<USER username={0}>".format(self.username)
class Setting(Base):
    """Definition of a user-configurable setting (code, display name and
    value type)."""
    __tablename__ = 'settings'
    setting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    code = Column(String(25), unique=True)
    name = Column(String(50))
    # One of Setting.TYPES; selects which UserSetting column stores values.
    val_type = Column(Integer(unsigned=True))
    TYPES = ENUM(['INT', 'STR'])

    @staticmethod
    def get_setting(code):
        return Setting.query.filter(Setting.code == code).one()

    @staticmethod
    def add_setting(code, name, val_type):
        # Idempotent: returns the existing setting when the code is known.
        try:
            return Setting.query.filter(Setting.code == code).one()
        except exc.NoResultFound:
            return Setting(code=code, name=name, val_type=val_type)
class UserSetting(Base):
    """Per-user value for a Setting; stored in val_int or val_str depending
    on the setting's declared type."""
    __tablename__ = 'user_settings'
    userSetting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User", backref=backref('settings'))
    setting_id = Column("setting", Integer(unsigned=True),
                        ForeignKey('settings.setting',
                                   onupdate="CASCADE",
                                   ondelete="RESTRICT"))
    setting = relationship("Setting")
    val_int = Column(Integer)
    val_str = Column(String(255))

    def get_value(self):
        # Read from the column matching the setting's declared type.
        if self.setting.val_type == Setting.TYPES.INT:
            return self.val_int
        elif self.setting.val_type == Setting.TYPES.STR:
            return self.val_str

    @staticmethod
    def set_value(user, setting, value):
        # Upsert the user's value; booleans are normalized to 0/1 first.
        if value == True:
            value = 1
        elif value == False:
            value = 0
        try:
            us = UserSetting.query.filter(UserSetting.user == user,
                                          UserSetting.setting == setting).one()
        except exc.NoResultFound:
            us = UserSetting(user=user, setting=setting)
        if us.setting.val_type == Setting.TYPES.INT:
            us.val_int = value
        elif us.setting.val_type == Setting.TYPES.STR:
            us.val_str = value
class Permission(Base):
    """A named permission that can be granted to users (see UserPermission)."""
    __tablename__ = 'permissions'
    permission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    code = Column(String(25), unique=True)  # machine-readable identifier
    name = Column(String(50))  # human-readable name

    @staticmethod
    def get_permission(code):
        """Return the Permission with the given code (NoResultFound if absent)."""
        return Permission.query.filter(Permission.code == code).one()

    @staticmethod
    def add_permission(code, name):
        """Return the existing Permission for `code`, or a new, unflushed one."""
        try:
            return Permission.query.filter(Permission.code == code).one()
        except exc.NoResultFound:
            return Permission(code=code, name=name)
class UserPermission(Base):
    """Association row granting one Permission to one User."""
    __tablename__ = 'user_permissions'
    userPermission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    # delete-orphan: removing the association from either collection deletes the row.
    user = relationship("User", backref=backref('permissions', cascade="all, delete-orphan"))
    permission_id = Column("permission", Integer(unsigned=True),
                           ForeignKey('permissions.permission',
                                      onupdate="CASCADE",
                                      ondelete="RESTRICT"))
    permission = relationship("Permission", backref=backref('users', cascade="all, delete-orphan"))

    def __init__(self, permission):
        # Convenience constructor: the user side is set by appending this
        # object to User.permissions.
        self.permission = permission
class Ban(Base):
    """A (possibly temporary) ban of a user and/or an address range."""
    __tablename__ = 'bans'
    ban = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User", backref=backref('bans'))
    range = Column(String(50))  # presumably an IP/CIDR range -- confirm with callers
    expiration = Column(UTCDateTime)  # when the ban ends
class News(Base):
    """A news entry posted by a user."""
    __tablename__ = 'news'
    news = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    # BUGFIX: pass the callable `now`, not its result. `default=now()` was
    # evaluated once at import time, so every news item shared the same
    # creation timestamp. Passing the callable makes SQLAlchemy invoke it
    # per-insert (same pattern already used by Log.timestamp below).
    time = Column(UTCDateTime, default=now)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User")
    title = Column(String(255))
    content = Column(Text)
class ApiKey(Base):
    """API key issued to a user/application, with rate limiting and flags."""
    __tablename__ = 'apikeys'
    apikey = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
                                                                onupdate="CASCADE",
                                                                ondelete="RESTRICT"))
    user = relationship("User", backref="apikeys")
    key = Column(String(128))  # hex SHA-1 digest, unique across all keys
    counter = Column(Integer(unsigned=True), default=0)  # number of API calls made
    # BUGFIX: pass the callable `now`, not `now()`, so the default is
    # evaluated per-insert instead of once at import time (consistent with
    # Log.timestamp).
    access = Column(UTCDateTime, default=now)
    application = Column(String(128))
    description = Column(String(255))
    flag = Column(Integer(unsigned=True), default=0)  # bitmask of FLAGS
    FLAGS = SET(['DISABLED', 'FASTQUERY', 'KICK', 'BAN', 'AUTH'])

    def gen_key(self):
        """Generate a fresh, collision-free key and store it in self.key."""
        c = 0
        while True:
            key = hashlib.sha1("%s%s%d%d" % (self.application, self.description, time.time(), c)).hexdigest()
            if ApiKey.query.filter(ApiKey.key == key).first() is None:
                break
            # BUGFIX: bump the salt on collision; previously `c` stayed 0, so
            # two collisions within the same time.time() tick would loop
            # forever producing the identical digest.
            c += 1
        self.key = key

    @staticmethod
    def check_key(key):
        """Validate an API key string and account for the access.

        Raises KeyInvalidException for unknown keys, KeyDisabledException
        for disabled ones, and FastQueryException when a non-FASTQUERY key
        queries more than once per second.  Returns the ApiKey row with its
        counter and access time updated.
        """
        try:
            apikey = ApiKey.query.filter(ApiKey.key == key).one()
        except (exc.NoResultFound, exc.MultipleResultsFound):
            raise rexc.api.KeyInvalidException()
        if apikey.flag & ApiKey.FLAGS.DISABLED:
            raise rexc.api.KeyDisabledException()
        elif not apikey.flag & ApiKey.FLAGS.FASTQUERY:
            if now() - apikey.access <= timedelta(seconds=1):
                raise rexc.api.FastQueryException(last_access=apikey.access)
        apikey.counter += 1
        apikey.access = now()
        return apikey
class Log(Base):
    """A persisted log record."""
    __tablename__ = 'log'
    log = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    # Note: the callable `now` (not `now()`) is passed, so the timestamp is
    # computed per-insert.
    timestamp = Column(UTCDateTime, default=now)
    severity = Column(Integer(unsigned=True))
    module = Column(String(50))  # originating module name
    message = Column(Text)
class Loop(Base):
    """A filler audio loop scheduled when no live show is running.

    `begin`/`end` are integer clock positions; a loop may wrap past
    midnight (begin > end), which the length/contains logic accounts for.
    """
    __tablename__ = 'loops'
    loop = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
    begin = Column(Integer(unsigned=True), default=0)
    end = Column(Integer(unsigned=True), default=1440)
    filename = Column(String(50))  # audio file name relative to the loop path

    @hybrid_property
    def length(self):
        # Python-side duration; wrap-around is handled by shifting `end`
        # forward one day. NOTE(review): the wrap constant is 2400 while
        # `end` defaults to 1440 -- the encoding looks like mixed HHMM vs.
        # minutes; confirm the intended unit (author marked this area
        # "@todo maybe broken" below).
        if (self.end >= self.begin):
            return abs(self.end - self.begin)
        else:
            return abs((self.end + 2400) - self.begin)

    @length.expression
    def length(cls):
        # SQL-side equivalent of the property above, usable in order_by/filter.
        return func.abs(cast(case([(cls.begin <= cls.end, cls.end),
                                   (cls.begin >= cls.end, cls.end + 2400)]), Integer) - cast(cls.begin, Integer))

    @hybrid_method
    def contains(self, point):
        # True when `point` lies within the loop's window; the wrapped case
        # (begin > end) uses OR because the window spans midnight.
        return case([(self.begin <= self.end, (self.begin <= point) & (self.end >= point)),
                     (self.begin >= self.end, (self.begin <= point) | (self.end >= point))])

    @hybrid_property
    def file_exists(self):
        # True when the loop's audio file is present under the configured
        # liquidsoap loop path.
        if self.filename is None:
            return False
        return os.path.exists(os.path.join(get_path(CONFIG.get('liquidsoap', 'looppath')), self.filename))

    @staticmethod
    def get_current_loop():
        """
        returns the current loop to be scheduled
        @todo maybe broken ;_;
        """
        n = now()
        #try to find a loop that should be running
        # Clock position: hour*100 plus minutes mapped onto 0-99.
        loops = Loop.query.filter(Loop.contains(int(n.hour * 100 + (n.minute / 60.) * 100))).order_by(
            Loop.length.asc()).all()
        for loop in loops:
            if loop.file_exists:
                return loop;
        # we found no loops
        # just try to find the longest one
        # NOTE(review): order is ascending, so the *shortest* loop with an
        # existing file wins, contradicting the comment above -- confirm intent.
        loops = Loop.query.order_by(Loop.length.asc()).all()
        for loop in loops:
            if loop.file_exists:
                return loop;
        #okay, now we have a problem, just retun none
        return None
| bsd-3-clause |
raviflipsyde/servo | components/script/dom/bindings/codegen/parser/tests/test_implements.py | 264 | 5961 | # Import the WebIDL module, so we can do isinstance checks and whatnot
import WebIDL
def WebIDLTest(parser, harness):
    """Exercise the parser's handling of `implements` statements:

    - members of the implemented interface are copied onto the implementor,
    - duplicate member names are rejected (directly, across multiple
      implemented interfaces, transitively, and via parent interfaces),
    - diamond-shaped implements graphs are allowed,
    - callback interfaces and dictionaries are rejected on either side.
    """
    # Basic functionality
    threw = False
    try:
        parser.parse("""
            A implements B;
            interface B {
              attribute long x;
            };
            interface A {
              attribute long y;
            };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(not threw, "Should not have thrown on implements statement "
               "before interfaces")
    harness.check(len(results), 3, "We have three statements")
    harness.ok(isinstance(results[1], WebIDL.IDLInterface), "B is an interface")
    harness.check(len(results[1].members), 1, "B has one member")
    A = results[2]
    harness.ok(isinstance(A, WebIDL.IDLInterface), "A is an interface")
    harness.check(len(A.members), 2, "A has two members")
    # A's own members come first, then the members copied from B.
    harness.check(A.members[0].identifier.name, "y", "First member is 'y'")
    harness.check(A.members[1].identifier.name, "x", "Second member is 'x'")

    # Duplicated member names not allowed
    threw = False
    try:
        parser.parse("""
            C implements D;
            interface D {
              attribute long x;
            };
            interface C {
              attribute long x;
            };
        """)
        parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown on implemented interface duplicating "
               "a name on base interface")

    # Same, but duplicated across implemented interfaces
    threw = False
    try:
        parser.parse("""
            E implements F;
            E implements G;
            interface F {
              attribute long x;
            };
            interface G {
              attribute long x;
            };
            interface E {};
        """)
        parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown on implemented interfaces "
               "duplicating each other's member names")

    # Same, but duplicated across indirectly implemented interfaces
    threw = False
    try:
        parser.parse("""
            H implements I;
            H implements J;
            I implements K;
            interface K {
              attribute long x;
            };
            interface L {
              attribute long x;
            };
            interface I {};
            interface J : L {};
            interface H {};
        """)
        parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown on indirectly implemented interfaces "
               "duplicating each other's member names")

    # Same, but duplicated across an implemented interface and its parent
    threw = False
    try:
        parser.parse("""
            M implements N;
            interface O {
              attribute long x;
            };
            interface N : O {
              attribute long x;
            };
            interface M {};
        """)
        parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown on implemented interface and its "
               "ancestor duplicating member names")

    # Reset the parser so we can actually find things where we expect
    # them in the list
    parser = parser.reset()

    # Diamonds should be allowed
    threw = False
    try:
        parser.parse("""
            P implements Q;
            P implements R;
            Q implements S;
            R implements S;
            interface Q {};
            interface R {};
            interface S {
              attribute long x;
            };
            interface P {};
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(not threw, "Diamond inheritance is fine")
    # S's member must only be merged into P once despite two paths.
    harness.check(results[6].identifier.name, "S", "We should be looking at 'S'")
    harness.check(len(results[6].members), 1, "S should have one member")
    harness.check(results[6].members[0].identifier.name, "x",
                  "S's member should be 'x'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            callback interface TestCallbackInterface {
            };
            TestInterface implements TestCallbackInterface;
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw,
               "Should not allow callback interfaces on the right-hand side "
               "of 'implements'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            callback interface TestCallbackInterface {
            };
            TestCallbackInterface implements TestInterface;
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw,
               "Should not allow callback interfaces on the left-hand side of "
               "'implements'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            dictionary Dict {
            };
            Dict implements TestInterface;
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw,
               "Should not allow non-interfaces on the left-hand side "
               "of 'implements'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            dictionary Dict {
            };
            TestInterface implements Dict;
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw,
               "Should not allow non-interfaces on the right-hand side "
               "of 'implements'")
maftieu/CouchPotatoServer | libs/pynma/pynma.py | 73 | 4554 | #!/usr/bin/python
from xml.dom.minidom import parseString
from httplib import HTTPSConnection
from urllib import urlencode
__version__ = "0.1"
API_SERVER = 'nma.usk.bz'
ADD_PATH = '/publicapi/notify'
USER_AGENT="PyNMA/v%s"%__version__
def uniq_preserve(seq): # Dave Kirby
    """Return the unique elements of *seq*, keeping first-seen order."""
    seen = set()
    kept = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            kept.append(item)
    return kept
def uniq(seq):
    """Return the unique elements of *seq* as a list (order not guaranteed)."""
    # dict.fromkeys(...) instead of {}.fromkeys(...).keys(): on Python 3
    # .keys() returns a view, not a list, which breaks the list operations
    # (append/remove) PyNMA performs on the result.
    return list(dict.fromkeys(seq))
class PyNMA(object):
    """PyNMA(apikey=[], developerkey=None)

    Client for the Notify My Android public API.
    Takes 2 optional arguments:
        - (opt) apikey:       a string containing one key, or a list of keys
        - (opt) developerkey: your developer key, if any
    """
    def __init__(self, apikey=[], developerkey=None):
        self._developerkey = None
        self.developerkey(developerkey)
        # BUGFIX: always initialize the key list. Previously it was only
        # created when an apikey was passed, so PyNMA() followed by
        # addkey()/push() crashed with AttributeError.
        self._apikey = []
        if apikey:
            if type(apikey) == str:
                apikey = [apikey]
            self._apikey = uniq(apikey)

    def addkey(self, key):
        "Add a key (register ?)"
        if type(key) == str:
            if not key in self._apikey:
                self._apikey.append(key)
        elif type(key) == list:
            for k in key:
                if not k in self._apikey:
                    self._apikey.append(k)

    def delkey(self, key):
        "Removes a key (unregister ?)"
        if type(key) == str:
            if key in self._apikey:
                self._apikey.remove(key)
        elif type(key) == list:
            for k in key:
                # BUGFIX: test membership of k (the element), not key (the
                # whole list). The old test was always False, so deleting a
                # list of keys silently removed nothing.
                if k in self._apikey:
                    self._apikey.remove(k)

    def developerkey(self, developerkey):
        "Sets the developer key (and check it has the good length)"
        if type(developerkey) == str and len(developerkey) == 48:
            self._developerkey = developerkey

    def push(self, application="", event="", description="", url="", priority=0, batch_mode=False):
        """Pushes a message on the registered API keys.
        Arguments:
            - (req) application: application name [256]
            - (req) event:       event name [1000]
            - (req) description: description [10000]
            - (opt) url:         url [512]
            - (opt) priority:    from -2 (lowest) to 2 (highest) (def:0)
            - (opt) batch_mode:  call API 5 by 5 (def:False)

        Warning: using batch_mode will return error only if all API keys are bad
        cf: http://nma.usk.bz/api.php
        """
        datas = {
            'application': application[:256].encode('utf8'),
            'event': event[:1024].encode('utf8'),
            'description': description[:10000].encode('utf8'),
            'priority': priority
        }

        if url:
            datas['url'] = url[:512]

        if self._developerkey:
            datas['developerkey'] = self._developerkey

        results = {}
        if not batch_mode:
            # One HTTP call per key; the result dict is keyed by API key.
            for key in self._apikey:
                datas['apikey'] = key
                res = self.callapi('POST', ADD_PATH, datas)
                results[key] = res
        else:
            # The API accepts up to 5 comma-separated keys per call.
            for i in range(0, len(self._apikey), 5):
                datas['apikey'] = ",".join(self._apikey[i:i+5])
                res = self.callapi('POST', ADD_PATH, datas)
                results[datas['apikey']] = res
        return results

    def callapi(self, method, path, args):
        """Perform one HTTPS request against the NMA API and parse the reply."""
        headers = { 'User-Agent': USER_AGENT }
        if method == "POST":
            headers['Content-type'] = "application/x-www-form-urlencoded"
        http_handler = HTTPSConnection(API_SERVER)
        http_handler.request(method, path, urlencode(args), headers)
        resp = http_handler.getresponse()

        try:
            res = self._parse_reponse(resp.read())
        except Exception as e:
            # `except ... as` works on Python 2.6+ as well; report parse
            # failures as a synthetic error record instead of raising.
            res = {'type': "pynmaerror",
                   'code': 600,
                   'message': str(e)
                   }

        return res

    def _parse_reponse(self, response):
        """Parse the API's XML reply into a dict with type/message keys."""
        root = parseString(response).firstChild
        for elem in root.childNodes:
            if elem.nodeType == elem.TEXT_NODE: continue
            if elem.tagName == 'success':
                res = dict(elem.attributes.items())
                res['message'] = ""
                res['type'] = elem.tagName
                return res
            if elem.tagName == 'error':
                res = dict(elem.attributes.items())
                res['message'] = elem.firstChild.nodeValue
                res['type'] = elem.tagName
                return res
| gpl-3.0 |
AbinashBishoyi/python-whatsapp-bot | modules/registration.py | 2 | 4361 | import json
import modules
bot=None
def onclientinfochange():
    """Persist the bot's client-info dict to its JSON file on disk."""
    global bot
    with open(bot.clientsinfofile, 'w') as outfile:
        json.dump(bot.clientsinfo, outfile)
def AI(jid,message,querer,group):
    """Registration gatekeeper for unknown contacts.

    Tracks per-jid state in bot.clientsinfo. Unregistered senders are sent a
    registration form once, then subsequent messages are either counted (to
    auto-leave spammy groups) or parsed as a filled-in form. `group` is the
    message author when the message came from a group, else None.
    """
    global bot
    try:
        clientinfo=bot.clientsinfo[jid]
    except KeyError:
        # First contact: the admin is whitelisted immediately, everyone
        # else starts unregistered.
        if bot.admin in jid:
            clientinfo={'okaytotalk':True}
            bot.clientsinfo[jid]=clientinfo
        else:
            clientinfo={'okaytotalk':False,'sentprereqs':False}
            bot.clientsinfo[jid]=clientinfo
    if not clientinfo['okaytotalk']:
        if not clientinfo['sentprereqs']:
            # Send the three-part registration prompt exactly once.
            preprereqs="Hi there! Before you start (ab)using me, I need to make sure the suffering is worth it. Please copy paste the registration form below and fill out the details INSIDE the square brackets : [like this]. Do not edit anything outside. Do not remove the square brackets. Send it back right here."
            if group:
                preprereqs+="\nIf you choose not to register within 50 messages I will leave the group automatically"
                prereqs="Auth code=%s\nRegistration form for group use. \nGroup name: []\nGroup admin name: []\nGroup admin email: []\nGroup admin phone: []\nGroup admin occupation: []\nBrief description of group activity: []\nHow did you come to know about me?: []\nEnd of registration form" % jid
            else:
                prereqs="Auth code=%s\nRegistration form for individual use. \nYour name: []\nEmail address: []\nPhone number: []\nOccupation: []\nHow did you come to know about me?: []\nEnd of registration form" % jid
            postreqs="By registering, you agree to the following terms and conditions.\n0. My owner has the right to stop running me AT ANY TIME\n1. You agree to not use me in any way that will cause trouble to my owner, the humanity, or the universe.\n\nAdditional info:\nI am open source and released under the MIT license. Visit github.com/asdofindia/python-whatsapp-bot to view/clone/contribute to my source code.\nCredits:\n* pandorabots.com for their demo bots\n* Tarek Galal for Yowsup library\n* Akshay S Dinesh for fathering me"
            modules.sender.message_queue(jid,preprereqs)
            modules.sender.message_queue(jid,prereqs)
            modules.sender.message_queue(jid,postreqs)
            clientinfo['sentprereqs']=True
            bot.clientsinfo[jid]=clientinfo
            onclientinfochange()
        else:
            # Count messages from unregistered senders; leave groups that
            # ignore the form for too long.
            try:
                clientinfo['messagecount']+=1
            except KeyError:
                clientinfo['messagecount']=1
            if clientinfo['messagecount']>50:
                if group:
                    bot.methodsInterface.call("group_end",(jid,))
            # Try to interpret this message as a filled registration form;
            # the first line must echo back the auth code (the jid).
            registrationdetails=message.split('\n')
            if registrationdetails[0]==("Auth code="+jid):
                registrationtried=True  # NOTE(review): set but never read
                regdetails={}
                try:
                    # Drop the auth-code/header lines and the trailing
                    # "End of registration form" line, then parse each
                    # "Question: [answer]" line.
                    registrationdetails=registrationdetails[2:-1]
                    errormessage=""
                    for detail in registrationdetails:
                        detailsplit=detail.split(': ')
                        question=detailsplit[0]
                        answer=detailsplit[1].strip('[]').strip()
                        if not answer:
                            errormessage+="Please fill the field: %s\n" % question
                        else:
                            regdetails[question]=answer
                    if errormessage:
                        errormessage+="Resubmit the registration form with correct details. Do not edit anything outside the square brackets."
                        registrationfailed=True
                    else:
                        registrationfailed=False
                except IndexError:
                    # A line without ": " means the form layout was edited.
                    errormessage="Please copy paste the registration form and fill it carefully with correct details. Do not edit anything outside the square brackets"
                    registrationfailed=True
                if registrationfailed:
                    modules.sender.message_queue(jid,errormessage)
                else:
                    successmessage="Registration succeeded. Please bear in mind that my owner will be checking the registration details and ban you if they are bogus details"
                    modules.sender.message_queue(jid,successmessage)
                    clientinfo['okaytotalk']=True
                    clientinfo['regdetails']=regdetails
                    bot.clientsinfo[jid]=clientinfo
                    onclientinfochange()
def onMessageReceived(messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast):
    # Direct (1:1) message handler: delegate to AI with no group author.
    AI(jid,messageContent,pushName,None)
def onGroupMessageReceived(messageId, jid, msgauthor, messageContent, timestamp, wantsReceipt, pushName):
    # Group message handler: pass the message author so AI knows this is a group.
    AI(jid,messageContent,pushName,msgauthor)
def setup(parent):
    """Module entry point: register message listeners and remember the bot."""
    parent.signalsInterface.registerListener("message_received", onMessageReceived)
    parent.signalsInterface.registerListener("group_messageReceived", onGroupMessageReceived)
    global bot
    bot=parent
| mit |
CannonFotter/shadowsocks | tests/test_udp_src.py | 1009 | 2482 | #!/usr/bin/python
import socket
import socks
SERVER_IP = '127.0.0.1'  # SOCKS5 proxy under test
SERVER_PORT = 1081

if __name__ == '__main__':
    # Test 1: same source port IPv4
    # Datagrams sent from one client socket to two different IPv4
    # destinations must arrive from the same proxy-side source address.
    sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
                                socket.SOL_UDP)
    sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
    sock_out.bind(('127.0.0.1', 9000))

    sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                             socket.SOL_UDP)
    sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                             socket.SOL_UDP)

    sock_in1.bind(('127.0.0.1', 9001))
    sock_in2.bind(('127.0.0.1', 9002))

    sock_out.sendto(b'data', ('127.0.0.1', 9001))
    result1 = sock_in1.recvfrom(8)

    sock_out.sendto(b'data', ('127.0.0.1', 9002))
    result2 = sock_in2.recvfrom(8)

    sock_out.close()
    sock_in1.close()
    sock_in2.close()

    # make sure they're from the same source port
    # (recvfrom returns (data, addr); equal tuples imply equal source addr)
    assert result1 == result2

    # Test 2: same source port IPv6
    # try again from the same port but IPv6
    sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
                                socket.SOL_UDP)
    sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
    sock_out.bind(('127.0.0.1', 9000))

    sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
                             socket.SOL_UDP)
    sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
                             socket.SOL_UDP)

    sock_in1.bind(('::1', 9001))
    sock_in2.bind(('::1', 9002))

    sock_out.sendto(b'data', ('::1', 9001))
    result1 = sock_in1.recvfrom(8)

    sock_out.sendto(b'data', ('::1', 9002))
    result2 = sock_in2.recvfrom(8)

    sock_out.close()
    sock_in1.close()
    sock_in2.close()

    # make sure they're from the same source port
    assert result1 == result2

    # Test 3: different source ports IPv6
    # A different client source port must map to a different proxy-side port.
    sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
                                socket.SOL_UDP)
    sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
    sock_out.bind(('127.0.0.1', 9003))

    sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
                             socket.SOL_UDP)
    sock_in1.bind(('::1', 9001))
    sock_out.sendto(b'data', ('::1', 9001))
    result3 = sock_in1.recvfrom(8)

    # make sure they're from different source ports
    assert result1 != result3

    sock_out.close()
    sock_in1.close()
| apache-2.0 |
devlights/try-python | trypython/stdlib/dataclasses_/dataclasses02.py | 1 | 1182 | """
dataclasses モジュールのサンプルです.
fronzen プロパティの指定について
REFERENCESS:: http://bit.ly/2KTZynw
http://bit.ly/2KJCnwk
http://bit.ly/2KHeNA9
http://bit.ly/2KFLGxc
"""
import dataclasses as dc
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
@dc.dataclass(frozen=True)
class Data1:
    """Immutable line item: name, unit price, and quantity."""
    name: str
    unit_price: float
    quantity: int = 0

    def total_cost(self) -> float:
        """Return the total price (unit price times quantity)."""
        return self.quantity * self.unit_price
class Sample(SampleBase):
    """Demonstrates that assigning to a frozen dataclass raises an error."""

    def exec(self):
        data = Data1(name='test', unit_price=300.5)
        try:
            # frozen=True dataclasses reject any attribute assignment with
            # dataclasses.FrozenInstanceError.
            data.quantity = 5
        except dc.FrozenInstanceError as err:
            pr('frozen な dataclass に値を設定', err)
def go():
    """Instantiate the sample and run its demo."""
    Sample().exec()
| mit |
rackergs/ansible-modules-extras | notification/slack.py | 61 | 7655 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: slack
short_description: Send Slack notifications
description:
- The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
version_added: 1.6
author: "Ramon de la Fuente (@ramondelafuente)"
options:
domain:
description:
- Slack (sub)domain for your environment without protocol. (i.e.
C(future500.slack.com)) In 1.8 and beyond, this is deprecated and may
be ignored. See token documentation for information.
required: false
token:
description:
- Slack integration token. This authenticates you to the slack service.
Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
1.8 and above, ansible adapts to the new slack API where tokens look
like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
are in the new format then slack will ignore any value of domain. If
the token is in the old format the domain is required. Ansible has no
control of when slack will get rid of the old API. When slack does
that the old format will stop working.
required: true
msg:
description:
- Message to send.
required: true
channel:
description:
- Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
required: false
username:
description:
- This is the sender of the message.
required: false
default: ansible
icon_url:
description:
- Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico))
required: false
icon_emoji:
description:
- Emoji for the message sender. See Slack documentation for options.
(if I(icon_emoji) is set, I(icon_url) will not be used)
required: false
link_names:
description:
- Automatically create links for channels and usernames in I(msg).
required: false
default: 1
choices:
- 1
- 0
parse:
description:
- Setting for the message parser at Slack
required: false
choices:
- 'full'
- 'none'
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices:
- 'yes'
- 'no'
color:
version_added: 2.0
description:
- Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
required: false
default: 'normal'
choices:
- 'normal'
- 'good'
- 'warning'
- 'danger'
"""
EXAMPLES = """
- name: Send notification message via Slack
local_action:
module: slack
domain: future500.slack.com
token: thetokengeneratedbyslack
msg: "{{ inventory_hostname }} completed"
- name: Send notification message via Slack all options
local_action:
module: slack
domain: future500.slack.com
token: thetokengeneratedbyslack
msg: "{{ inventory_hostname }} completed"
channel: "#ansible"
username: "Ansible on {{ inventory_hostname }}"
icon_url: "http://www.example.com/some-image-file.png"
link_names: 0
parse: 'none'
- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
slack:
domain: future500.slack.com
token: thetokengeneratedbyslack
msg: "{{ inventory_hostname }} is alive!"
color: good
username: ""
icon_url: ""
"""
OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color):
    """Assemble the `payload=<json>` form body for Slack's incoming webhook.

    With color 'normal' the text goes in the top-level `text` field;
    otherwise it is wrapped in an attachment so Slack shows a color bar.
    Optional fields are only included when their argument is not None.
    """
    if color == 'normal':
        payload = dict(text=text)
    else:
        payload = dict(attachments=[dict(text=text, color=color)])
    if channel is not None:
        # '@' targets a user, '#' a channel; default to a channel prefix.
        payload['channel'] = channel if channel[0] in ('#', '@') else '#' + channel
    if username is not None:
        payload['username'] = username
    # An emoji icon takes precedence over the icon URL.
    if icon_emoji is not None:
        payload['icon_emoji'] = icon_emoji
    else:
        payload['icon_url'] = icon_url
    if link_names is not None:
        payload['link_names'] = link_names
    if parse is not None:
        payload['parse'] = parse

    return "payload=" + module.jsonify(payload)
def do_notify_slack(module, domain, token, payload):
    # New-style Slack tokens contain two slashes (T.../B.../secret) and fully
    # determine the webhook URL; old-style tokens also require the team domain.
    if token.count('/') >= 2:
        # New style token
        slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token)
    else:
        if not domain:
            module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook")
        slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)

    response, info = fetch_url(module, slack_incoming_webhook, data=payload)
    if info['status'] != 200:
        # Never echo the real webhook URL in errors: it embeds the secret token.
        obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
        module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
def main():
    # Declare the module's accepted task arguments; AnsibleModule handles
    # parsing, type coercion, and no_log masking of the token.
    module = AnsibleModule(
        argument_spec = dict(
            domain = dict(type='str', required=False, default=None),
            token = dict(type='str', required=True, no_log=True),
            msg = dict(type='str', required=True),
            channel = dict(type='str', default=None),
            username = dict(type='str', default='Ansible'),
            icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'),
            icon_emoji = dict(type='str', default=None),
            link_names = dict(type='int', default=1, choices=[0,1]),
            parse = dict(type='str', default=None, choices=['none', 'full']),
            validate_certs = dict(default='yes', type='bool'),
            color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger'])
        )
    )

    domain = module.params['domain']
    token = module.params['token']
    text = module.params['msg']
    channel = module.params['channel']
    username = module.params['username']
    icon_url = module.params['icon_url']
    icon_emoji = module.params['icon_emoji']
    link_names = module.params['link_names']
    parse = module.params['parse']
    color = module.params['color']

    # Build the webhook body, post it, and report success to Ansible.
    payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color)
    do_notify_slack(module, domain, token, payload)

    module.exit_json(msg="OK")
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
algorithm-ninja/territoriali-backend | terry/__main__.py | 1 | 1084 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 2017-2018 - Edoardo Morassutto <edoardo.morassutto@gmail.com>
# Copyright 2017-2018 - Luca Versari <veluca93@gmail.com>
import argparse
from terry.config import Config
from terry.contest_manager import ContestManager
from terry.database import Database
from terry.logger import Logger
from terry.server import Server
def main():
    """Parse CLI options, initialise logging/database/contest, run the server."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-c",
        "--config",
        help="Path to the config file",
        default="config/config.yaml")
    args = arg_parser.parse_args()
    if args.config:
        Config.set_config_file(args.config)
    Logger.set_log_level(Config.log_level)
    Logger.connect_to_database()
    Database.connect_to_database()
    ContestManager.read_from_disk(remove_enc=False)
    Server().run()
if __name__ == '__main__':
main()
| mpl-2.0 |
Dandandan/wikiprogramming | jsrepl/build/extern/python/closured/lib/python2.7/posixpath.py | 145 | 13182 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix"""
    # POSIX filesystems are case-sensitive, so the path is returned unchanged.
    return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
    """Test whether a path is absolute"""
    # A POSIX path is absolute exactly when it begins with a slash.
    return s[:1] == '/'
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    joined = a
    for part in p:
        if part.startswith('/'):
            # Absolute component resets everything accumulated so far.
            joined = part
            continue
        # Separate with '/' unless the accumulator is empty or already
        # ends with a slash.
        if joined and not joined.endswith('/'):
            joined += '/'
        joined += part
    return joined
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    cut = p.rfind('/') + 1
    head, tail = p[:cut], p[cut:]
    # Drop trailing slashes from head unless it consists entirely of
    # slashes (the root), which must be preserved as-is.
    stripped = head.rstrip('/')
    if stripped:
        head = stripped
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared implementation in genericpath, parameterised
    # with the POSIX separators ('/' as sep, no altsep, '.' as extsep).
    return genericpath._splitext(p, sep, altsep, extsep)
# Reuse the shared implementation's docstring so help() stays accurate.
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
    """Split a pathname into drive and path. On Posix, drive is always
    empty."""
    # Drive letters are a DOS/Windows concept; POSIX paths have none.
    return '', p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
    """Returns the final component of a pathname"""
    # Everything after the last '/', or the whole string when there is none
    # (rfind returns -1, so the slice starts at 0).
    return p[p.rfind('/') + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
    """Returns the directory component of a pathname"""
    head = p[:p.rfind('/') + 1]
    # Keep a head made only of slashes (the root) intact; otherwise drop
    # any trailing slashes.
    stripped = head.rstrip('/')
    return stripped if stripped else head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
    """Return True if *path* refers to a symbolic link.

    Always returns False on platforms without os.lstat.
    """
    try:
        mode = os.lstat(path).st_mode
    except (os.error, AttributeError):
        # Nonexistent path, or a platform lacking lstat entirely.
        return False
    return stat.S_ISLNK(mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Return True if *path* exists; broken symlinks count as existing."""
    try:
        os.lstat(path)
        return True
    except os.error:
        return False
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Return True when two pathnames reference the same actual file,
    i.e. their stat results share inode and device numbers."""
    st1, st2 = os.stat(f1), os.stat(f2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Return True when two open file descriptors reference the same file
    (same inode and device; not necessarily the same descriptor)."""
    st1, st2 = os.fstat(fp1), os.fstat(fp2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Return True when two stat buffers describe the same file."""
    return (s1.st_ino, s1.st_dev) == (s2.st_ino, s2.st_dev)
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Return True if *path* is a mount point: its parent directory lives
    on a different device, or '..' resolves to the same i-node (as for
    the filesystem root)."""
    if islink(path):
        # A symlink can never be a mount point.
        return False
    try:
        st_here = os.lstat(path)
        st_parent = os.lstat(join(path, '..'))
    except os.error:
        # Nonexistent paths cannot be mount points.
        return False
    if st_here.st_dev != st_parent.st_dev:
        return True  # crossing a device boundary
    if st_here.st_ino == st_parent.st_ino:
        return True  # '..' is the same i-node as path itself
    return False
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.
    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..'). func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting. No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func. It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics. Passing None for arg is common."""
    # Python 2 only: warn that this API is gone in 3.x under the -3 flag.
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable or vanished directory: silently skip the subtree.
        return
    # The callback sees (and may prune) the name list before recursion.
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        try:
            st = os.lstat(name)
        except os.error:
            # Entry disappeared between listdir and lstat; skip it.
            continue
        if stat.S_ISDIR(st.st_mode):
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand a leading '~' or '~user' to the matching home directory.

    The path is returned unchanged when it has no leading tilde or when
    the named user (or $HOME) cannot be determined.
    """
    if not path.startswith('~'):
        return path
    slash = path.find('/', 1)
    if slash < 0:
        slash = len(path)
    if slash == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        if 'HOME' in os.environ:
            userhome = os.environ['HOME']
        else:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
    else:
        # '~user': look the account up in the password database.
        import pwd
        try:
            userhome = pwd.getpwnam(path[1:slash]).pw_dir
        except KeyError:
            return path
    # Drop trailing slashes from the home dir, unless it is all slashes.
    userhome = userhome.rstrip('/') or userhome
    return userhome + path[slash:]
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
# Compiled pattern for $name / ${name}; built lazily on first use.
_varprog = None
def expandvars(path):
    """Expand shell-style $var and ${var} references using os.environ.

    References to unknown variables are left untouched.
    """
    global _varprog
    if '$' not in path:
        return path
    if not _varprog:
        import re
        _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
    pos = 0
    while True:
        match = _varprog.search(path, pos)
        if match is None:
            break
        start, end = match.span(0)
        name = match.group(1)
        if name[:1] == '{' and name[-1:] == '}':
            name = name[1:-1]
        if name in os.environ:
            # Splice in the value and resume scanning after it, so the
            # substituted text itself is never re-expanded.
            rest = path[end:]
            path = path[:start] + os.environ[name]
            pos = len(path)
            path += rest
        else:
            pos = end
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, '.' components and
    resolvable '..' components; '' normalizes to '.'.

    Note: this is purely textual and may change the meaning of a path
    that contains symbolic links.
    """
    # Preserve unicode (if path is unicode) — Python 2 only.
    slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
    if path == '':
        return dot
    initial_slashes = path.startswith('/')
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
        path.startswith('//') and not path.startswith('///')):
        initial_slashes = 2
    comps = path.split('/')
    new_comps = []
    for comp in comps:
        # Empty components (from '//') and '.' are dropped outright.
        if comp in ('', '.'):
            continue
        # Keep '..' when it cannot be resolved: at the start of a relative
        # path, or stacked on a previously kept '..'.
        if (comp != '..' or (not initial_slashes and not new_comps) or
             (new_comps and new_comps[-1] == '..')):
            new_comps.append(comp)
        elif new_comps:
            # A resolvable '..' cancels the previous component.
            new_comps.pop()
    comps = new_comps
    path = slash.join(comps)
    if initial_slashes:
        path = slash*initial_slashes + path
    return path or dot
def abspath(path):
    """Return a normalized, absolute version of *path* (relative paths
    are anchored at the current working directory)."""
    if not isabs(path):
        # Python 2: fetch the cwd in the same string type as the input.
        if isinstance(path, unicode):
            cwd = os.getcwdu()
        else:
            cwd = os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    # Split into components, keeping a leading '/' marker for absolute paths.
    if isabs(filename):
        bits = ['/'] + filename.split('/')[1:]
    else:
        bits = [''] + filename.split('/')
    # Walk prefix by prefix; on the first symlink, resolve it and restart
    # on the rewritten path so later components are re-examined too.
    for i in range(2, len(bits)+1):
        component = join(*bits[0:i])
        # Resolve symbolic links.
        if islink(component):
            resolved = _resolve_link(component)
            if resolved is None:
                # Infinite loop -- return original component + rest of the path
                return abspath(join(*([component] + bits[i:])))
            else:
                newpath = join(*([resolved] + bits[i:]))
                return realpath(newpath)
    # No symlinks anywhere in the path.
    return abspath(filename)
def _resolve_link(path):
    """Internal helper function. Takes a path and follows symlinks
    until we either arrive at something that isn't a symlink, or
    encounter a path we've seen before (meaning that there's a loop).
    Returns the resolved path, or None on a symlink loop.
    """
    paths_seen = set()
    while islink(path):
        if path in paths_seen:
            # Already seen this path, so we must have a symlink loop
            return None
        paths_seen.add(path)
        # Resolve where the link points to
        resolved = os.readlink(path)
        if not isabs(resolved):
            # Relative link targets are interpreted from the link's directory.
            dir = dirname(path)
            path = normpath(join(dir, resolved))
        else:
            path = normpath(resolved)
    return path
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=curdir):
    """Return a relative version of *path* with respect to *start*
    (default: the current directory). Raises ValueError on an empty path."""
    if not path:
        raise ValueError("no path specified")
    # Compare absolute component lists; empty components are dropped.
    start_list = [x for x in abspath(start).split(sep) if x]
    path_list = [x for x in abspath(path).split(sep) if x]
    # Work out how much of the filepath is shared by start and path.
    i = len(commonprefix([start_list, path_list]))
    # Climb out of the unshared part of start, then descend into path.
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| mit |
cgranade/qutip | qutip/tests/test_qubit_evolution.py | 8 | 6429 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY np.exp(RESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from numpy.testing import run_module_suite, assert_equal
from qutip import (sigmax, sigmay, sigmaz, sigmam, mesolve, mcsolve, essolve,
basis)
def _qubit_integrate(tlist, psi0, epsilon, delta, g1, g2, solver):
    """Evolve a dissipative qubit with the requested solver and return the
    expectation values of sigma-x, sigma-y and sigma-z over *tlist*."""
    H = epsilon / 2.0 * sigmaz() + delta / 2.0 * sigmax()
    # Collapse operators: relaxation (g1) and dephasing (g2), if nonzero.
    c_op_list = []
    if g1 > 0.0:
        c_op_list.append(np.sqrt(g1) * sigmam())
    if g2 > 0.0:
        c_op_list.append(np.sqrt(g2) * sigmaz())
    e_ops = [sigmax(), sigmay(), sigmaz()]
    if solver == "me":
        output = mesolve(H, psi0, tlist, c_op_list, e_ops)
    elif solver == "es":
        output = essolve(H, psi0, tlist, c_op_list, e_ops)
    elif solver == "mc":
        output = mcsolve(H, psi0, tlist, c_op_list, e_ops, ntraj=750)
    else:
        raise ValueError("unknown solver")
    return output.expect[0], output.expect[1], output.expect[2]
def test_MESolverCase1():
    """
    Test mesolve qubit, with dissipation
    """
    epsilon = 0.0 * 2 * np.pi  # cavity frequency
    delta = 1.0 * 2 * np.pi    # atom frequency
    g1, g2 = 0.0, 0.1
    psi0 = basis(2, 0)  # initial state
    tlist = np.linspace(0, 5, 200)
    sx, sy, sz = _qubit_integrate(tlist, psi0, epsilon, delta, g1, g2, "me")
    # Analytic solution: damped precession at the atom frequency.
    decay = np.exp(-tlist * g2)
    analytic = [np.zeros(np.shape(tlist)),
                -np.sin(2 * np.pi * tlist) * decay,
                np.cos(2 * np.pi * tlist) * decay]
    for computed, expected in zip((sx, sy, sz), analytic):
        assert_equal(max(abs(computed - expected)) < 0.05, True)
def test_MESolverCase2():
    """
    Test mesolve qubit, no dissipation
    """
    epsilon = 0.0 * 2 * np.pi  # cavity frequency
    delta = 1.0 * 2 * np.pi    # atom frequency
    g1, g2 = 0.0, 0.0
    psi0 = basis(2, 0)  # initial state
    tlist = np.linspace(0, 5, 200)
    sx, sy, sz = _qubit_integrate(tlist, psi0, epsilon, delta, g1, g2, "me")
    # With g2 == 0 the decay factor is identically one (free precession).
    decay = np.exp(-tlist * g2)
    analytic = [np.zeros(np.shape(tlist)),
                -np.sin(2 * np.pi * tlist) * decay,
                np.cos(2 * np.pi * tlist) * decay]
    for computed, expected in zip((sx, sy, sz), analytic):
        assert_equal(max(abs(computed - expected)) < 0.05, True)
def test_ESSolverCase1():
    """
    Test essolve qubit, with dissipation
    """
    epsilon = 0.0 * 2 * np.pi  # cavity frequency
    delta = 1.0 * 2 * np.pi    # atom frequency
    g1, g2 = 0.0, 0.1
    psi0 = basis(2, 0)  # initial state
    tlist = np.linspace(0, 5, 200)
    sx, sy, sz = _qubit_integrate(tlist, psi0, epsilon, delta, g1, g2, "es")
    # Same analytic reference as the mesolve case; essolve must agree.
    decay = np.exp(-tlist * g2)
    analytic = [np.zeros(np.shape(tlist)),
                -np.sin(2 * np.pi * tlist) * decay,
                np.cos(2 * np.pi * tlist) * decay]
    for computed, expected in zip((sx, sy, sz), analytic):
        assert_equal(max(abs(computed - expected)) < 0.05, True)
def test_MCSolverCase1():
    """
    Test mcsolve qubit, with dissipation
    """
    epsilon = 0.0 * 2 * np.pi  # cavity frequency
    delta = 1.0 * 2 * np.pi    # atom frequency
    g1, g2 = 0.0, 0.1
    psi0 = basis(2, 0)  # initial state
    tlist = np.linspace(0, 5, 200)
    sx, sy, sz = _qubit_integrate(tlist, psi0, epsilon, delta, g1, g2, "mc")
    # Monte-Carlo trajectories are noisy, so the tolerance is looser (0.25).
    decay = np.exp(-tlist * g2)
    analytic = [np.zeros(np.shape(tlist)),
                -np.sin(2 * np.pi * tlist) * decay,
                np.cos(2 * np.pi * tlist) * decay]
    for computed, expected in zip((sx, sy, sz), analytic):
        assert_equal(max(abs(computed - expected)) < 0.25, True)
def test_MCSolverCase2():
    """
    Test mcsolve qubit, no dissipation
    """
    epsilon = 0.0 * 2 * np.pi  # cavity frequency
    delta = 1.0 * 2 * np.pi    # atom frequency
    g1, g2 = 0.0, 0.0
    psi0 = basis(2, 0)  # initial state
    tlist = np.linspace(0, 5, 200)
    sx, sy, sz = _qubit_integrate(tlist, psi0, epsilon, delta, g1, g2, "mc")
    # No dissipation: the decay factor is one; tolerance stays at 0.25
    # because of Monte-Carlo sampling noise.
    decay = np.exp(-tlist * g2)
    analytic = [np.zeros(np.shape(tlist)),
                -np.sin(2 * np.pi * tlist) * decay,
                np.cos(2 * np.pi * tlist) * decay]
    for computed, expected in zip((sx, sy, sz), analytic):
        assert_equal(max(abs(computed - expected)) < 0.25, True)
if __name__ == "__main__":
    # Allow running this test module directly via numpy's test runner.
    run_module_suite()
| bsd-3-clause |
mvdroest/RTLSDR-Scanner | src/cli.py | 1 | 9447 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import Queue
from collections import OrderedDict
import os
import sys
from threading import Thread
import threading
import time
from urlparse import urlparse
from constants import SAMPLE_RATE
from devices import DeviceRTL, get_devices_rtl
from events import Event, post_event, EventThread
from file import save_plot, export_plot, ScanInfo, File
from location import ThreadLocation
from misc import nearest, calc_real_dwell, next_2_to_pow
from scan import ThreadScan, update_spectrum, ThreadProcess
from settings import Settings
class Cli(object):
def __init__(self, args):
start = args.start
end = args.end
sweeps = args.sweeps
gain = args.gain
dwell = args.dwell
nfft = args.fft
lo = args.lo
index = args.index
remote = args.remote
directory, filename = os.path.split(args.file)
_null, ext = os.path.splitext(args.file)
self.lock = threading.Lock()
self.stepsTotal = 0
self.steps = 0
self.spectrum = OrderedDict()
self.locations = OrderedDict()
self.settings = Settings(load=False)
self.queueNotify = Queue.Queue()
self.queueScan = Queue.Queue()
self.queueLocation = Queue.Queue()
self.threadLocation = None
error = None
if end <= start:
error = "Start should be lower than end"
elif dwell <= 0:
error = "Dwell should be positive"
elif nfft <= 0:
error = "FFT bins should be positive"
elif ext != ".rfs" and File.get_type_index(ext) == -1:
error = "File extension should be "
error += File.get_type_pretty(File.Types.SAVE)
error += File.get_type_pretty(File.Types.PLOT)
else:
device = DeviceRTL()
if remote is None:
self.settings.devicesRtl = get_devices_rtl()
count = len(self.settings.devicesRtl)
if index > count - 1:
error = "Device not found ({} devices in total):\n".format(count)
for device in self.settings.devicesRtl:
error += "\t{}: {}\n".format(device.indexRtl,
device.name)
else:
device.isDevice = False
url = urlparse('//' + remote)
if url.hostname is not None:
device.server = url.hostname
else:
error = "Invalid hostname"
if url.port is not None:
device.port = url.port
else:
device.port = 1234
self.settings.devicesRtl.append(device)
index = len(self.settings.devicesRtl) - 1
if args.conf is not None:
if os.path.exists(args.conf):
error = self.settings.load_conf(args.conf)
else:
error = 'Cannot find {}'.format(args.conf)
if end - 1 < start:
end = start + 1
if remote is None:
if len(self.settings.devicesRtl):
gain = nearest(gain, self.settings.devicesRtl[index].gains)
else:
error = 'No devices found'
if error is not None:
print "Error: {}".format(error)
exit(1)
self.settings.start = start
self.settings.stop = end
self.settings.dwell = calc_real_dwell(dwell)
self.settings.scanDelay = args.delay
self.settings.nfft = nfft
self.settings.devicesRtl[index].gain = gain
self.settings.devicesRtl[index].lo = lo
print "{} - {}MHz".format(start, end)
print "{} Sweeps".format(sweeps)
print "{}dB Gain".format(gain)
print "{}s Dwell".format(self.settings.dwell)
print "{} FFT points".format(nfft)
print "{}MHz LO".format(lo)
if remote is not None:
print remote
else:
print self.settings.devicesRtl[index].name
if len(self.settings.devicesGps):
self.threadLocation = ThreadLocation(self.queueLocation,
self.settings.devicesGps[0])
if not self.__gps_wait():
self.__gps_stop()
exit(1)
self.__scan(sweeps, self.settings, index)
fullName = os.path.join(directory, filename)
if ext == ".rfs":
scanInfo = ScanInfo()
scanInfo.set_from_settings(self.settings)
save_plot(fullName, scanInfo, self.spectrum, self.locations)
else:
exportType = File.get_type_index(ext)
export_plot(fullName, exportType, self.spectrum)
self.__gps_stop()
print "Done"
def __gps_wait(self):
print '\nWaiting for GPS fix: {}'.format(self.settings.devicesGps[0].get_serial_desc())
while True:
if not self.queueLocation.empty():
status = self.__process_event(self.queueLocation, None)
if status == Event.LOC:
return True
elif status == Event.LOC_ERR:
return False
def __gps_stop(self):
if self.threadLocation and self.threadLocation.isAlive():
self.threadLocation.stop()
def __scan(self, sweeps, settings, index):
samples = settings.dwell * SAMPLE_RATE
samples = next_2_to_pow(int(samples))
for sweep in range(0, sweeps):
print '\nSweep {}:'.format(sweep + 1)
threadScan = ThreadScan(self.queueNotify, self.queueScan, None,
settings, index, samples, False)
while threadScan.isAlive() or self.steps > 0:
if not self.queueNotify.empty():
self.__process_event(self.queueNotify)
if not self.queueLocation.empty():
self.__process_event(self.queueLocation)
if self.settings.scanDelay > 0 and sweep < sweeps - 1:
print '\nDelaying {}s'.format(self.settings.scanDelay)
time.sleep(self.settings.scanDelay)
threadScan.rtl_close()
print ""
print ""
def __process_event(self, queue):
event = queue.get()
status = event.data.get_status()
arg1 = event.data.get_arg1()
arg2 = event.data.get_arg2()
if status == Event.STARTING:
print "Starting"
elif status == Event.STEPS:
self.stepsTotal = (arg1 + 1) * 2
self.steps = self.stepsTotal
elif status == Event.INFO:
if arg2 != -1:
self.settings.devicesRtl[self.settings.indexRtl].tuner = arg2
elif status == Event.DATA:
cal = self.settings.devicesRtl[self.settings.indexRtl].calibration
freq, scan = self.queueScan.get()
process = ThreadProcess(self.queueNotify,
freq, scan, cal,
self.settings.nfft,
self.settings.overlap,
self.settings.winFunc)
process.start()
self.__progress()
elif status == Event.ERROR:
print "Error: {}".format(arg2)
exit(1)
elif status == Event.PROCESSED:
offset = self.settings.devicesRtl[self.settings.indexRtl].offset
Thread(target=update_spectrum, name='Update',
args=(self.queueNotify, self.lock,
self.settings.start,
self.settings.stop,
arg1,
offset,
self.spectrum,
not self.settings.retainScans,
False)).start()
elif status == Event.UPDATED:
self.__progress()
elif status == Event.LOC:
if len(self.spectrum) > 0:
self.locations[max(self.spectrum)] = (arg2[0],
arg2[1],
arg2[2])
elif status == Event.LOC_ERR:
print '{}'.format(arg2)
exit(1)
return status
def __progress(self):
self.steps -= 1
comp = (self.stepsTotal - self.steps) * 100 / self.stepsTotal
sys.stdout.write("\r{0:.1f}%".format(comp))
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| gpl-3.0 |
Serag8/Bachelor | google_appengine/lib/django-1.2/django/core/handlers/base.py | 44 | 9926 | import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
    """Common machinery for Django's request handlers: loads the middleware
    chain from settings and turns an HttpRequest into an HttpResponse."""
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
        http.fix_IE_for_attach,
        http.fix_IE_for_vary,
    ]
    def __init__(self):
        # None doubles as the "middleware not loaded yet" flag; see
        # load_middleware() and _request_middleware below.
        self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__).
        """
        from django.conf import settings
        from django.core import exceptions
        self._view_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            try:
                dot = middleware_path.rindex('.')
            except ValueError:
                raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
            mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
            try:
                mod = import_module(mw_module)
            except ImportError, e:
                raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
            try:
                mw_class = getattr(mod, mw_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
            try:
                mw_instance = mw_class()
            except exceptions.MiddlewareNotUsed:
                # The middleware opted out for this configuration; skip it.
                continue
            # Response and exception middleware run in reverse order of
            # declaration, hence insert(0, ...) versus append().
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware
    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        from django.core import exceptions, urlresolvers
        from django.conf import settings
        try:
            try:
                # Setup default url resolver for this thread.
                urlconf = settings.ROOT_URLCONF
                urlresolvers.set_urlconf(urlconf)
                resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                # Apply request middleware
                for middleware_method in self._request_middleware:
                    response = middleware_method(request)
                    if response:
                        return response
                if hasattr(request, "urlconf"):
                    # Reset url resolver with a custom urlconf.
                    urlconf = request.urlconf
                    urlresolvers.set_urlconf(urlconf)
                    resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                callback, callback_args, callback_kwargs = resolver.resolve(
                        request.path_info)
                # Apply view middleware
                for middleware_method in self._view_middleware:
                    response = middleware_method(request, callback, callback_args, callback_kwargs)
                    if response:
                        return response
                try:
                    response = callback(request, *callback_args, **callback_kwargs)
                except Exception, e:
                    # If the view raised an exception, run it through exception
                    # middleware, and if the exception middleware returns a
                    # response, use that. Otherwise, reraise the exception.
                    for middleware_method in self._exception_middleware:
                        response = middleware_method(request, e)
                        if response:
                            return response
                    raise
                # Complain if the view returned None (a common error).
                if response is None:
                    try:
                        view_name = callback.func_name # If it's a function
                    except AttributeError:
                        view_name = callback.__class__.__name__ + '.__call__' # If it's a class
                    raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
                return response
            except http.Http404, e:
                if settings.DEBUG:
                    from django.views import debug
                    return debug.technical_404_response(request, e)
                else:
                    try:
                        callback, param_dict = resolver.resolve404()
                        return callback(request, **param_dict)
                    except:
                        # The 404 handler itself failed: fall through to the
                        # 500 path, signalling the exception either way.
                        try:
                            return self.handle_uncaught_exception(request, resolver, sys.exc_info())
                        finally:
                            receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
            except exceptions.PermissionDenied:
                return http.HttpResponseForbidden('<h1>Permission denied</h1>')
            except SystemExit:
                # Allow sys.exit() to actually exit. See tickets #1023 and #4701
                raise
            except: # Handle everything else, including SuspiciousOperation, etc.
                # Get the exception info now, in case another exception is thrown later.
                receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
                return self.handle_uncaught_exception(request, resolver, sys.exc_info())
        finally:
            # Reset URLconf for this thread on the way out for complete
            # isolation of request.urlconf
            urlresolvers.set_urlconf(None)
    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        from django.conf import settings
        from django.core.mail import mail_admins
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise
        if settings.DEBUG:
            from django.views import debug
            return debug.technical_500_response(request, *exc_info)
        # When DEBUG is False, send an error message to the admins.
        subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
        try:
            request_repr = repr(request)
        except:
            request_repr = "Request repr() unavailable"
        message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
        mail_admins(subject, message, fail_silently=True)
        # If Http500 handler is not installed, re-raise last exception
        if resolver.urlconf_module is None:
            raise exc_info[1], None, exc_info[2]
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve500()
        return callback(request, **param_dict)
    def _get_traceback(self, exc_info=None):
        "Helper function to return the traceback as a string"
        import traceback
        return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))
    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective).
    """
    from django.conf import settings
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_unicode(settings.FORCE_SCRIPT_NAME)
    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = environ.get('SCRIPT_URL', u'')
    if not script_url:
        script_url = environ.get('REDIRECT_URL', u'')
    if script_url:
        # Strip the trailing PATH_INFO to recover the original script name.
        # Guard the empty case: script_url[:-0] would wrongly yield '' since
        # s[:-0] == s[:0].
        path_info = environ.get('PATH_INFO', u'')
        if path_info:
            script_url = script_url[:-len(path_info)]
        return force_unicode(script_url)
    return force_unicode(environ.get('SCRIPT_NAME', u''))
| mit |
kingctan/Misago | misago/forums/tests/test_lists.py | 8 | 1362 | from misago.acl.testutils import override_acl
from misago.users.testutils import AuthenticatedUserTestCase
from misago.forums.lists import get_forums_list, get_forum_path
from misago.forums.models import Forum
class ForumsListsTests(AuthenticatedUserTestCase):
    """Tests for get_forums_list/get_forum_path against a fully visible tree."""
    def setUp(self):
        super(ForumsListsTests, self).setUp()
        # Grant the test user see/browse permission on every forum so the
        # helpers operate on the complete tree.
        forums_acl = {'forums': {}, 'visible_forums': []}
        for forum in Forum.objects.all_forums():
            forums_acl['visible_forums'].append(forum.pk)
            forums_acl['forums'][forum.pk] = {'can_see': 1, 'can_browse': 1}
        override_acl(self.user, forums_acl)
    def test_root_forums_list_no_parent(self):
        """get_forums_list returns all children of root nodes"""
        # presumably the default fixture creates exactly 3 root-level
        # forums — confirm against the project's test fixtures.
        self.assertEqual(len(get_forums_list(self.user)), 3)
    def test_root_forums_list_with_parents(self):
        """get_forums_list returns all children of given node"""
        # The enumerate index i is unused; the loop only needs the nodes.
        for i, node in enumerate(get_forums_list(self.user)):
            child_nodes = len(get_forums_list(self.user, node))
            self.assertEqual(child_nodes, len(node.get_descendants()))
    def test_get_forum_path(self):
        """get_forum_path returns one ancestor per tree level"""
        for node in get_forums_list(self.user):
            parent_nodes = len(get_forum_path(node))
            self.assertEqual(parent_nodes, node.level)
| gpl-2.0 |
nwjs/chromium.src | tools/perf/experimental/representative_perf_test_limit_adjuster/adjust_upper_limits.py | 1 | 6803 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import os
import sys
import shutil
import subprocess
import tempfile
CHROMIUM_PATH = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
TOOLS_PERF_PATH = os.path.join(CHROMIUM_PATH, 'tools', 'perf')
sys.path.insert(1, TOOLS_PERF_PATH)
from core.external_modules import pandas
RUNS_USED_FOR_LIMIT_UPDATE = 30
CHANGE_PERCENTAGE_LIMIT = 0.01
SWARMING_PATH = os.path.join(
CHROMIUM_PATH, 'tools', 'swarming_client', 'swarming.py')
UPPER_LIMITS_DATA_DIR = os.path.join(
CHROMIUM_PATH, 'testing', 'scripts', 'representative_perf_test_data')
def FetchItemIds(tags, limit):
  """Fetches the item id of tasks described by the tags.
  Args:
    tags: The tags which describe the task such as OS and buildername.
    limit: The number of runs to look at.
  Returns:
    A list containing the item Id of the tasks.
  """
  # Query completed representative-perf tasks on chromium.gpu.fyi for the
  # given OS/builder; only the task_id field is requested.
  swarming_attributes = (
      'tasks/list?tags=name:rendering_representative_perf_tests&tags=os:{os}'
      '&tags=buildername:{buildername}&tags=master:chromium.gpu.fyi&state='
      'COMPLETED&fields=cursor,items(task_id)').format(**tags)
  query = [
      SWARMING_PATH, 'query', '-S', 'chromium-swarm.appspot.com', '--limit',
      str(limit), swarming_attributes]
  output = json.loads(subprocess.check_output(query))
  # NOTE(review): .get('items') is None when the response has no 'items'
  # key; callers extend() a list with this value — confirm upstream.
  return output.get('items')
def FetchItemData(task_id, benchmark, index, temp_dir):
  """Fetches the performance values (AVG & CI ranges) of one swarming task.
  Args:
    task_id: The swarming task id to collect output for.
    benchmark: The benchmark the task ran (rendering.desktop/mobile).
    index: Value stored in the 'index' column of the returned data_frame.
    temp_dir: The temp directory to store task data in.
  Returns:
    A data_frame with 'stories', 'avg', 'ci_095' and 'index' columns for
    the frame_times metric, or None when the output cannot be collected
    or parsed.
  """
  output_directory = os.path.abspath(
      os.path.join(temp_dir, task_id))
  query = [
      SWARMING_PATH, 'collect', '-S', 'chromium-swarm.appspot.com',
      '--task-output-dir', output_directory, task_id]
  try:
    subprocess.check_output(query)
  except Exception as e:
    # Keep going: partially collected output may still contain results.
    print(e)

  result_file_path = os.path.join(
      output_directory, '0', 'rendering.' + benchmark, 'perf_results.csv')
  try:
    df = pandas.read_csv(result_file_path)
    df = df.loc[df['name'] == 'frame_times']
    df = df[['stories', 'avg', 'ci_095']]
    df['index'] = index
    return df
  # Missing file, absent columns or malformed CSV from a failed task.
  # (Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.)
  except (IOError, OSError, KeyError, ValueError):
    print("CSV results were not produced!")
def GetPercentileValues(benchmark, tags, limit, percentile):
  """Get the percentile value of recent runs described by given tags.

  Given the tags, benchmark this function fetches the data of last {limit}
  runs, and find the percentile value for each story.

  Args:
    benchmark: The benchmark these task are on (desktop/mobile).
    tags: The tags which describe the tasks such as OS and buildername.
    limit: The number of runs to look at.
    percentile: the percentile to return.

  Returns:
    A dictionary with averages and confidence interval ranges calculated
    from the percentile of recent runs, or None if no data was found.
  """
  items = []
  for tag_set in tags:
    # FetchItemIds returns None when the swarming query has no 'items'.
    items.extend(FetchItemIds(tag_set, limit) or [])

  dfs = []
  # Create the temp dir *before* the try block: if mkdtemp itself raised
  # inside the try, the finally clause would hit a NameError on temp_dir
  # and mask the original exception.
  temp_dir = tempfile.mkdtemp('perf_csvs')
  try:
    # enumerate() already advances idx; the original extra `idx += 1`
    # was a no-op and has been dropped.
    for idx, item in enumerate(items):
      dfs.append(FetchItemData(item['task_id'], benchmark, idx, temp_dir))
  finally:
    shutil.rmtree(temp_dir)

  # FetchItemData returns None for tasks without a results CSV; drop those
  # explicitly (pandas.concat would raise on an all-None/empty list).
  dfs = [df for df in dfs if df is not None]
  if not dfs:
    return None
  data_frame = pandas.concat(dfs, ignore_index=True)
  if data_frame.empty:
    return None

  # Pivot to stories x runs, then take the requested percentile per story.
  avg_df = data_frame.pivot(index='stories', columns='index', values='avg')
  upper_limit = avg_df.quantile(percentile, axis=1)
  ci_df = data_frame.pivot(index='stories', columns='index', values='ci_095')
  upper_limit_ci = ci_df.quantile(percentile, axis=1)
  results = {}
  for story in avg_df.index:
    results[story] = {
        'avg': round(upper_limit[story], 3),
        'ci_095': round(upper_limit_ci[story], 3),
    }
  return results
def MeasureNewUpperLimit(old_value, new_value, att_name, max_change):
  """Adjusts a newly measured upper limit against the current one.

  Prints the change for the attribute `att_name` and returns a tuple of
  (adjusted value rounded to 3 digits, updated maximum relative change).
  """
  if new_value < old_value:
    # An improvement: only move half way toward the lower value so the
    # limit decreases gradually.
    new_value = (old_value + new_value) / 2.0

  change_pct = (new_value - old_value) / old_value if old_value > 0 else 0.0

  print(
    ' {}:\t\t {} -> {} \t({:.2f}%)'.format(
      att_name, old_value, new_value, change_pct * 100))

  if new_value < 0.01:
    print('WARNING: New selected value is close to 0.')

  return (round(new_value, 3), max(max_change, abs(change_pct)))
def RecalculateUpperLimits(data_point_count):
  """Recalculates the upper limits using the data of recent runs.

  This method replaces the existing JSON file which contains the upper limits
  used by representative perf tests if the changes of upper limits are
  significant.

  Args:
    data_point_count: The number of runs to use for recalculation.
  """
  # Per-platform swarming tags (os/buildername) used to locate recent runs.
  with open(os.path.join(UPPER_LIMITS_DATA_DIR,
                         'platform_specific_tags.json')) as tags_data:
    platform_specific_tags = json.load(tags_data)
  # The currently checked-in limits; used as the baseline for adjustment.
  with open(
    os.path.join(
      UPPER_LIMITS_DATA_DIR,
      'representatives_frame_times_upper_limit.json')) as current_data:
    current_upper_limits = json.load(current_data)

  max_change = 0.0
  results = {}
  for platform in platform_specific_tags:
    platform_data = platform_specific_tags[platform]
    print('\n- Processing data ({})'.format(platform))
    # 95th percentile of the last data_point_count runs, per story.
    results[platform] = GetPercentileValues(
      platform_data['benchmark'], platform_data['tags'],
      data_point_count, 0.95)

    # Loop over results and adjust base on current values.
    # Stories without a current limit are kept as measured.
    for story in results[platform]:
      if story in current_upper_limits[platform]:
        print(story, ':')
        new_avg, max_change = MeasureNewUpperLimit(
          current_upper_limits[platform][story]['avg'],
          results[platform][story]['avg'], 'AVG', max_change)
        results[platform][story]['avg'] = new_avg

        new_ci, max_change = MeasureNewUpperLimit(
          current_upper_limits[platform][story]['ci_095'],
          results[platform][story]['ci_095'], 'CI', max_change)
        results[platform][story]['ci_095'] = new_ci

  # Only rewrite the data file if at least one limit moved by more than
  # CHANGE_PERCENTAGE_LIMIT (relative change).
  if max_change > CHANGE_PERCENTAGE_LIMIT:
    with open(
      os.path.join(
        UPPER_LIMITS_DATA_DIR,
        'representatives_frame_times_upper_limit.json'
      ), 'w') as outfile:
      json.dump(results, outfile, separators=(',', ': '), indent=2)
    print(
      'Upper limits were updated on '
      'representatives_frame_times_upper_limit.json')
  else:
    print('Changes are small, no need for new limits')
if __name__ == '__main__':
sys.exit(RecalculateUpperLimits(RUNS_USED_FOR_LIMIT_UPDATE)) | bsd-3-clause |
flavour/cert | static/scripts/tools/sync_setup.py | 3 | 5314 | # Setup Script for Sync:
# - only needed for active sites (sync masters)
# - run after 1st run initialization and after the admin account has been created
# - configure the values below either manually or by config script for the respective instance
#
# Use like:
# (Win32 users prefix the config options with 'set ' & no need to export)
# site_type=active
# export site_type
# ...
# cd /path/to/web2py
# python web2py.py -S <appname> -M -R sync_setup.py
import os
import uuid
# Configuration ===============================================================
# Each option is read from the environment with a hard-coded fallback.

# site_type = "active"|"passive"
try:
    site_type = os.environ["site_type"]
except KeyError:
    site_type = "active"

# proxy URL, e.g. "http://proxy.example.com:3128"
try:
    proxy_url = os.environ["proxy_url"]
except KeyError:
    proxy_url = None

# Passive site URL (required)
try:
    passive_site_url = os.environ["passive_site_url"]
except KeyError:
    passive_site_url = "http://www.example.com/eden"

# Passive site admin username and password
try:
    passive_site_username = os.environ["passive_site_username"]
except KeyError:
    passive_site_username = "admin@example.com"
try:
    passive_site_password = os.environ["passive_site_password"]
except KeyError:
    passive_site_password = "testing"

# Resource names (master table names)
# If sync_resources_1 is set, also collect sync_resources_2, _3, ... until
# the first gap; otherwise fall back to the default resource list below.
try:
    resources = [ os.environ["sync_resources_1"] ]
except KeyError:
    resources = [
        "pr_person",
        "org_organisation",
        "org_office",
        "req_req"
    ]
else:
    for i in range(2, 500):
        try:
            resources.append(os.environ["sync_resources_%i" % i])
        except KeyError:
            break

# Synchronization interval, minutes
# NOTE(review): the environment value stays a string while the default is
# the int 2 -- confirm `sync_interval * 60` below behaves as intended
# when the variable is set.
try:
    sync_interval = os.environ["sync_interval"]
except KeyError:
    sync_interval = 2

# End of configuration options ================================================
# Load models
s3mgr.load("sync_config")

# Only active sites need the sync configuration below.
if site_type == "active":

    # Settings
    sync_config = db.sync_config
    config = Storage(proxy=proxy_url)
    # There is at most one configuration record; update it if present.
    record = db(sync_config.id!=None).select(sync_config.id, limitby=(0, 1)).first()
    if record:
        record.update_record(**config)
    else:
        sync_config.insert(**config)

    # Repository
    sync_repository = db.sync_repository
    repository = Storage(name="Passive",
                         url=passive_site_url,
                         username=passive_site_username,
                         password=passive_site_password)
    q = (sync_repository.name == repository.name)
    record = db(q).select(sync_repository.id, limitby=(0, 1)).first()
    if record:
        repository_id = record.id
        # Re-activate the repository in case it had been deleted before.
        repository.update(deleted=False)
        record.update_record(**repository)
    else:
        repository_id = sync_repository.insert(**repository)
    repository.id = repository_id
    if not repository_id:
        raise RuntimeError("Cannot register or update peer repository")
    else:
        # Try to auto-register this site at the peer; fall back to a
        # warning asking for manual registration.
        success = s3base.S3Sync().request_registration(repository)
        if not success:
            print >>sys.stderr, "Could not auto-register repository, please register manually"

    # Resources
    sync_policies = s3base.S3ImportItem.POLICY
    sync_task = db.sync_task
    # One sync task per resource; re-activate existing (deleted) tasks.
    for resource_name in resources:
        task = Storage(resource_name=resource_name,
                       repository_id=repository_id)
        q = (sync_task.repository_id == repository_id) & \
            (sync_task.resource_name == resource_name)
        record = db(q).select(sync_repository.id, limitby=(0, 1)).first()
        if record:
            task.update(deleted=False)
            record.update_record(**task)
        else:
            sync_task.insert(**task)

    # Scheduler task
    task = str(uuid.uuid4())
    function_name="sync_synchronize"
    args = [repository_id]
    repeats = 0
    period = sync_interval * 60
    timeout = 600
    # Run the scheduled job as the (first) admin user, if one exists.
    gtable = db.auth_membership
    query = (gtable.group_id == ADMIN) # & (gtable.deleted != True)
    record = db(query).select(gtable.user_id, limitby=(0, 1)).first()
    vars = dict()
    if record:
        vars.update(user_id = record.user_id)
    # Schedule the synchronization to repeat indefinitely for one year.
    now = datetime.datetime.utcnow()
    then = now + datetime.timedelta(days=365)
    scheduler_task_id = current.s3task.schedule_task(task,
                                                     function_name=function_name,
                                                     args=args,
                                                     vars=vars,
                                                     start_time=now,
                                                     stop_time=then,
                                                     repeats=repeats,
                                                     period=period,
                                                     timeout=timeout,
                                                     ignore_duplicate=False)

    # Job link
    # Tie the scheduler task back to the repository via a sync_job record.
    if scheduler_task_id:
        sync_job = db.sync_job
        job = Storage(repository_id=repository_id,
                      scheduler_task_id=scheduler_task_id)
        record = db().select(sync_job.id, limitby=(0, 1)).first()
        if record:
            record.update_record(**job)
        else:
            sync_job.insert(**job)

    db.commit()
| mit |
iohannez/gnuradio | gr-filter/examples/synth_to_chan.py | 7 | 3891 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
    """Build and run a PFB synthesizer -> channelizer demo flowgraph."""
    # Number of complex samples to process before stopping.
    N = 1000000
    # Base sample rate and the tone frequencies of the test signals.
    fs = 8000
    freqs = [100, 200, 300, 400, 500]
    # Channel count of the filter banks (> len(freqs), so some channels
    # carry only noise).
    nchans = 7

    # One sine source feeding an NBFM transmitter per test frequency.
    sigs = list()
    fmtx = list()
    for fi in freqs:
        s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
        fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6, fh=0.925*(4*fs)/2.0)
        sigs.append(s)
        fmtx.append(fm)

    # Prototype low-pass taps for the polyphase synthesis filter bank.
    syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
    print("Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
                                                               len(syntaps) / nchans))
    # Prototype taps for the channelizer that splits the signal back apart.
    chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
    print("Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
                                                                 len(chtaps) / nchans))
    filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
    channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)

    noise_level = 0.01
    head = blocks.head(gr.sizeof_gr_complex, N)
    noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
    addnoise = blocks.add_cc()
    snk_synth = blocks.vector_sink_c()

    tb = gr.top_block()

    # synthesizer -> head (limit to N samples) -> add noise -> channelizer;
    # also tap the noisy synthesized signal into its own sink.
    tb.connect(noise, (addnoise,0))
    tb.connect(filtbank, head, (addnoise, 1))
    tb.connect(addnoise, channelizer)
    tb.connect(addnoise, snk_synth)

    # Route each FM-modulated tone into its own synthesizer input; capture
    # every channelizer output in a separate vector sink.
    snk = list()
    for i,si in enumerate(sigs):
        tb.connect(si, fmtx[i], (filtbank, i))

    for i in range(nchans):
        snk.append(blocks.vector_sink_c())
        tb.connect((channelizer, i), snk[i])

    tb.run()

    if 1:
        channel = 1
        # Skip the filter transient at the start of the capture.
        data = snk[channel].data()[1000:]

        f1 = pyplot.figure(1)
        s1 = f1.add_subplot(1,1,1)
        s1.plot(data[10000:10200] )
        s1.set_title(("Output Signal from Channel %d" % channel))

        fftlen = 2048
        winfunc = numpy.blackman
        #winfunc = numpy.hamming

        # PSD of the selected channelizer output.
        f2 = pyplot.figure(2)
        s2 = f2.add_subplot(1,1,1)
        s2.psd(data, NFFT=fftlen,
               Fs = nchans*fs,
               noverlap=fftlen / 4,
               window = lambda d: d*winfunc(fftlen))
        s2.set_title(("Output PSD from Channel %d" % channel))

        # PSD of the full synthesized (multi-channel) signal.
        f3 = pyplot.figure(3)
        s3 = f3.add_subplot(1,1,1)
        s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
               Fs = nchans*fs,
               noverlap=fftlen / 4,
               window = lambda d: d*winfunc(fftlen))
        s3.set_title("Output of Synthesis Filter")

        pyplot.show()


if __name__ == "__main__":
    main()
| gpl-3.0 |
breisfeld/avoplot | examples/adv_sine_wave.py | 3 | 8650 | import numpy
import matplotlib.pyplot as plt
import math
from avoplot import plugins, series, controls, subplots
from avoplot.gui import widgets
import wx
plugin_is_GPL_compatible = True
class TrigFuncSubplot(subplots.AvoPlotXYSubplot):
    def my_init(self):
        """
        When defining your own subplot classes, you should not need to override
        the __init__ method of the base class. Instead you should define a
        my_init() method which takes no args. This will be called automatically
        when the subplot is created. Use this to customise the subplot to suit
        your specific needs - settings titles, axis formatters etc.
        """

        #call the parent class's my_init() method. This is not required, unless
        #you want to make use of any customisation done by the parent class.
        #Note that this includes any control panels defined by the parent class!
        super(TrigFuncSubplot, self).my_init()

        #set up some axis titles (x defaults to radians; see the units panel)
        ax = self.get_mpl_axes()
        ax.set_xlabel(r'$\theta$ (radians)')
        ax.set_ylabel('y')

        #add the units control panel to this subplot to allow the user to change
        #the x-axis units.
        self.add_control_panel(TrigSubplotUnitsCtrl(self))

        #set the initial name of the subplot
        self.set_name("Trig. Function Subplot")
class SineWaveSeries(series.XYDataSeries):
    """
    Define our own data series type for Sine data. Unlike for subplots, when
    defining custom data series, we do override the __init__ method.
    """
    def __init__(self, *args, **kwargs):
        super(SineWaveSeries, self).__init__(*args, **kwargs)

        #add a control for this data series to allow the user to change the
        #frequency of the wave using a slider.
        self.add_control_panel(SineWaveFreqCtrl(self))

    @staticmethod
    def get_supported_subplot_type():
        """
        This is how we restrict which data series can be plotted into which
        types of subplots. Specialised subplots may provide controls for dealing
        with very specific types of data - for example, our TrigFuncSubplot
        allows the x-axis to be switched between degrees and radians, it would
        therefore make no sense to allow time series data to be plotted into it.
        However, it might make sense to allow a SineWaveSeries to be plotted
        into a general AvoPlotXYSuplot, and therefore this is permitted by
        AvoPlot. The rule is as follows:

        A data series may be plotted into a subplot if the subplot is an
        instance of the class returned by its get_supported_subplot_type()
        method or any of its base classes.
        """
        return TrigFuncSubplot
class AdvExamplePlugin(plugins.AvoPlotPluginSimple):
    """
    This class is the same as that used for the Sine wave example, except
    that we use the SineWaveSeries data series class that we defined above
    rather than the generic XYDataSeries class used before.
    """
    def __init__(self):
        super(AdvExamplePlugin, self).__init__("Example Plugin with Controls",
                                               SineWaveSeries)

        self.set_menu_entry(['Examples', 'Adv. Sine Wave'],
                            "Plot a sine wave with variable frequency")

    def plot_into_subplot(self, subplot):
        """Create the initial sine series and add it to *subplot*.

        Returns True to indicate that plotting succeeded.
        """
        x_data = numpy.linspace(0, 7, 500)
        y_data = numpy.sin(x_data)

        data_series = SineWaveSeries("adv sine wave", xdata=x_data,
                                     ydata=y_data)

        subplot.add_data_series(data_series)

        return True
def rad2deg(theta, pos):
    """Format *theta* (radians) as a degrees string for a FuncFormatter.

    The *pos* argument is required by matplotlib's FuncFormatter callback
    signature but is unused here.
    """
    degrees = math.degrees(theta)
    return '%0.2f' % degrees
class TrigSubplotUnitsCtrl(controls.AvoPlotControlPanelBase):
    """
    Control panel for trig function subplots allowing their x axis units
    to be changed from radians to degrees.
    """
    def __init__(self, subplot):
        #call the parent class's __init__ method, passing it the name that we
        #want to appear on the control panels tab.
        super(TrigSubplotUnitsCtrl, self).__init__("Units")

        #store the subplot object that this control panel is associated with,
        #so that we can access it later
        self.subplot = subplot

    def setup(self, parent):
        """
        This is where all the controls get added to the control panel. You
        *must* call the setup method of the parent class before doing any of
        your own setup.
        """

        #call parent class's setup method - do this before anything else
        super(TrigSubplotUnitsCtrl, self).setup(parent)

        #create a choice box for the different units for the x axis
        #we use a avoplot.gui.widgets.ChoiceSetting object which is a
        #thin wrapper around a wx.ChoiceBox, but provides a label and
        #automatically registers the event handler.
        units_choice = widgets.ChoiceSetting(self, "x-axis units:", "Radians",
                                             ["Radians", "Degrees"],
                                             self.on_units_change)

        #add the choice widget to the control panel sizer
        self.Add(units_choice, 0,wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border=10)

    def on_units_change(self, evnt):
        """
        Event handler for change of x axis units events.
        """
        #get the matplotlib axes object from the subplot
        ax = self.subplot.get_mpl_axes()

        #change the axis labels and label formatting based on the choice of
        #units (rad2deg converts the tick values for degree display)
        if evnt.GetString() == 'Degrees':
            ax.set_xlabel(r'$\theta$ (degrees)')
            ax.xaxis.set_major_formatter(plt.FuncFormatter(rad2deg))
        else:
            ax.set_xlabel(r'$\theta$ (radians)')
            ax.xaxis.set_major_formatter(plt.ScalarFormatter())

        #draw our changes in the display
        self.subplot.update()
class SineWaveFreqCtrl(controls.AvoPlotControlPanelBase):
    """
    Control panel for sine wave data series allowing their frequency to
    be changed using a slider.
    """
    def __init__(self, series):
        #call the parent class's __init__ method, passing it the name that we
        #want to appear on the control panels tab.
        super(SineWaveFreqCtrl, self).__init__("Freq.")

        #store the data series object that this control panel is associated with,
        #so that we can access it later
        self.series = series

    def setup(self, parent):
        """
        This is where all the controls get added to the control panel. You
        *must* call the setup method of the parent class before doing any of
        your own setup.
        """

        #call parent class's setup method - do this before anything else
        super(SineWaveFreqCtrl, self).setup(parent)

        #create a label for the slider
        label = wx.StaticText(self, wx.ID_ANY, 'Frequency')
        self.Add(label, 0,
                 wx.LEFT | wx.RIGHT | wx.TOP | wx.ALIGN_CENTER_HORIZONTAL,
                 border=10)

        #create a frequency slider (range 1-30, labelled)
        self.slider = wx.Slider(self, wx.ID_ANY, value=1, minValue=1,
                                maxValue=30, style=wx.SL_LABELS)

        #add the slider to the control panel's sizer
        self.Add(self.slider, 0,
                 wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, border=10)

        #register an event handler for slider change events
        wx.EVT_COMMAND_SCROLL(self, self.slider.GetId(), self.on_slider_change)

    def on_slider_change(self, evnt):
        """
        Event handler for frequency slider change events.
        """

        #change the frequency of the sine wave data accordingly
        #(recompute the whole series rather than scaling in place)
        f = self.slider.GetValue()
        x_data = numpy.linspace(0, 7, 2000)
        y_data = numpy.sin(x_data * f)

        #change the data in the series object
        self.series.set_xy_data(xdata=x_data, ydata=y_data)

        #draw our changes on the display
        self.series.update()
#register the plugin with AvoPlot so it appears in the Examples menu
plugins.register(AdvExamplePlugin())
| gpl-3.0 |
Sbalbp/DIRAC | RequestManagementSystem/Agent/RequestOperations/ForwardDISET.py | 3 | 2391 | ########################################################################
# $HeadURL $
# File: ForwardDISET.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/22 12:40:06
########################################################################
""" :mod: ForwardDISET
==================
.. module: ForwardDISET
:synopsis: DISET forwarding operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
DISET forwarding operation handler
"""
__RCSID__ = "$Id $"
# #
# @file ForwardDISET.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/22 12:40:22
# @brief Definition of ForwardDISET class.
# # imports
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.Core.DISET.RPCClient import executeRPCStub
from DIRAC.Core.Utilities import DEncode
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
########################################################################
class ForwardDISET( OperationHandlerBase ):
  """
  .. class:: ForwardDISET

  functor forwarding DISET operations
  """

  def __init__( self, operation = None, csPath = None ):
    """ c'tor

    :param Operation operation: an Operation instance
    :param str csPath: CS path for this handler
    """
    # # call base class c'tor
    OperationHandlerBase.__init__( self, operation, csPath )

  def __call__( self ):
    """ execute RPC stub """
    # # decode arguments
    try:
      decode, length = DEncode.decode( self.operation.Arguments )
      self.log.debug( "decoded len=%s val=%s" % ( length, decode ) )
    except ValueError, error:
      # Malformed serialised arguments: mark the whole operation as failed.
      self.log.exception( error )
      self.operation.Error = str( error )
      self.operation.Status = "Failed"
      return S_ERROR( str( error ) )
    # No delegated identities in the requests
    # (strip the credentials so the stub executes with this agent's identity)
    if 'delegatedDN' in decode[0][1]:
      decode[0][1].pop('delegatedDN')
    if 'delegatedGroup' in decode[0][1]:
      decode[0][1].pop('delegatedGroup')
    # Replay the recorded RPC call against the target service.
    forward = executeRPCStub( decode )
    if not forward["OK"]:
      self.log.error( "unable to execute operation", "'%s' : %s" % ( self.operation.Type, forward["Message"] ) )
      self.operation.Error = forward["Message"]
      return forward
    self.log.info( "DISET forwarding done" )
    self.operation.Status = "Done"
    return S_OK()
| gpl-3.0 |
dnozay/lettuce | tests/integration/lib/Django-1.2.5/django/db/models/fields/subclassing.py | 45 | 4384 | """
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the __metaclass__ for your Field subclass, implement
to_python() and the other necessary methods and everything will work seamlessly.
"""
from inspect import getargspec
from warnings import warn
def call_with_connection(func):
    """Wrap *func* so it is always called with a `connection` keyword.

    Emits a PendingDeprecationWarning (at class creation and at call time)
    for Field methods whose signature has not been updated to accept
    `connection`; for those legacy signatures the injected argument is
    stripped again before the call.
    """
    # Does the wrapped method accept `connection` (directly or via **kwargs)?
    arg_names, varargs, varkwargs, defaults = getargspec(func)
    updated = ('connection' in arg_names or varkwargs)
    if not updated:
        warn("A Field class whose %s method hasn't been updated to take a "
             "`connection` argument." % func.__name__,
             PendingDeprecationWarning, stacklevel=2)

    def inner(*args, **kwargs):
        if 'connection' not in kwargs:
            # Caller did not supply a connection: fall back to the default
            # connection and warn about the legacy call style.
            from django.db import connection
            kwargs['connection'] = connection
            warn("%s has been called without providing a connection argument. " %
                 func.__name__, PendingDeprecationWarning,
                 stacklevel=1)
        if updated:
            return func(*args, **kwargs)
        # Legacy signature: remove the argument it cannot accept.
        if 'connection' in kwargs:
            del kwargs['connection']
        return func(*args, **kwargs)
    return inner
def call_with_connection_and_prepared(func):
    """Wrap *func* so it is always called with `connection` and `prepared`
    keyword arguments.

    Like call_with_connection(), but for the get_db_prep_* methods which
    additionally take a `prepared` flag; legacy signatures have both
    injected arguments stripped again before the call.
    """
    arg_names, varargs, varkwargs, defaults = getargspec(func)
    # Updated signatures accept both new arguments (directly or via **kwargs).
    updated = (
        ('connection' in arg_names or varkwargs) and
        ('prepared' in arg_names or varkwargs)
    )
    if not updated:
        warn("A Field class whose %s method hasn't been updated to take "
             "`connection` and `prepared` arguments." % func.__name__,
             PendingDeprecationWarning, stacklevel=2)

    def inner(*args, **kwargs):
        if 'connection' not in kwargs:
            # Caller did not supply a connection: fall back to the default
            # connection and warn about the legacy call style.
            from django.db import connection
            kwargs['connection'] = connection
            warn("%s has been called without providing a connection argument. " %
                 func.__name__, PendingDeprecationWarning,
                 stacklevel=1)
        if updated:
            return func(*args, **kwargs)
        # Legacy signature: remove the arguments it cannot accept.
        if 'connection' in kwargs:
            del kwargs['connection']
        if 'prepared' in kwargs:
            del kwargs['prepared']
        return func(*args, **kwargs)
    return inner
class LegacyConnection(type):
    """
    A metaclass to normalize arguments give to the get_db_prep_* and db_type
    methods on fields.
    """
    def __new__(cls, name, bases, attrs):
        new_cls = super(LegacyConnection, cls).__new__(cls, name, bases, attrs)
        # Wrap the db_type/get_db_prep_* methods so legacy signatures keep
        # working while new-style callers can pass connection (and prepared).
        for attr in ('db_type', 'get_db_prep_save'):
            setattr(new_cls, attr, call_with_connection(getattr(new_cls, attr)))
        for attr in ('get_db_prep_lookup', 'get_db_prep_value'):
            setattr(new_cls, attr, call_with_connection_and_prepared(getattr(new_cls, attr)))
        return new_cls
class SubfieldBase(LegacyConnection):
    """
    A metaclass for custom Field subclasses. This ensures the model's attribute
    has the descriptor protocol attached to it.
    """
    def __new__(cls, name, bases, attrs):
        new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
        # Replace contribute_to_class with a version that also installs the
        # Creator descriptor (see make_contrib below), so assignments to the
        # model attribute run through the field's to_python().
        new_class.contribute_to_class = make_contrib(
            new_class, attrs.get('contribute_to_class')
        )
        return new_class
class Creator(object):
    """
    A placeholder class that provides a way to set the attribute on the
    model: a data descriptor that funnels every assignment through the
    field's to_python() conversion before storing it on the instance.
    """

    def __init__(self, field):
        self.field = field

    def __get__(self, instance, owner=None):
        # Class-level access has no instance dict to read from.
        if instance is None:
            raise AttributeError('Can only be accessed via an instance.')
        return instance.__dict__[self.field.name]

    def __set__(self, instance, raw_value):
        converted = self.field.to_python(raw_value)
        instance.__dict__[self.field.name] = converted
def make_contrib(superclass, func=None):
    """
    Returns a suitable contribute_to_class() method for the Field subclass.

    If 'func' is passed in, it is the existing contribute_to_class() method
    on the subclass and it is called before anything else. It is assumed in
    this case that the existing contribute_to_class() calls all the
    necessary superclass methods.
    """
    def contribute_to_class(self, cls, name):
        # Delegate to the subclass's own hook when one exists; otherwise
        # fall back to the inherited implementation.
        if func is not None:
            func(self, cls, name)
        else:
            super(superclass, self).contribute_to_class(cls, name)
        # Install the descriptor so attribute assignment runs to_python().
        setattr(cls, self.name, Creator(self))
    return contribute_to_class
| gpl-3.0 |
migonzalvar/youtube-dl | youtube_dl/extractor/dump.py | 120 | 1036 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class DumpIE(InfoExtractor):
    # Matches video pages like http://www.dump.com/<id>/
    _VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'

    _TEST = {
        'url': 'http://www.dump.com/oneus/',
        'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
        'info_dict': {
            'id': 'oneus',
            'ext': 'flv',
            'title': "He's one of us.",
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }

    def _real_extract(self, url):
        """Extract the direct video URL and metadata from a dump.com page."""
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        webpage = self._download_webpage(url, video_id)
        # The flash player is configured via s1.addVariable("file", "<url>").
        video_url = self._search_regex(
            r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')

        title = self._og_search_title(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
        }
| unlicense |
webplate/astrini | InputHandler.py | 1 | 1070 | # -*- coding: utf-8-*-
from direct.showbase.DirectObject import DirectObject
import sys, time
class InputHandler(DirectObject):
    """Keyboard bindings for the Astrini world (a Panda3D DirectObject)."""
    def __init__(self, world):
        self.world = world
        #setting it active by default
        self.setActive()

    def setInactive(self):
        # Main Modifier
        # Drop every key binding registered via accept().
        self.ignoreAll()

    def setActive(self):
        # Main Modifier
        # NOTE(review): `base` is the Panda3D ShowBase global -- assumed to
        # exist before this handler is activated.
        base.accept("escape", sys.exit)
        #My shortcuts
        # a/w stop following/looking; e/r/f follow earth/moon/sun,
        # control-e/r/f look at earth/moon/sun.
        self.accept("a",self.world.Camera.hm.stop_follow)
        self.accept("w",self.world.Camera.hm.stop_look)
        self.accept("e",self.world.Camera.hm.follow,[self.world.earth])
        self.accept("control-e",self.world.Camera.hm.look,[self.world.earth])
        self.accept("r",self.world.Camera.hm.follow,[self.world.moon])
        self.accept("control-r",self.world.Camera.hm.look,[self.world.moon])
        self.accept("f",self.world.Camera.hm.follow,[self.world.sun])
        self.accept("control-f",self.world.Camera.hm.look,[self.world.sun])
| gpl-3.0 |
jss-emr/openerp-7-src | openerp/tests/test_acl.py | 13 | 5091 | import unittest2
from lxml import etree
import openerp
from openerp.tools.misc import mute_logger
import common
# test group that demo user should not have
GROUP_TECHNICAL_FEATURES = 'base.group_no_one'
class TestACL(common.TransactionCase):

    def setUp(self):
        super(TestACL, self).setUp()
        self.res_currency = self.registry('res.currency')
        self.res_partner = self.registry('res.partner')
        self.res_users = self.registry('res.users')
        # NOTE(review): assumes the demo user's database id is 3.
        self.demo_uid = 3
        self.tech_group = self.registry('ir.model.data').get_object(self.cr, self.uid,
                                                                    *(GROUP_TECHNICAL_FEATURES.split('.')))

    def test_field_visibility_restriction(self):
        """Check that model-level ``groups`` parameter effectively restricts access to that
           field for users who do not belong to one of the explicitly allowed groups"""
        # Verify the test environment first
        original_fields = self.res_currency.fields_get(self.cr, self.demo_uid, [])
        form_view = self.res_currency.fields_view_get(self.cr, self.demo_uid, False, 'form')
        view_arch = etree.fromstring(form_view.get('arch'))
        has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
        self.assertFalse(has_tech_feat, "`demo` user should not belong to the restricted group before the test")
        self.assertTrue('rate' in original_fields, "'rate' field must be properly visible before the test")
        self.assertNotEquals(view_arch.xpath("//field[@name='rate']"), [],
                             "Field 'rate' must be found in view definition before the test")

        # Restrict access to the field and check it's gone
        # (both from fields_get() and from the form view arch)
        self.res_currency._columns['rate'].groups = GROUP_TECHNICAL_FEATURES
        fields = self.res_currency.fields_get(self.cr, self.demo_uid, [])
        form_view = self.res_currency.fields_view_get(self.cr, self.demo_uid, False, 'form')
        view_arch = etree.fromstring(form_view.get('arch'))
        self.assertFalse('rate' in fields, "'rate' field should be gone")
        self.assertEquals(view_arch.xpath("//field[@name='rate']"), [],
                          "Field 'rate' must not be found in view definition")

        # Make demo user a member of the restricted group and check that the field is back
        self.tech_group.write({'users': [(4, self.demo_uid)]})
        has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
        fields = self.res_currency.fields_get(self.cr, self.demo_uid, [])
        form_view = self.res_currency.fields_view_get(self.cr, self.demo_uid, False, 'form')
        view_arch = etree.fromstring(form_view.get('arch'))
        #import pprint; pprint.pprint(fields); pprint.pprint(form_view)
        self.assertTrue(has_tech_feat, "`demo` user should now belong to the restricted group")
        self.assertTrue('rate' in fields, "'rate' field must be properly visible again")
        self.assertNotEquals(view_arch.xpath("//field[@name='rate']"), [],
                             "Field 'rate' must be found in view definition again")

        #cleanup: remove demo from the group and lift the field restriction
        self.tech_group.write({'users': [(3, self.demo_uid)]})
        self.res_currency._columns['rate'].groups = False

    @mute_logger('openerp.osv.orm')
    def test_field_crud_restriction(self):
        "Read/Write RPC access to restricted field should be forbidden"
        # Verify the test environment first
        has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
        self.assertFalse(has_tech_feat, "`demo` user should not belong to the restricted group")
        self.assert_(self.res_partner.read(self.cr, self.demo_uid, [1], ['bank_ids']))
        self.assert_(self.res_partner.write(self.cr, self.demo_uid, [1], {'bank_ids': []}))

        # Now restrict access to the field and check it's forbidden
        self.res_partner._columns['bank_ids'].groups = GROUP_TECHNICAL_FEATURES
        with self.assertRaises(openerp.osv.orm.except_orm):
            self.res_partner.read(self.cr, self.demo_uid, [1], ['bank_ids'])
        with self.assertRaises(openerp.osv.orm.except_orm):
            self.res_partner.write(self.cr, self.demo_uid, [1], {'bank_ids': []})

        # Add the restricted group, and check that it works again
        self.tech_group.write({'users': [(4, self.demo_uid)]})
        has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
        self.assertTrue(has_tech_feat, "`demo` user should now belong to the restricted group")
        self.assert_(self.res_partner.read(self.cr, self.demo_uid, [1], ['bank_ids']))
        self.assert_(self.res_partner.write(self.cr, self.demo_uid, [1], {'bank_ids': []}))

        #cleanup: remove demo from the group and lift the field restriction
        self.tech_group.write({'users': [(3, self.demo_uid)]})
        self.res_partner._columns['bank_ids'].groups = False
unittest2.main()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eli-schwartz/Sigil | src/Resource_Files/plugin_launchers/python/opf_parser.py | 1 | 11711 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Copyright (c) 2014 Kevin B. Hendricks, John Schember, and Doug Massay
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals, division, absolute_import, print_function
from compatibility_utils import unquoteurl
from unipath import pathof
import sys, os
# Container tags that may hold other tags of interest; used by the tag
# iterator to build dotted prefixes such as "package.metadata" while walking.
_OPF_PARENT_TAGS = ['xml', 'package', 'metadata', 'dc-metadata', 'x-metadata', 'manifest', 'spine', 'tours', 'guide', 'bindings']
class Opf_Parser(object):
    """One-pass, hand-rolled parser for an EPUB OPF (package) document.

    Reads the OPF as UTF-8 text and extracts the package attributes,
    metadata entries, manifest id/href/media-type maps, spine order,
    guide references and bindings without using an XML library.  Also
    provides helpers to serialize the package and metadata back to XML.
    """
    def __init__(self, opf_path, debug = False):
        # :param opf_path: filesystem path of the OPF file to parse
        # :param debug: when True, print each parsed tag while parsing
        self._debug = debug
        opf_path = pathof(opf_path)
        self.opfname = os.path.basename(opf_path)
        self.opf = None
        with open(opf_path,'rb') as fp:
            self.opf = fp.read().decode('utf-8')
        self.opos = 0  # current scan offset within self.opf
        self.package = None         # (version, unique-identifier, other attrs)
        self.metadata_attr = None   # attributes of the <metadata> tag
        self.metadata = []          # list of (tagname, attrs, text content)
        self.cover_id = None        # manifest id named by <meta name="cover">
        self.manifest_id_to_href = {}
        self.manifest_id_to_mime = {}
        self.href_to_manifest_id = {}
        self.manifest_id_to_properties = {}
        self.spine = []             # list of (idref, linear, properties)
        self.spine_ppd = None       # spine page-progression-direction
        self.guide = []             # list of (type, title, href)
        self.bindings = []          # list of (media-type, handler)
        self._parseData()
    # OPF tag iterator
    def _opf_tag_iter(self):
        # Yields (dotted_prefix, tagname, attrs, text) tuples.  Parent tags
        # (in _OPF_PARENT_TAGS) are yielded on their begin tag; leaf tags are
        # yielded once their end/single tag is seen, so attrs and text are
        # complete by the time they are reported.
        tcontent = last_tattr = None
        prefix = []
        while True:
            text, tag = self._parseopf()
            if text is None and tag is None:
                break
            if text is not None:
                tcontent = text.rstrip(" \t\v\f\r\n")
            else: # we have a tag
                ttype, tname, tattr = self._parsetag(tag)
                if ttype == "begin":
                    tcontent = None
                    prefix.append(tname)
                    if tname in _OPF_PARENT_TAGS:
                        yield ".".join(prefix), tname, tattr, tcontent
                    else:
                        # leaf begin tag: remember attrs until its end tag
                        last_tattr = tattr
                else: # single or end
                    if ttype == "end":
                        prefix.pop()
                        tattr = last_tattr
                        if tattr is None:
                            tattr = {}
                        last_tattr = None
                    elif ttype == 'single':
                        tcontent = None
                    if ttype == 'single' or (ttype == 'end' and tname not in _OPF_PARENT_TAGS):
                        yield ".".join(prefix), tname, tattr, tcontent
                    tcontent = None
    # now parse the OPF to extract manifest, spine , and metadata
    def _parseData(self):
        # Walk every reported tag and route it into the instance fields.
        cnt = 0
        for prefix, tname, tattr, tcontent in self._opf_tag_iter():
            if self._debug:
                print (" Parsing OPF: ", prefix, tname, tattr, tcontent)
            # package
            if tname == "package":
                ver = tattr.pop("version", "2.0")
                uid = tattr.pop("unique-identifier","bookid")
                self.package = (ver, uid, tattr)
                continue
            # metadata
            if tname == "metadata":
                self.metadata_attr = tattr
                continue
            # NOTE(review): because of and/or precedence, the '"metadata" in
            # prefix' guard only applies to dc: tags; <meta>/<link> match
            # anywhere in the document — confirm this is intended.
            if tname in ["meta", "link"] or tname.startswith("dc:") and "metadata" in prefix:
                self.metadata.append((tname, tattr, tcontent))
                if tattr.get("name","") == "cover":
                    self.cover_id = tattr.get("content",None)
                continue
            # manifest
            if tname == "item" and prefix.endswith("manifest"):
                # synthesize an id ("xidNNN") for items that lack one
                nid = "xid%03d" % cnt
                cnt += 1
                id = tattr.pop("id", nid)
                href = tattr.pop("href",'')
                mtype = tattr.pop("media-type",'')
                if mtype == "text/html":
                    mtype = "application/xhtml+xml"
                href = unquoteurl(href)
                properties = tattr.pop("properties",None)
                self.manifest_id_to_href[id] = href
                self.manifest_id_to_mime[id] = mtype
                self.href_to_manifest_id[href] = id
                self.manifest_id_to_properties[id] = properties
                continue
            # spine
            if tname == "spine":
                if tattr is not None:
                    self.spine_ppd = tattr.get("page-progression-direction", None)
                continue
            if tname == "itemref" and prefix.endswith("spine"):
                idref = tattr.pop("idref", "")
                linear = tattr.pop("linear", None)
                properties = tattr.pop("properties", None)
                self.spine.append((idref, linear, properties))
                continue
            # guide
            if tname == "reference" and prefix.endswith("guide"):
                type = tattr.pop("type",'')
                title = tattr.pop("title",'')
                href = unquoteurl(tattr.pop("href",''))
                self.guide.append((type, title, href))
                continue
            # bindings (stored but ignored for now)
            if tname in ["mediaTypes", "mediatypes"] and prefix.endswith("bindings"):
                mtype = tattr.pop("media-type","")
                handler = tattr.pop("handler","")
                self.bindings.append((mtype, handler))
                continue
    # parse and return either leading text or the next tag
    def _parseopf(self):
        # Returns (text, None) for a text run, (None, tag) for a tag,
        # or (None, None) at end of input.  Advances self.opos.
        p = self.opos
        if p >= len(self.opf):
            return None, None
        if self.opf[p] != '<':
            res = self.opf.find('<',p)
            if res == -1 :
                res = len(self.opf)
            self.opos = res
            return self.opf[p:res], None
        # handle comment as a special case
        if self.opf[p:p+4] == '<!--':
            te = self.opf.find('-->',p+1)
            if te != -1:
                te = te+2
        else:
            te = self.opf.find('>',p+1)
            ntb = self.opf.find('<',p+1)
            # malformed markup: a new '<' before this tag closes — treat the
            # span up to it as plain text rather than losing it
            if ntb != -1 and ntb < te:
                self.opos = ntb
                return self.opf[p:ntb], None
        self.opos = te + 1
        return None, self.opf[p:te+1]
    # parses tag to identify: [tname, ttype, tattr]
    # tname: tag name, ttype: tag type ('begin', 'end' or 'single');
    # tattr: dictionary of tag atributes
    def _parsetag(self, s):
        n = len(s)
        p = 1
        tname = None
        ttype = None
        tattr = {}
        while p < n and s[p:p+1] == ' ' : p += 1
        if s[p:p+1] == '/':
            ttype = 'end'
            p += 1
            while p < n and s[p:p+1] == ' ' : p += 1
        b = p
        while p < n and s[p:p+1] not in ('>', '/', ' ', '"', "'","\r","\n") : p += 1
        tname=s[b:p].lower()
        # remove redundant opf: namespace prefixes on opf tags
        if tname.startswith("opf:"):
            tname = tname[4:]
        # some special cases
        if tname.startswith("!--"):
            ttype = 'single'
            comment = s[4:-3].strip()
            tattr['comment'] = comment
        if tname == "?xml":
            tname = "xml"
        if ttype is None:
            # parse any attributes of begin or single tags
            while s.find('=',p) != -1 :
                while p < n and s[p:p+1] == ' ' : p += 1
                b = p
                while p < n and s[p:p+1] != '=' : p += 1
                aname = s[b:p].lower()
                aname = aname.rstrip(' ')
                p += 1
                while p < n and s[p:p+1] == ' ' : p += 1
                if s[p:p+1] in ('"', "'") :
                    qt = s[p:p+1]
                    p = p + 1
                    b = p
                    while p < n and s[p:p+1] != qt: p += 1
                    val = s[b:p]
                    p += 1
                else :
                    # unquoted attribute value: ends at '>', '/', or space
                    b = p
                    while p < n and s[p:p+1] not in ('>', '/', ' ') : p += 1
                    val = s[b:p]
                tattr[aname] = val
        if ttype is None:
            ttype = 'begin'
            # NOTE(review): any '/' after the attributes marks the tag as
            # self-closing; assumes '/' inside attr values was consumed above.
            if s.find('/',p) >= 0:
                ttype = 'single'
        return ttype, tname, tattr
    def handle_quoted_attribute_values(self, value):
        # Escape embedded double quotes so the value can be emitted inside
        # a double-quoted XML attribute.
        if '"' in value:
            value = value.replace('"', '&quot;')
        return value
    def taginfo_toxml(self, taginfo):
        # Serialize one (tagname, attrs, content) triple back to XML text.
        res = []
        tname, tattr, tcontent = taginfo
        res.append('<' + tname)
        if tattr is not None:
            for key in tattr:
                val = self.handle_quoted_attribute_values(tattr[key])
                res.append(' ' + key + '="'+val+'"' )
        if tcontent is not None:
            res.append('>' + tcontent + '</' + tname + '>\n')
        else:
            res.append('/>\n')
        return "".join(res)
    def get_package_tag(self):
        # Rebuild the opening <package ...> tag from the parsed attributes.
        (ver, uid, tattr) = self.package
        packout = []
        packout.append('<package version="%s" unique-identifier="%s"' % (ver, uid))
        if tattr is not None:
            for key in tattr:
                val = self.handle_quoted_attribute_values(tattr[key])
                packout.append(' %s="%s"' % (key, val))
        packout.append(">\n")
        return "".join(packout)
    def get_metadataxml(self):
        # Rebuild the complete <metadata>...</metadata> section as XML text.
        data = []
        tattr = self.metadata_attr
        tag = "<metadata"
        if tattr is not None:
            for key in tattr:
                val = self.handle_quoted_attribute_values(tattr[key])
                tag += ' ' + key + '="'+val+'"'
        tag += '>\n'
        data.append(tag)
        for taginfo in self.metadata:
            data.append(self.taginfo_toxml(taginfo))
        data.append('</metadata>\n')
        return "".join(data)
    def get_metadata_attr(self):
        return self.metadata_attr
    # list of (tname, tattr, tcontent)
    def get_metadata(self):
        return self.metadata
    def get_manifest_id_to_href_dict(self):
        return self.manifest_id_to_href
    def get_manifest_id_to_mime_dict(self):
        return self.manifest_id_to_mime
    def get_href_to_manifest_id_dict(self):
        return self.href_to_manifest_id
    def get_manifest_id_to_properties_dict(self):
        return self.manifest_id_to_properties
    def get_spine_ppd(self):
        return self.spine_ppd
    # list of (idref, linear, properties)
    def get_spine(self):
        return self.spine
    # list of (type, title, href)
    def get_guide(self):
        return self.guide
| gpl-3.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/boto/swf/__init__.py | 145 | 1792 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.ec2.regioninfo import RegionInfo
from boto.regioninfo import get_regions, load_regions
import boto.swf.layer1
# Region-name -> endpoint mapping for SWF, loaded from boto's endpoints data.
REGION_ENDPOINTS = load_regions().get('swf', {})
def regions(**kw_params):
    """
    Get all available regions for the Amazon Simple Workflow service.

    Note: ``**kw_params`` is accepted for signature parity with the other
    boto service modules but is not forwarded anywhere.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    return get_regions('swf', connection_cls=boto.swf.layer1.Layer1)
def connect_to_region(region_name, **kw_params):
    """Connect to SWF in the named region.

    :param region_name: name of the region to connect to
    :param kw_params: forwarded to the region's ``connect`` call
    :return: a connected client, or ``None`` if the region is unknown
    """
    region = next((r for r in regions() if r.name == region_name), None)
    return region.connect(**kw_params) if region is not None else None
| gpl-2.0 |
hhorii/cassandra | pylib/cqlshlib/test/ansi_colors.py | 192 | 6073 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# Bit flag OR'd onto a base color code to select the bright/bold variant.
# NOTE: the 0-prefixed octal literals (010, 001, ...) mark this as Python 2.
LIGHT = 010
ansi_CSI = '\033['
# Matches one ANSI control sequence: CSI, parameter bytes, one final byte.
ansi_seq = re.compile(re.escape(ansi_CSI) + r'(?P<params>[\x20-\x3f]*)(?P<final>[\x40-\x7e])')
ansi_cmd_SGR = 'm' # set graphics rendition
# Each entry: (color code, single-letter tag, primary name, aliases...).
# NOTE(review): the letter 'B' appears for both 010 (dark grey) and 014
# (blue); colors_by_letter keeps only the last one (blue) — confirm intended.
color_defs = (
    (000, 'k', 'black'),
    (001, 'r', 'dark red'),
    (002, 'g', 'dark green'),
    (003, 'w', 'brown', 'dark yellow'),
    (004, 'b', 'dark blue'),
    (005, 'm', 'dark magenta', 'dark purple'),
    (006, 'c', 'dark cyan'),
    (007, 'n', 'light grey', 'light gray', 'neutral', 'dark white'),
    (010, 'B', 'dark grey', 'dark gray', 'light black'),
    (011, 'R', 'red', 'light red'),
    (012, 'G', 'green', 'light green'),
    (013, 'Y', 'yellow', 'light yellow'),
    (014, 'B', 'blue', 'light blue'),
    (015, 'M', 'magenta', 'purple', 'light magenta', 'light purple'),
    (016, 'C', 'cyan', 'light cyan'),
    (017, 'W', 'white', 'light white'),
)
# Lookup tables derived from color_defs below.
colors_by_num = {}
colors_by_letter = {}
colors_by_name = {}
letters_by_num = {}
for colordef in color_defs:
    colorcode = colordef[0]
    colorletter = colordef[1]
    colors_by_num[colorcode] = nameset = set(colordef[2:])
    colors_by_letter[colorletter] = colorcode
    letters_by_num[colorcode] = colorletter
    for c in list(nameset):
        # equivalent names without spaces
        nameset.add(c.replace(' ', ''))
    for c in list(nameset):
        # with "bright" being an alias for "light"
        nameset.add(c.replace('light', 'bright'))
    for c in nameset:
        colors_by_name[c] = colorcode
class ColoredChar:
    """A single character paired with the ANSI color code it was printed in.

    String methods are proxied to the underlying character via __getattr__.
    """
    def __init__(self, c, colorcode):
        self.c = c
        self._colorcode = colorcode
    def colorcode(self):
        return self._colorcode
    def plain(self):
        # The bare character without any color information.
        return self.c
    def __getattr__(self, name):
        # Delegate unknown attributes (isalpha, upper, ...) to the char.
        return getattr(self.c, name)
    def ansi_color(self):
        # Build the SGR parameter string: base color 30-37, with a leading
        # '1;' (bold) when the LIGHT bit (octal 010) is set.
        clr = str(30 + (07 & self._colorcode))
        if self._colorcode & 010:
            clr = '1;' + clr
        return clr
    def __str__(self):
        return "<%s '%r'>" % (self.__class__.__name__, self.colored_repr())
    __repr__ = __str__
    def colored_version(self):
        # The character wrapped in its ANSI color escape, reset afterwards.
        return '%s0;%sm%s%s0m' % (ansi_CSI, self.ansi_color(), self.c, ansi_CSI)
    def colored_repr(self):
        # Like colored_version but with repr()-style escaping of the char.
        if self.c == "'":
            crepr = r"\'"
        elif self.c == '"':
            crepr = self.c
        else:
            crepr = repr(self.c)[1:-1]
        return '%s0;%sm%s%s0m' % (ansi_CSI, self.ansi_color(), crepr, ansi_CSI)
    def colortag(self):
        # Single-letter tag for this char's color (space for neutral).
        return lookup_letter_from_code(self._colorcode)
class ColoredText:
    """A sequence of ColoredChars parsed from ANSI-colored terminal output.

    Construct from a raw string containing ANSI escapes, or from an
    iterable of ColoredChar objects.
    """
    def __init__(self, source=''):
        if isinstance(source, basestring):
            plain, colors = self.parse_ansi_colors(source)
            self.chars = map(ColoredChar, plain, colors)
        else:
            # expected that source is an iterable of ColoredChars (or duck-typed as such)
            self.chars = tuple(source)
    def splitlines(self):
        # Split on newline chars; the '\n' itself is dropped from the output.
        lines = [[]]
        for c in self.chars:
            if c.plain() == '\n':
                lines.append([])
            else:
                lines[-1].append(c)
        return [self.__class__(line) for line in lines]
    def plain(self):
        # The text with all color information stripped.
        return ''.join([c.plain() for c in self.chars])
    def __getitem__(self, index):
        return self.chars[index]
    @classmethod
    def parse_ansi_colors(cls, source):
        # Returns (plain_text, colors) where colors[i] is the color code in
        # effect for plain_text[i].
        # note: strips all control sequences, even if not SGRs.
        colors = []
        plain = ''
        last = 0
        curclr = 0
        for match in ansi_seq.finditer(source):
            prevsegment = source[last:match.start()]
            plain += prevsegment
            colors.extend([curclr] * len(prevsegment))
            if match.group('final') == ansi_cmd_SGR:
                try:
                    curclr = cls.parse_sgr_param(curclr, match.group('params'))
                except ValueError:
                    # malformed SGR parameters: keep the current color
                    pass
            last = match.end()
        prevsegment = source[last:]
        plain += prevsegment
        colors.extend([curclr] * len(prevsegment))
        return ''.join(plain), colors
    @staticmethod
    def parse_sgr_param(curclr, paramstr):
        # Apply an SGR parameter list (e.g. "1;31") to the current color
        # code and return the new code.
        # NOTE(review): oldclr is assigned but never used.
        oldclr = curclr
        args = map(int, paramstr.split(';'))
        for a in args:
            if a == 0:
                curclr = lookup_colorcode('neutral')
            elif a == 1:
                curclr |= LIGHT
            elif 30 <= a <= 37:
                curclr = (curclr & LIGHT) | (a - 30)
            else:
                # not supported renditions here; ignore for now
                pass
        return curclr
    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, ''.join([c.colored_repr() for c in self.chars]))
    __str__ = __repr__
    def __iter__(self):
        return iter(self.chars)
    def colored_version(self):
        # Re-render the whole text with its ANSI escapes restored.
        return ''.join([c.colored_version() for c in self.chars])
    def colortags(self):
        # One letter per character describing its color (see color_defs).
        return ''.join([c.colortag() for c in self.chars])
def lookup_colorcode(name):
    # Map a color name/alias (e.g. 'red', 'brightblue') to its numeric code.
    return colors_by_name[name]
def lookup_colorname(code):
    # NOTE(review): despite the singular name, this returns the *set* of
    # names/aliases for the code (or a fallback string for unknown codes) —
    # confirm callers expect a set here.
    return colors_by_num.get(code, 'Unknown-color-0%o' % code)
def lookup_colorletter(letter):
    # Map a single-letter color tag (see color_defs) to its numeric code.
    return colors_by_letter[letter]
def lookup_letter_from_code(code):
    """Return the single-letter tag for a color code.

    Unknown codes and the neutral color ('n') both map to a space, so
    default-colored text carries no visible tag.
    """
    letter = letters_by_num.get(code, ' ')
    return ' ' if letter == 'n' else letter
| apache-2.0 |
marcosmodesto/django-testapp | django/django/contrib/gis/tests/geoapp/models.py | 93 | 1686 | from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql  # allow NULL geometry columns on all backends but MySQL
class Country(models.Model):
    # A country with a multi-polygon boundary.
    name = models.CharField(max_length=30)
    mpoly = models.MultiPolygonField() # SRID, by default, is 4326
    objects = models.GeoManager()
    def __unicode__(self): return self.name
class City(models.Model):
    # A city located at a single point (default SRID 4326).
    name = models.CharField(max_length=30)
    point = models.PointField()
    objects = models.GeoManager()
    def __unicode__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
    # Multi-table inheritance from City, adding county/founding info.
    county = models.CharField(max_length=30)
    founded = models.DateTimeField(null=True)
    objects = models.GeoManager() # TODO: This should be implicitly inherited.
class State(models.Model):
    # A state with an optional polygon boundary (NULL disallowed on MySQL).
    name = models.CharField(max_length=30)
    poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
    objects = models.GeoManager()
    def __unicode__(self): return self.name
class Track(models.Model):
    # A named linestring geometry (e.g. a path or route).
    name = models.CharField(max_length=30)
    line = models.LineStringField()
    objects = models.GeoManager()
    def __unicode__(self): return self.name
class Truth(models.Model):
    # Non-geographic model that still uses GeoManager.
    val = models.BooleanField()
    objects = models.GeoManager()
# NOTE(review): Feature uses a generic GeometryField and is skipped on
# SpatiaLite — presumably unsupported on that backend; confirm.
if not spatialite:
    class Feature(models.Model):
        name = models.CharField(max_length=20)
        geom = models.GeometryField()
        objects = models.GeoManager()
        def __unicode__(self): return self.name
class MinusOneSRID(models.Model):
    # Point geometry with an explicitly undefined spatial reference (-1).
    geom = models.PointField(srid=-1) # Minus one SRID.
    objects = models.GeoManager()
| bsd-3-clause |
raymondnijssen/QGIS | python/plugins/processing/preconfigured/PreconfiguredUtils.py | 50 | 1383 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PreconfiguredUtils.py
---------------------
Date : April 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
import os
from processing.tools.system import mkdir, userFolder
def preconfiguredAlgorithmsFolder():
    """Return the per-user folder holding preconfigured algorithm
    definitions, creating it first if it does not exist."""
    path = os.path.join(userFolder(), 'preconfigured')
    path = str(path)
    mkdir(path)
    return path
def algAsDict(alg):
    """Serialize an algorithm invocation to a plain dict.

    :param alg: algorithm object exposing ``parameters`` and ``outputs``
        iterables (items with ``name``/``value`` attributes) and an ``id()``
        method.
    :returns: dict with keys ``parameters`` (name -> value), ``outputs``
        (name -> value) and ``algname`` (the algorithm id).
    """
    # Dict comprehensions replace the original append loops; same result.
    params = {param.name: param.value for param in alg.parameters}
    outputs = {out.name: out.value for out in alg.outputs}
    return {"parameters": params, "outputs": outputs, "algname": alg.id()}
| gpl-2.0 |
danielbonetto/twig_MVC | build/complete_upgrade.py | 3 | 1798 | # Run simpletests as an admin
import mechanize, sys, re
# Get base url from arguments
if len(sys.argv) != 2:
print 'Incorrect number of arguments, expects one'
sys.exit(2)
rooturl = sys.argv[1]
# Run tests
print 'Root url: %s' % rooturl
print 'Open login page'
mech = mechanize.Browser()
mech.open(rooturl+'login/index.php')
print 'Login as admin'
mech.select_form(nr=1)
mech["username"] = "admin"
mech["password"] = "passworD1!"
mech.submit()
print 'Hit notifications page'
url = rooturl+'admin/index.php'
count = 0
while 1:
# Set max loop
if count >= 20:
print 'ERROR: Notifications page appears broken, aborting'
break
count += 1
notif = mech.open(url)
content = notif.read()
next_url = None
# Check for continue button
if not next_url:
exp = re.compile('<form action="([^"]+)" method="get"><div><input type="submit" value="Continue"')
if exp.search(content):
next_url = exp.search(content).group(1)
print 'Pressing continue...'
# Check for major upgrade
if not next_url:
exp = re.compile('Upgrading Totara database...')
if exp.search(content):
next_url = rooturl + 'admin/index.php?confirmupgrade=1&confirmrelease=1&confirmplugincheck=1'
print 'Confirming upgrade...'
# Check for Save Changes button (means we are on the upgradesettings.php page)
if not next_url:
exp = re.compile('<input class="form-submit" type="submit" value="Save Changes"')
if exp.search(content):
mech.select_form(nr=3)
mech.submit()
next_url = rooturl + 'admin/index.php'
print 'Saving new settings...'
if not next_url:
break
url = next_url
print '(done %d times)' % count
| gpl-3.0 |
blakedewey/nipype | nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py | 9 | 1430 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import MultiImageMaths
def test_MultiImageMaths_inputs():
    # Auto-generated spec test (see file header): records the expected trait
    # metadata for every input of MultiImageMaths and yields one assertion
    # per (trait, metadata-key) pair in nose generator-test style.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    mandatory=True,
    position=2,
    ),
    internal_datatype=dict(argstr='-dt %s',
    position=1,
    ),
    nan2zeros=dict(argstr='-nan',
    position=3,
    ),
    op_string=dict(argstr='%s',
    mandatory=True,
    position=4,
    ),
    operand_files=dict(mandatory=True,
    ),
    out_file=dict(argstr='%s',
    genfile=True,
    hash_files=False,
    position=-2,
    ),
    output_datatype=dict(argstr='-odt %s',
    position=-1,
    ),
    output_type=dict(),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = MultiImageMaths.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MultiImageMaths_outputs():
    # Auto-generated counterpart for the output spec (single out_file trait).
    output_map = dict(out_file=dict(),
    )
    outputs = MultiImageMaths.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
kendazheng/wizcloud2 | wizcloud/wizresource/zapi.py | 1 | 2298 | import time
from pyzabbix import ZabbixAPI
from settings import ZabbixAPI_URL, ZabbixAPI_USER, ZabbixAPI_PASSWD
def getZabbixHistory(key, hostname, lastminutes):
    """Fetch item history for one host over the last *lastminutes* minutes.

    Returns a dict of the form {"code": int, "msg": str, "data": ...};
    code 0 means success, the 9701xx codes identify which lookup failed
    (01 = host, 02 = item, 03 = history).
    """
    zapi = ZabbixAPI(ZabbixAPI_URL, timeout=5)
    zapi.login(ZabbixAPI_USER, ZabbixAPI_PASSWD)
    hosts = zapi.host.get(filter={"host": hostname})
    if not hosts:
        return {
            "code": 970101,
            "msg": "No host with hostname: %s" % hostname,
            "data": None
        }
    else:
        # only the first matching host is considered
        hostid = hosts[0]['hostid']
        items = zapi.item.get(hostids=hostid, filter={"key_": key})
        if not items:
            return {
                "code": 970102,
                "msg": "No item with hostid: %s, key: %s" % (hostid, key),
                "data": None
            }
        else:
            itemid = items[0]['itemid']
            # Zabbix requires the item's value_type to select the right
            # history table
            value_type = items[0]['value_type']
            time_till = time.time()
            time_from = time_till - (lastminutes * 60)
            historys = zapi.history.get(history=value_type,
                                        hostids=hostid,
                                        itemids=itemid,
                                        time_from=time_from,
                                        time_till=time_till,
                                        output="extend")
            if not historys:
                return {
                    "code": 970103,
                    "msg":
                    "No history with value_type: %s, hostid: %s, itemid: %s, time_from: %s, time_till: %s"
                    % (value_type, hostid, itemid, time_from, time_till),
                    "data": None
                }
            else:
                return {"code": 0, "msg": "success", "data": historys}
def createZabbixTrigger(hostname, expression, description):
    """Create a priority-5 (disaster) trigger and return its id.

    Note: *hostname* is unused — the host is implied by *expression*.
    """
    zapi = ZabbixAPI(ZabbixAPI_URL, timeout=5)
    zapi.login(ZabbixAPI_USER, ZabbixAPI_PASSWD)
    resapi = zapi.trigger.create(expression=expression, description=description, priority=5)
    print resapi
    return resapi["triggerids"][0]
def deleteZabbixTrigger(triggerid):
    """Delete a trigger by id and return the id reported by the API."""
    zapi = ZabbixAPI(ZabbixAPI_URL, timeout=5)
    zapi.login(ZabbixAPI_USER, ZabbixAPI_PASSWD)
    resapi = zapi.trigger.delete(triggerid)
    print resapi
    return resapi["triggerids"][0]
| apache-2.0 |
miguelparaiso/PracticaOdoo | addons/hr_payroll_account/wizard/__init__.py | 433 | 1116 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
frankvdp/django | tests/schema/fields.py | 68 | 2806 | from functools import partial
from django.db import models
from django.db.models.fields.related import (
RECURSIVE_RELATIONSHIP_CONSTANT, ManyToManyDescriptor, ManyToManyField,
ManyToManyRel, RelatedField, create_many_to_many_intermediary_model,
)
class CustomManyToManyField(RelatedField):
    """
    Ticket #24104 - Need to have a custom ManyToManyField,
    which is not an inheritor of ManyToManyField.
    """
    many_to_many = True

    def __init__(self, to, db_constraint=True, swappable=True, related_name=None, related_query_name=None,
                 limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_table=None, **kwargs):
        # Accept either a model class or a lazy string reference to one.
        try:
            to._meta
        except AttributeError:
            to = str(to)
        kwargs['rel'] = ManyToManyRel(
            self, to,
            related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            # self-referential M2Ms are symmetrical by default
            symmetrical=symmetrical if symmetrical is not None else (to == RECURSIVE_RELATIONSHIP_CONSTANT),
            through=through,
            through_fields=through_fields,
            db_constraint=db_constraint,
        )
        self.swappable = swappable
        self.db_table = db_table
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        super().__init__(**kwargs)

    def contribute_to_class(self, cls, name, **kwargs):
        # Symmetrical self-referential M2Ms get a hidden reverse accessor.
        if self.remote_field.symmetrical and (
                self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
            self.remote_field.related_name = "%s_rel_+" % name
        super().contribute_to_class(cls, name, **kwargs)
        # Auto-create the intermediary (through) model when none was given.
        if not self.remote_field.through and not cls._meta.abstract and not cls._meta.swapped:
            self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
        setattr(cls, self.name, ManyToManyDescriptor(self.remote_field))
        self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta)

    def get_internal_type(self):
        # Report as ManyToManyField so the schema editor treats it as one.
        return 'ManyToManyField'

    # Copy those methods from ManyToManyField because they don't call super() internally
    contribute_to_related_class = ManyToManyField.__dict__['contribute_to_related_class']
    _get_m2m_attr = ManyToManyField.__dict__['_get_m2m_attr']
    _get_m2m_reverse_attr = ManyToManyField.__dict__['_get_m2m_reverse_attr']
    _get_m2m_db_table = ManyToManyField.__dict__['_get_m2m_db_table']
class InheritedManyToManyField(ManyToManyField):
    # Plain subclass used to test schema handling of inherited M2M fields.
    pass
class MediumBlobField(models.BinaryField):
    """
    A MySQL BinaryField that uses a different blob size.
    """
    def db_type(self, connection):
        # MEDIUMBLOB holds up to 16 MB (vs LONGBLOB used by default).
        return 'MEDIUMBLOB'
| bsd-3-clause |
gsantovena/marathon | tests/shakedown/shakedown/clients/marathon.py | 2 | 25377 | import json
import logging
from six.moves import urllib
from . import dcos_service_url, rpcclient
from .. import util
from ..errors import DCOSException
logger = logging.getLogger(__name__)
def create_client(marathon_service_name='marathon', auth_token=None):
    """Creates a Marathon client with the supplied configuration.

    :param marathon_service_name: Marathon service name
    :param auth_token: DC/OS acs auth token
    :returns: Marathon client
    :rtype: shakedown.clients.marathon.Client
    """
    # Resolve the service's base URL inside the cluster, then wrap the
    # low-level RPC client in the Marathon-specific API below.
    marathon_url = dcos_service_url(marathon_service_name)
    rpc_client = rpcclient.create_client(marathon_url, auth_token=auth_token)
    logger.info('Creating marathon client with: %r', marathon_url)
    return Client(rpc_client)
class Client(object):
"""Class for talking to the Marathon server.
:param rpc_client: provides a method for making HTTP requests
:type rpc_client: _RpcClient
"""
    def __init__(self, rpc_client):
        # All HTTP calls below go through this client's requests session.
        self._rpc = rpc_client
    def get_about(self):
        """Returns info about Marathon instance

        :returns Marathon information
        :rtype: dict
        """
        # GET /v2/info
        response = self._rpc.session.get('v2/info')
        return response.json()
    def ping(self):
        """Hits the Marathon ping endpoint

        :returns Pong
        :rtype: str
        """
        # GET /ping returns plain text, not JSON
        response = self._rpc.session.get('ping')
        return response.text
    def get_app(self, app_id, version=None):
        """Returns a representation of the requested application version. If
        version is None, return the latest version.

        :param app_id: the ID of the application
        :type app_id: str
        :param version: application version as a ISO8601 datetime
        :type version: str
        :returns: the requested Marathon application
        :rtype: dict
        """
        app_id = util.normalize_marathon_id_path(app_id)
        if version is None:
            path = 'v2/apps{}'.format(app_id)
        else:
            path = 'v2/apps{}/versions/{}'.format(app_id, version)
        # NOTE(review): unlike the other getters here, this one raises on
        # HTTP error status (raise_for_status) — callers may rely on that.
        response = self._rpc.session.get(path)
        response.raise_for_status()
        # Looks like Marathon return different JSON for versions
        if version is None:
            return response.json().get('app')
        else:
            return response.json()
    def get_groups(self):
        """Get a list of known groups.

        :returns: list of known groups
        :rtype: list of dict
        """
        # GET /v2/groups; unwrap the 'groups' envelope
        response = self._rpc.session.get('v2/groups')
        return response.json().get('groups')
    def get_group(self, group_id, version=None):
        """Returns a representation of the requested group version. If
        version is None, return the latest version.

        :param group_id: the ID of the group
        :type group_id: str
        :param version: group version as a ISO8601 datetime
        :type version: str
        :returns: the requested Marathon group
        :rtype: dict
        """
        group_id = util.normalize_marathon_id_path(group_id)
        if version is None:
            path = 'v2/groups{}'.format(group_id)
        else:
            path = 'v2/groups{}/versions/{}'.format(group_id, version)
        response = self._rpc.session.get(path)
        return response.json()
    def get_app_versions(self, app_id, max_count=None):
        """Asks Marathon for all the versions of the Application up to a
        maximum count.

        :param app_id: the ID of the application or group
        :type app_id: str
        :param max_count: the maximum number of version to fetch
        :type max_count: int
        :returns: a list of all the version of the application
        :rtype: [str]
        """
        # fail fast on a nonsensical limit rather than silently returning []
        if max_count is not None and max_count <= 0:
            raise DCOSException(
                'Maximum count must be a positive number: {}'.format(max_count)
            )
        app_id = util.normalize_marathon_id_path(app_id)
        path = 'v2/apps{}/versions'.format(app_id)
        response = self._rpc.session.get(path)
        if max_count is None:
            return response.json().get('versions')
        else:
            return response.json().get('versions')[:max_count]
    def get_apps(self):
        """Get a list of known applications.

        :returns: list of known applications
        :rtype: [dict]
        """
        # GET /v2/apps; unwrap the 'apps' envelope
        response = self._rpc.session.get('v2/apps')
        return response.json().get('apps')
def get_apps_for_framework(self, framework_name):
""" Return all apps running the given framework.
:param framework_name: framework name
:type framework_name: str
:rtype: [dict]
"""
return [app for app in self.get_apps()
if app.get('labels', {}).get(
'DCOS_PACKAGE_FRAMEWORK_NAME') == framework_name]
    def add_app(self, app_resource):
        """Add a new application.

        :param app_resource: application resource
        :type app_resource: dict, bytes or file
        :returns: the deployment ID of the resulting deployment
        :rtype: str
        """
        # The file type exists only in Python 2, preventing type(...) is file.
        if hasattr(app_resource, 'read'):
            app_json = json.load(app_resource)
        else:
            app_json = app_resource
        response = self._rpc.session.post('v2/apps', json=app_json)
        response.raise_for_status()
        # NOTE(review): the {} default for 'deployments' would make [0] raise
        # KeyError(0) if the field were missing — only the happy path works.
        return response.json().get('deployments', {})[0].get('id')
    def _update_req(
            self, resource_type, resource_id, resource_json, force=False):
        """Send an HTTP request to update an application, group, or pod.

        :param resource_type: one of 'apps', 'groups', or 'pods'
        :type resource_type: str
        :param resource_id: the app, group, or pod ID
        :type resource_id: str
        :param resource_json: the json payload
        :type resource_json: {}
        :param force: whether to override running deployments
        :type force: bool
        :returns: the response from Marathon
        :rtype: requests.Response
        """
        # PUT /v2/<type>/<normalized id>, with ?force=true when requested
        path_template = 'v2/{}/{{}}'.format(resource_type)
        path = self._marathon_id_path_format(path_template, resource_id)
        params = self._force_params(force)
        return self._rpc.session.put(path, params=params, json=resource_json)
def _update(self, resource_type, resource_id, resource_json, force=False):
"""Update an application or group.
The HTTP response is handled differently for pods; see `update_pod`.
:param resource_type: either 'apps' or 'groups'
:type resource_type: str
:param resource_id: the app or group ID
:type resource_id: str
:param resource_json: the json payload
:type resource_json: {}
:param force: whether to override running deployments
:type force: bool
:returns: the resulting deployment ID
:rtype: str
"""
response = self._update_req(
resource_type, resource_id, resource_json, force)
body_json = self._parse_json(response)
try:
return body_json.get('deploymentId')
except KeyError:
template = ('Error: missing "deploymentId" field in the following '
'JSON response from Marathon:\n{}')
rendered_json = json.dumps(body_json, indent=2, sort_keys=True)
raise DCOSException(template.format(rendered_json))
    def update_app(self, app_id, payload, force=False):
        """Update an application.

        :param app_id: the application id
        :type app_id: str
        :param payload: the json payload
        :type payload: dict
        :param force: whether to override running deployments
        :type force: bool
        :returns: the resulting deployment ID
        :rtype: str
        """
        # thin wrapper over the shared app/group update path
        return self._update('apps', app_id, payload, force)
def update_group(self, group_id, payload, force=False):
"""Update a group.
:param group_id: the group id
:type group_id: str
:param payload: the json payload
:type payload: dict
:param force: whether to override running deployments
:type force: bool
:returns: the resulting deployment ID
:rtype: str
"""
return self._update('groups', group_id, payload, force)
def scale_app(self, app_id, instances, force=False):
"""Scales an application to the requested number of instances.
:param app_id: the ID of the application to scale
:type app_id: str
:param instances: the requested number of instances
:type instances: int
:param force: whether to override running deployments
:type force: bool
:returns: the resulting deployment ID
:rtype: str
"""
app_id = util.normalize_marathon_id_path(app_id)
params = self._force_params(force)
path = 'v2/apps{}'.format(app_id)
response = self._rpc.session.put(path, params=params, json={'instances': int(instances)})
deployment = response.json().get('deploymentId')
return deployment
    def scale_group(self, group_id, scale_factor, force=False):
        """Scales a group with the requested scale-factor.

        :param group_id: the ID of the group to scale
        :type group_id: str
        :param scale_factor: the requested value of scale-factor
        :type scale_factor: float
        :param force: whether to override running deployments
        :type force: bool
        :returns: the resulting deployment ID
        :rtype: str
        """
        group_id = util.normalize_marathon_id_path(group_id)
        params = self._force_params(force)
        path = 'v2/groups{}'.format(group_id)
        response = self._rpc.session.put(path, params=params, json={'scaleBy': scale_factor})
        deployment = response.json().get('deploymentId')
        return deployment
    def stop_app(self, app_id, force=False):
        """Scales an application to zero instances.

        :param app_id: the ID of the application to stop
        :type app_id: str
        :param force: whether to override running deployments
        :type force: bool
        :returns: the resulting deployment ID
        :rtype: str
        """
        # Stopping is implemented as scaling to zero instances.
        return self.scale_app(app_id, 0, force)
def remove_app(self, app_id, force=False):
"""Completely removes the requested application.
:param app_id: the ID of the application to remove
:type app_id: str
:param force: whether to override running deployments
:type force: bool
:rtype: None
"""
app_id = util.normalize_marathon_id_path(app_id)
params = self._force_params(force)
path = 'v2/apps{}'.format(app_id)
self._rpc.session.delete(path, params=params)
    def remove_group(self, group_id, force=False):
        """Completely removes the requested group.

        :param group_id: the ID of the group to remove
        :type group_id: str
        :param force: whether to override running deployments
        :type force: bool
        :returns: the parsed JSON body of Marathon's response
        :rtype: dict
        """
        group_id = util.normalize_marathon_id_path(group_id)
        params = self._force_params(force)
        path = 'v2/groups{}'.format(group_id)
        response = self._rpc.session.delete(path, params=params)
        return response.json()
    def kill_tasks(self, app_id, scale=None, host=None):
        """Kills the tasks of a given application, optionally restricted
        to a single agent host, optionally scaling the app down afterwards.

        :param app_id: the ID of the application whose tasks to kill
        :type app_id: str
        :param scale: scale the app down after killing the specified tasks
        :type scale: bool
        :param host: only kill tasks running on this host
        :type host: str
        :returns: the parsed JSON body of Marathon's response
        :rtype: dict
        """
        params = {}
        app_id = util.normalize_marathon_id_path(app_id)
        if host:
            params['host'] = host
        if scale:
            params['scale'] = scale
        path = 'v2/apps{}/tasks'.format(app_id)
        response = self._rpc.session.delete(path, params=params)
        return response.json()
def kill_and_scale_tasks(self, task_ids, scale=None, wipe=None):
"""Kills the tasks for a given application,
and can target a given agent, with a future target scale
:param task_ids: a list of task ids to kill
:type task_ids: list
:param scale: Scale the app down after killing the specified tasks
:type scale: bool
:param wipe: whether remove reservations and persistent volumes.
:type wipe: bool
:returns: If scale=false, all tasks that were killed are returned.
If scale=true, than a deployment is triggered and the
deployment id and version returned.
:rtype: list | dict
"""
params = {}
path = 'v2/tasks/delete'
if scale:
params['scale'] = scale
if wipe:
params['wipe'] = wipe
response = self._rpc.session.post(path, params=params, json={'ids': task_ids})
return response.json()
def restart_app(self, app_id, force=False):
"""Performs a rolling restart of all of the tasks.
:param app_id: the id of the application to restart
:type app_id: str
:param force: whether to override running deployments
:type force: bool
:returns: the deployment id and version
:rtype: dict
"""
app_id = util.normalize_marathon_id_path(app_id)
params = self._force_params(force)
path = 'v2/apps{}/restart'.format(app_id)
response = self._rpc.session.post(path, params=params)
return response.json()
def get_deployment(self, deployment_id):
"""Returns a deployment.
:param deployment_id: the deployment id
:type deployment_id: str
:returns: a deployment
:rtype: dict
"""
response = self._rpc.session.get('v2/deployments')
deployment = next(
(deployment for deployment in response.json()
if deployment_id == deployment['id']),
None)
return deployment
def get_deployments(self, app_id=None):
"""Returns a list of deployments, optionally limited to an app.
:param app_id: the id of the application
:type app_id: str
:returns: a list of deployments
:rtype: list of dict
"""
response = self._rpc.session.get('v2/deployments')
if app_id is not None:
app_id = util.normalize_marathon_id_path(app_id)
deployments = [
deployment for deployment in response.json()
if app_id in deployment['affectedApps']
]
else:
deployments = response.json()
return deployments
def _cancel_deployment(self, deployment_id, force):
"""Cancels an application deployment.
:param deployment_id: the deployment id
:type deployment_id: str
:param force: if set to `False`, stop the deployment and
create a new rollback deployment to reinstate the
previous configuration. If set to `True`, simply stop the
deployment.
:type force: bool
:returns: cancelation deployment
:rtype: dict
"""
params = self._force_params(force)
path = 'v2/deployments/{}'.format(deployment_id)
response = self._rpc.session.delete(path, params=params)
if force:
return None
else:
return response.json()
def rollback_deployment(self, deployment_id):
"""Rolls back an application deployment.
:param deployment_id: the deployment id
:type deployment_id: str
:returns: cancelation deployment
:rtype: dict
"""
return self._cancel_deployment(deployment_id, False)
def stop_deployment(self, deployment_id):
"""Stops an application deployment.
:param deployment_id: the deployment id
:type deployment_id: str
:rtype: None
"""
self._cancel_deployment(deployment_id, True)
def get_tasks(self, app_id):
"""Returns a list of tasks, optionally limited to an app.
:param app_id: the id of the application to restart
:type app_id: str
:returns: a list of tasks
:rtype: [dict]
"""
response = self._rpc.session.get('v2/tasks')
if app_id is not None:
app_id = util.normalize_marathon_id_path(app_id)
tasks = [
task for task in response.json()['tasks']
if app_id == task['appId']
]
else:
tasks = response.json()['tasks']
return tasks
def get_task(self, task_id):
"""Returns a task
:param task_id: the id of the task
:type task_id: str
:returns: a tasks
:rtype: dict
"""
response = self._rpc.session.get('v2/tasks')
task = next(
(task for task in response.json()['tasks']
if task_id == task['id']),
None)
return task
def stop_task(self, task_id, wipe=None):
"""Stops a task.
:param task_id: the ID of the task
:type task_id: str
:param wipe: whether remove reservations and persistent volumes.
:type wipe: bool
:returns: a tasks
:rtype: dict
"""
if not wipe:
params = None
else:
params = {'wipe': 'true'}
response = self._rpc.session.post('v2/tasks/delete', params=params, json={'ids': [task_id]})
task = next(
(task for task in response.json()['tasks']
if task_id == task['id']),
None)
return task
    def create_group(self, group_resource):
        """Add a new group.

        :param group_resource: group resource definition
        :type group_resource: dict, bytes or file
        :returns: the resulting deployment ID
        :rtype: str
        """
        # The file type exists only in Python 2, preventing type(...) is file.
        if hasattr(group_resource, 'read'):
            group_json = json.load(group_resource)
        else:
            group_json = group_resource
        response = self._rpc.session.post('v2/groups', json=group_json)
        return response.json().get("deploymentId")
def get_leader(self):
""" Get the leading marathon instance.
:returns: string of the form <ip>:<port>
:rtype: str
"""
response = self._rpc.session.get('v2/leader')
return response.json().get('leader')
def delete_leader(self):
""" Delete the leading marathon instance.
"""
response = self._rpc.session.delete('v2/leader')
return response.json()
    def add_pod(self, pod_json):
        """Add a new pod.

        :param pod_json: JSON pod definition
        :type pod_json: dict
        :returns: the deployment ID taken from the
            'Marathon-Deployment-Id' response header
            (None if the header is absent)
        :rtype: str
        """
        response = self._rpc.session.post('v2/pods', json=pod_json)
        response.raise_for_status()
        return response.headers.get('Marathon-Deployment-Id')
def remove_pod(self, pod_id, force=False):
"""Completely removes the requested pod.
:param pod_id: the ID of the pod to remove
:type pod_id: str
:param force: whether to override running deployments
:type force: bool
:rtype: None
"""
path = self._marathon_id_path_format('v2/pods/{}', pod_id)
params = self._force_params(force)
response = self._rpc.session.delete(path, params=params)
response.raise_for_status()
def show_pod(self, pod_id):
"""Returns a representation of the requested pod.
:param pod_id: the ID of the pod
:type pod_id: str
:returns: the requested Marathon pod
:rtype: dict
"""
path = self._marathon_id_path_format('v2/pods/{}::status', pod_id)
response = self._rpc.session.get(path)
response.raise_for_status()
return self._parse_json(response)
def list_pod(self):
"""Get a list of known pods.
:returns: list of known pods
:rtype: [dict]
"""
response = self._rpc.session.get('v2/pods/::status')
response.raise_for_status()
return self._parse_json(response)
    def update_pod(self, pod_id, pod_json, force=False):
        """Update a pod.

        Unlike `_update`, the deployment ID comes from a response header
        rather than the body; see also `add_pod`.

        :param pod_id: the pod ID
        :type pod_id: str
        :param pod_json: JSON pod definition
        :type pod_json: {}
        :param force: whether to override running deployments
        :type force: bool
        :returns: the resulting deployment ID
        :rtype: str
        """
        response = self._update_req('pods', pod_id, pod_json, force)
        deployment_id_header_name = 'Marathon-Deployment-Id'
        deployment_id = response.headers.get(deployment_id_header_name)
        if deployment_id is None:
            template = 'Error: missing "{}" header from Marathon response'
            raise DCOSException(template.format(deployment_id_header_name))
        return deployment_id
def kill_pod_instances(self, pod_id, instance_ids):
"""Kills the given instances of the specified pod.
:param pod_id: the pod to delete instances from
:type pod_id: str
:param instance_ids: the IDs of the instances to kill
:type instance_ids: [str]
:returns: the status JSON objects for the killed instances
:rtype: [{}]
"""
path = self._marathon_id_path_format('v2/pods/{}::instances', pod_id)
response = self._rpc.session.delete(path, json=instance_ids)
return self._parse_json(response)
def get_queued_app(self, app_id):
"""Returns app information inside the launch queue.
:param app_id: the app id
:type app_id: str
:returns: app information inside the launch queue
:rtype: dict
"""
response = self._rpc.session.get('v2/queue?embed=lastUnusedOffers')
app = next(
(app for app in response.json().get('queue')
if app_id == get_app_or_pod_id(app)),
None)
return app
def get_queued_apps(self):
"""Returns the content of the launch queue,
including the apps which should be scheduled.
:returns: a list of to be scheduled apps, including debug information
:rtype: list of dict
"""
response = self._rpc.session.get('v2/queue')
return response.json().get('queue')
def get_plugins(self):
"""Get a list of known plugins.
:returns: list of known plugins
:rtype: [dict]
"""
response = self._rpc.session.get('v2/plugins')
return response.json()
@staticmethod
def _marathon_id_path_format(url_path_template, id_path):
"""Substitutes a Marathon "ID path" into a URL path format string,
ensuring the result is well-formed.
All leading and trailing slashes in the ID will be removed, and the ID
will have all URL-unsafe characters escaped, as if by
urllib.parse.quote().
:param url_path_template: format string for the path portion of a URL,
with a single format specifier (i.e. {})
where the "ID path" should be inserted
:type url_path_template: str
:param id_path: a Marathon "ID path", e.g. app, group, or pod ID
:type id_path: str
:returns: the url path template with the ID inserted
:rtype: str
"""
normalized_id_path = urllib.parse.quote(id_path.strip('/'))
return url_path_template.format(normalized_id_path)
@staticmethod
def _force_params(force):
"""Returns the query parameters that signify the provided force value.
:param force: whether to override running deployments
:type force: bool
:rtype: {} | None
"""
return {'force': 'true'} if force else None
@staticmethod
def _parse_json(response):
"""Attempts to parse the body of the given response as JSON.
Raises DCOSException if parsing fails.
:param response: the response containing the body to parse
:type response: requests.Response
:return: the parsed JSON
:rtype: {} | [] | str | int | float | bool | None
"""
try:
return response.json()
except Exception:
template = ('Error: Response from Marathon was not in expected '
'JSON format:\n{}')
raise DCOSException(template.format(response.text))
def get_app_or_pod_id(app_or_pod):
    """Gets the app or pod ID from the given app or pod queue entry.

    :param app_or_pod: app or pod definition
    :type app_or_pod: dict
    :return: app or pod id
    :rtype: str
    """
    # Prefer the 'app' wrapper if present; otherwise fall back to 'pod'.
    container = app_or_pod.get('app', app_or_pod.get('pod', {}))
    return container.get('id')
| apache-2.0 |
lfdesousa/pinject | pinject/initializers_test.py | 2 | 3576 | """Copyright 2013 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import unittest
from pinject import errors
from pinject import initializers
class CopyArgsToInternalFieldsTest(unittest.TestCase):
    """Tests for the @copy_args_to_internal_fields initializer decorator.

    The decorator is expected to copy each __init__ argument (positional,
    keyword, or **kwargs) into a same-named attribute prefixed with an
    underscore (e.g. foo -> self._foo), while preserving the signature.
    """
    def test_does_nothing_extra_for_zero_arg_initializer(self):
        class SomeClass(object):
            @initializers.copy_args_to_internal_fields
            def __init__(self):
                self.forty_two = 42
        self.assertEqual(42, SomeClass().forty_two)
    def test_copies_positional_arg_to_internal_field(self):
        class SomeClass(object):
            @initializers.copy_args_to_internal_fields
            def __init__(self, foo):
                pass
        self.assertEqual('foo', SomeClass('foo')._foo)
    def test_copies_keyword_arg_to_internal_field(self):
        class SomeClass(object):
            @initializers.copy_args_to_internal_fields
            def __init__(self, foo):
                pass
        self.assertEqual('foo', SomeClass(foo='foo')._foo)
    def test_copies_kwargs_to_internal_fields(self):
        class SomeClass(object):
            @initializers.copy_args_to_internal_fields
            def __init__(self, **kwargs):
                pass
        self.assertEqual('foo', SomeClass(foo='foo')._foo)
    def test_raises_exception_if_keyword_arg_unknown(self):
        class SomeClass(object):
            @initializers.copy_args_to_internal_fields
            def __init__(self, bar):
                pass
        self.assertRaises(TypeError, SomeClass, foo='foo')
    def test_maintains_signature(self):
        class SomeClass(object):
            @initializers.copy_args_to_internal_fields
            def __init__(self, foo):
                pass
        self.assertEqual('__init__', SomeClass.__init__.__name__)
        # The decorator must not collapse the signature to *args/**kwargs.
        arg_names, unused_varargs, unused_keywords, unused_defaults = (
            inspect.getargspec(SomeClass.__init__))
        self.assertEqual(['self', 'foo'], arg_names)
    def test_raises_exception_if_init_takes_pargs(self):
        # *args are rejected because there is no field name to copy into.
        def do_bad_initializer():
            class SomeClass(object):
                @initializers.copy_args_to_internal_fields
                def __init__(self, *pargs):
                    pass
        self.assertRaises(errors.PargsDisallowedWhenCopyingArgsError,
                          do_bad_initializer)
    def test_raises_exception_if_not_applied_to_init(self):
        # The decorator only makes sense on __init__, not free functions.
        def do_bad_decorated_fn():
            @initializers.copy_args_to_internal_fields
            def some_function(foo, bar):
                pass
        self.assertRaises(errors.DecoratorAppliedToNonInitError,
                          do_bad_decorated_fn)
class CopyArgsToPublicFieldsTest(unittest.TestCase):
    """Tests for @copy_args_to_public_fields (no underscore prefix)."""
    def test_uses_no_field_prefix(self):
        class SomeClass(object):
            @initializers.copy_args_to_public_fields
            def __init__(self, foo):
                pass
        self.assertEqual('foo', SomeClass('foo').foo)
# Other functionality is tested as part of testing
# copy_args_to_internal_fields().
upliftaero/MissionPlanner | Lib/site-packages/scipy/linalg/tests/test_basic.py | 51 | 19160 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for linalg.basic module
"""
"""
Bugs:
1) solve.check_random_sym_complex fails if a is complex
and transpose(a) = conjugate(a) (a is Hermitian).
"""
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_basic.py
"""
import sys
from numpy import arange, array, dot, zeros, identity, conjugate, transpose, \
float32
import numpy.linalg as linalg
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, dec
from scipy.linalg import solve, inv, det, lstsq, pinv, pinv2, norm,\
solve_banded, solveh_banded, solve_triangular
from scipy.linalg._testutils import assert_no_overwrite
def random(size):
    """Return a uniformly-distributed random array of the given shape."""
    shape = tuple(size)
    return rand(*shape)
class TestSolveBanded(TestCase):
    """Tests for scipy.linalg.solve_banded on real and complex systems.

    Each test builds a dense matrix `a` alongside its banded storage `ab`
    (rows hold the diagonals; (l, u) are the lower/upper bandwidths) and
    checks that the computed solution satisfies dot(a, x) == b.
    """
    def test_real(self):
        a = array([[1.0, 20, 0, 0],
                   [-30, 4, 6, 0],
                   [2, 1, 20, 2],
                   [0, -1, 7, 14]])
        ab = array([[0.0, 20, 6, 2],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0],
                    [2, -1, 0, 0]])
        l, u = 2, 1
        b4 = array([10.0, 0.0, 2.0, 14.0])
        b4by1 = b4.reshape(-1, 1)
        b4by2 = array([[2, 1],
                       [-30, 4],
                       [2, 3],
                       [1, 3]])
        b4by4 = array([[1, 0, 0, 0],
                       [0, 0, 0, 1],
                       [0, 1, 0, 0],
                       [0, 1, 0, 0]])
        # Exercise 1-D, (n,1), and multi-column right-hand sides.
        for b in [b4, b4by1, b4by2, b4by4]:
            x = solve_banded((l, u), ab, b)
            assert_array_almost_equal(dot(a, x), b)
    def test_complex(self):
        a = array([[1.0, 20, 0, 0],
                   [-30, 4, 6, 0],
                   [2j, 1, 20, 2j],
                   [0, -1, 7, 14]])
        ab = array([[0.0, 20, 6, 2j],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0],
                    [2j, -1, 0, 0]])
        l, u = 2, 1
        b4 = array([10.0, 0.0, 2.0, 14.0j])
        b4by1 = b4.reshape(-1, 1)
        b4by2 = array([[2, 1],
                       [-30, 4],
                       [2, 3],
                       [1, 3]])
        b4by4 = array([[1, 0, 0, 0],
                       [0, 0, 0, 1j],
                       [0, 1, 0, 0],
                       [0, 1, 0, 0]])
        for b in [b4, b4by1, b4by2, b4by4]:
            x = solve_banded((l, u), ab, b)
            assert_array_almost_equal(dot(a, x), b)
    def test_bad_shape(self):
        ab = array([[0.0, 20, 6, 2],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0],
                    [2, -1, 0, 0]])
        l, u = 2, 1
        # RHS with the wrong leading dimension must be rejected.
        bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4)
        assert_raises(ValueError, solve_banded, (l, u), ab, bad)
        assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
        # Values of (l,u) are not compatible with ab.
        assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
class TestSolveHBanded(TestCase):
    """Tests for scipy.linalg.solveh_banded (Hermitian/symmetric banded).

    `ab` stores only one triangle of the band; -99 marks unused entries.
    Variants cover upper vs. lower storage, 1-D / 2-D right-hand sides,
    float32, complex data, and shape validation.
    """
    def test_01_upper(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        # with the RHS as a 1D array.
        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
        b = array([1.0, 4.0, 1.0])
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0])
    def test_02_upper(self):
        # Solve
        # [ 4 1 0]     [1 4]
        # [ 1 4 1] X = [4 2]
        # [ 0 1 4]     [1 4]
        #
        ab = array([[-99, 1.0, 1.0],
                    [4.0, 4.0, 4.0]])
        b = array([[1.0, 4.0],
                   [4.0, 2.0],
                   [1.0, 4.0]])
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0]])
        assert_array_almost_equal(x, expected)
    def test_03_upper(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        # with the RHS as a 2D array with shape (3,1).
        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
        b = array([1.0, 4.0, 1.0]).reshape(-1, 1)
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1))
    def test_01_lower(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        #
        ab = array([[4.0, 4.0, 4.0],
                    [1.0, 1.0, -99]])
        b = array([1.0, 4.0, 1.0])
        x = solveh_banded(ab, b, lower=True)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0])
    def test_02_lower(self):
        # Solve
        # [ 4 1 0]     [1 4]
        # [ 1 4 1] X = [4 2]
        # [ 0 1 4]     [1 4]
        #
        ab = array([[4.0, 4.0, 4.0],
                    [1.0, 1.0, -99]])
        b = array([[1.0, 4.0],
                   [4.0, 2.0],
                   [1.0, 4.0]])
        x = solveh_banded(ab, b, lower=True)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0]])
        assert_array_almost_equal(x, expected)
    def test_01_float32(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        #
        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
        b = array([1.0, 4.0, 1.0], dtype=float32)
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0])
    def test_02_float32(self):
        # Solve
        # [ 4 1 0]     [1 4]
        # [ 1 4 1] X = [4 2]
        # [ 0 1 4]     [1 4]
        #
        ab = array([[-99, 1.0, 1.0],
                    [4.0, 4.0, 4.0]], dtype=float32)
        b = array([[1.0, 4.0],
                   [4.0, 2.0],
                   [1.0, 4.0]], dtype=float32)
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0]])
        assert_array_almost_equal(x, expected)
    def test_01_complex(self):
        # Solve
        # [ 4 -j 0]     [ -j]
        # [ j 4 -j] X = [4-j]
        # [ 0 j 4]      [4+j]
        #
        ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
        b = array([-1.0j, 4.0 - 1j, 4 + 1j])
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 1.0])
    def test_02_complex(self):
        # Solve
        # [ 4 -j 0]     [ -j 4j ]
        # [ j 4 -j] X = [4-j -1-j]
        # [ 0 j 4]      [4+j 4  ]
        #
        ab = array([[-99, -1.0j, -1.0j],
                    [4.0, 4.0, 4.0]])
        b = array([[-1j, 4.0j],
                   [4.0 - 1j, -1.0 - 1j],
                   [4.0 + 1j, 4.0]])
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0j],
                          [1.0, 0.0],
                          [1.0, 1.0]])
        assert_array_almost_equal(x, expected)
    def test_bad_shapes(self):
        # Mismatched ab/b shapes must raise ValueError.
        ab = array([[-99, 1.0, 1.0],
                    [4.0, 4.0, 4.0]])
        b = array([[1.0, 4.0],
                   [4.0, 2.0]])
        assert_raises(ValueError, solveh_banded, ab, b)
        assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
        assert_raises(ValueError, solveh_banded, ab, [1.0])
class TestSolve(TestCase):
    """Tests for scipy.linalg.solve: general, symmetric positive-definite
    (sym_pos), and complex systems, on both fixed and random matrices.
    Random matrices are made diagonally dominant so they are well
    conditioned.
    """
    def test_20Feb04_bug(self):
        a = [[1, 1], [1.0, 0]]  # ok
        x0 = solve(a, [1, 0j])
        assert_array_almost_equal(dot(a, x0), [1, 0])
        a = [[1, 1], [1.2, 0]]  # gives failure with clapack.zgesv(..,rowmajor=0)
        b = [1, 0j]
        x0 = solve(a, b)
        assert_array_almost_equal(dot(a, x0), [1, 0])
    def test_simple(self):
        a = [[1, 20], [-30, 4]]
        for b in ([[1, 0], [0, 1]], [1, 0],
                  [[2, 1], [-30, 4]]):
            x = solve(a, b)
            assert_array_almost_equal(dot(a, x), b)
    def test_simple_sym(self):
        a = [[2, 3], [3, 5]]
        for lower in [0, 1]:
            for b in ([[1, 0], [0, 1]], [1, 0]):
                x = solve(a, b, sym_pos=1, lower=lower)
                assert_array_almost_equal(dot(a, x), b)
    def test_simple_sym_complex(self):
        a = [[5, 2], [2, 4]]
        for b in [[1j, 0],
                  [[1j, 1j],
                   [0, 2]],
                  ]:
            x = solve(a, b, sym_pos=1)
            assert_array_almost_equal(dot(a, x), b)
    def test_simple_complex(self):
        a = array([[5, 2], [2j, 4]], 'D')
        for b in [[1j, 0],
                  [[1j, 1j],
                   [0, 2]],
                  [1, 0j],
                  array([1, 0], 'D'),
                  ]:
            x = solve(a, b)
            assert_array_almost_equal(dot(a, x), b)
    def test_nils_20Feb04(self):
        # Solving column-by-column against the identity must reproduce inv(A).
        n = 2
        A = random([n, n]) + random([n, n]) * 1j
        X = zeros((n, n), 'D')
        Ainv = inv(A)
        R = identity(n) + identity(n) * 0j
        for i in arange(0, n):
            r = R[:, i]
            X[:, i] = solve(A, r)
        assert_array_almost_equal(X, Ainv)
    def test_random(self):
        n = 20
        a = random([n, n])
        for i in range(n): a[i, i] = 20 * (.1 + a[i, i])
        for i in range(4):
            b = random([n, 3])
            x = solve(a, b)
            assert_array_almost_equal(dot(a, x), b)
    def test_random_complex(self):
        n = 20
        a = random([n, n]) + 1j * random([n, n])
        for i in range(n): a[i, i] = 20 * (.1 + a[i, i])
        for i in range(2):
            b = random([n, 3])
            x = solve(a, b)
            assert_array_almost_equal(dot(a, x), b)
    def test_random_sym(self):
        n = 20
        a = random([n, n])
        for i in range(n):
            a[i, i] = abs(20 * (.1 + a[i, i]))
            for j in range(i):
                a[i, j] = a[j, i]
        for i in range(4):
            b = random([n])
            x = solve(a, b, sym_pos=1)
            assert_array_almost_equal(dot(a, x), b)
    def test_random_sym_complex(self):
        n = 20
        a = random([n, n])
        #a = a + 1j*random([n,n]) # XXX: with this the accuracy will be very low
        for i in range(n):
            a[i, i] = abs(20 * (.1 + a[i, i]))
            for j in range(i):
                # Hermitian: lower triangle is the conjugate of the upper.
                a[i, j] = conjugate(a[j, i])
        b = random([n]) + 2j * random([n])
        for i in range(2):
            x = solve(a, b, sym_pos=1)
            assert_array_almost_equal(dot(a, x), b)
class TestSolveTriangular(TestCase):
    """Tests for scipy.linalg.solve_triangular, including the trans=1
    (transposed-system) path and non-contiguous input."""
    def test_simple(self):
        """
        solve_triangular on a simple 2x2 matrix.
        """
        A = array([[1, 0], [1, 2]])
        b = [1, 1]
        sol = solve_triangular(A, b, lower=True)
        assert_array_almost_equal(sol, [1, 0])
        # check that it works also for non-contiguous matrices
        sol = solve_triangular(A.T, b, lower=False)
        assert_array_almost_equal(sol, [.5, .5])
        # and that it gives the same result as trans=1
        sol = solve_triangular(A, b, lower=True, trans=1)
        assert_array_almost_equal(sol, [.5, .5])
        b = identity(2)
        sol = solve_triangular(A, b, lower=True, trans=1)
        assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])
    def test_simple_complex(self):
        """
        solve_triangular on a simple 2x2 complex matrix
        """
        A = array([[1 + 1j, 0], [1j, 2]])
        b = identity(2)
        sol = solve_triangular(A, b, lower=True, trans=1)
        assert_array_almost_equal(sol, [[.5 - .5j, -.25 - .25j], [0, 0.5]])
class TestInv(TestCase):
    """Tests for scipy.linalg.inv: checks a @ inv(a) == identity for
    fixed and random, real and complex matrices."""
    def test_simple(self):
        a = [[1, 2], [3, 4]]
        a_inv = inv(a)
        assert_array_almost_equal(dot(a, a_inv),
                                  [[1, 0], [0, 1]])
        a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]]
        a_inv = inv(a)
        assert_array_almost_equal(dot(a, a_inv),
                                  [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    def test_random(self):
        n = 20
        for i in range(4):
            a = random([n, n])
            # Diagonal dominance keeps the random matrix invertible.
            for i in range(n): a[i, i] = 20 * (.1 + a[i, i])
            a_inv = inv(a)
            assert_array_almost_equal(dot(a, a_inv),
                                      identity(n))
    def test_simple_complex(self):
        a = [[1, 2], [3, 4j]]
        a_inv = inv(a)
        assert_array_almost_equal(dot(a, a_inv),
                                  [[1, 0], [0, 1]])
    def test_random_complex(self):
        n = 20
        for i in range(4):
            a = random([n, n]) + 2j * random([n, n])
            for i in range(n): a[i, i] = 20 * (.1 + a[i, i])
            a_inv = inv(a)
            assert_array_almost_equal(dot(a, a_inv),
                                      identity(n))
class TestDet(TestCase):
    """Tests for scipy.linalg.det, cross-checked against numpy.linalg.det
    on random real and complex matrices."""
    def test_simple(self):
        a = [[1, 2], [3, 4]]
        a_det = det(a)
        assert_almost_equal(a_det, -2.0)
    def test_simple_complex(self):
        a = [[1, 2], [3, 4j]]
        a_det = det(a)
        assert_almost_equal(a_det, -6 + 4j)
    def test_random(self):
        basic_det = linalg.det
        n = 20
        for i in range(4):
            a = random([n, n])
            d1 = det(a)
            d2 = basic_det(a)
            assert_almost_equal(d1, d2)
    def test_random_complex(self):
        basic_det = linalg.det
        n = 20
        for i in range(4):
            a = random([n, n]) + 2j * random([n, n])
            d1 = det(a)
            d2 = basic_det(a)
            assert_almost_equal(d1, d2)
def direct_lstsq(a, b, cmplx=0):
    """Reference least-squares solution via the normal equations.

    Solves (a^H a) x = a^H b, using the conjugate transpose when
    `cmplx` is truthy and the plain transpose otherwise.
    """
    ah = transpose(a)
    if cmplx:
        ah = conjugate(ah)
    return solve(dot(ah, a), dot(ah, b))
class TestLstsq(TestCase):
    """Tests for scipy.linalg.lstsq on exact, overdetermined, and
    underdetermined systems, compared against the normal-equations
    reference `direct_lstsq`."""
    def test_random_overdet_large(self):
        #bug report: Nils Wagner
        n = 200
        a = random([n, 2])
        for i in range(2): a[i, i] = 20 * (.1 + a[i, i])
        b = random([n, 3])
        x = lstsq(a, b)[0]
        assert_array_almost_equal(x, direct_lstsq(a, b))
    def test_simple_exact(self):
        a = [[1, 20], [-30, 4]]
        for b in ([[1, 0], [0, 1]], [1, 0],
                  [[2, 1], [-30, 4]]):
            x = lstsq(a, b)[0]
            assert_array_almost_equal(dot(a, x), b)
    def test_simple_overdet(self):
        a = [[1, 2], [4, 5], [3, 4]]
        b = [1, 2, 3]
        x, res, r, s = lstsq(a, b)
        assert_array_almost_equal(x, direct_lstsq(a, b))
        # res must equal the sum of squared residuals.
        assert_almost_equal((abs(dot(a, x) - b)**2).sum(axis=0), res)
    def test_simple_overdet_complex(self):
        a = [[1 + 2j, 2], [4, 5], [3, 4]]
        b = [1, 2 + 4j, 3]
        x, res, r, s = lstsq(a, b)
        assert_array_almost_equal(x, direct_lstsq(a, b, cmplx=1))
        assert_almost_equal(res, (abs(dot(a, x) - b)**2).sum(axis=0))
    def test_simple_underdet(self):
        a = [[1, 2, 3], [4, 5, 6]]
        b = [1, 2]
        x, res, r, s = lstsq(a, b)
        #XXX: need independent check
        assert_array_almost_equal(x, [[-0.05555556],
                                      [0.11111111], [0.27777778]])
    def test_random_exact(self):
        n = 20
        a = random([n, n])
        for i in range(n): a[i, i] = 20 * (.1 + a[i, i])
        for i in range(4):
            b = random([n, 3])
            x = lstsq(a, b)[0]
            assert_array_almost_equal(dot(a, x), b)
    def test_random_complex_exact(self):
        n = 20
        a = random([n, n]) + 1j * random([n, n])
        for i in range(n): a[i, i] = 20 * (.1 + a[i, i])
        for i in range(2):
            b = random([n, 3])
            x = lstsq(a, b)[0]
            assert_array_almost_equal(dot(a, x), b)
    def test_random_overdet(self):
        n = 20
        m = 15
        a = random([n, m])
        for i in range(m): a[i, i] = 20 * (.1 + a[i, i])
        for i in range(4):
            b = random([n, 3])
            x, res, r, s = lstsq(a, b)
            assert_(r == m, 'unexpected efficient rank')
            #XXX: check definition of res
            assert_array_almost_equal(x, direct_lstsq(a, b))
    def test_random_complex_overdet(self):
        n = 20
        m = 15
        a = random([n, m]) + 1j * random([n, m])
        for i in range(m):
            a[i, i] = 20 * (.1 + a[i, i])
        for i in range(2):
            b = random([n, 3])
            x, res, r, s = lstsq(a, b)
            assert_(r == m, 'unexpected efficient rank')
            #XXX: check definition of res
            assert_array_almost_equal(x, direct_lstsq(a, b, 1))
class TestPinv(TestCase):
    """Tests for the pseudo-inverse: pinv (least-squares based) and
    pinv2 (SVD based) must agree, including on singular and
    non-square matrices."""
    def test_simple(self):
        a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
        a_pinv = pinv(a)
        assert_array_almost_equal(dot(a, a_pinv), [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        a_pinv = pinv2(a)
        assert_array_almost_equal(dot(a, a_pinv), [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    def test_simple_0det(self):
        # Singular matrix: pinv and pinv2 must still agree.
        a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 9]])
        a_pinv = pinv(a)
        a_pinv2 = pinv2(a)
        assert_array_almost_equal(a_pinv, a_pinv2)
    def test_simple_cols(self):
        a = array([[1, 2, 3], [4, 5, 6.]])
        a_pinv = pinv(a)
        a_pinv2 = pinv2(a)
        assert_array_almost_equal(a_pinv, a_pinv2)
    def test_simple_rows(self):
        a = array([[1, 2], [3, 4], [5, 6]])
        a_pinv = pinv(a)
        a_pinv2 = pinv2(a)
        assert_array_almost_equal(a_pinv, a_pinv2)
class TestNorm(object):
    """Tests for the vector 0-"norm" (count of nonzero entries)."""
    def test_zero_norm(self):
        for vector, nonzero_count in (([1, 0, 3], 2), ([1, 2, 3], 3)):
            assert_equal(norm(vector, 0), nonzero_count)
class TestOverwrite(object):
    """Checks that linalg routines do not overwrite their inputs by
    default. All tests are marked known-failures on IronPython ("cli"),
    which lacks __array_interface__."""
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_solve(self):
        assert_no_overwrite(solve, [(3, 3), (3,)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_solve_triangular(self):
        assert_no_overwrite(solve_triangular, [(3, 3), (3,)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_solve_banded(self):
        assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b),
                            [(4, 6), (6,)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_solveh_banded(self):
        assert_no_overwrite(solveh_banded, [(2, 6), (6,)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_inv(self):
        assert_no_overwrite(inv, [(3, 3)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_det(self):
        assert_no_overwrite(det, [(3, 3)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_lstsq(self):
        assert_no_overwrite(lstsq, [(3, 2), (3,)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_pinv(self):
        assert_no_overwrite(pinv, [(3, 3)])
    @dec.knownfailureif(sys.platform == "cli", "__array_interface__ is not implemented for .NET yet")
    def test_pinv2(self):
        assert_no_overwrite(pinv2, [(3, 3)])
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
| gpl-3.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/r-trimcluster/package.py | 5 | 1642 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTrimcluster(RPackage):
    """trimcluster: Cluster analysis with trimming"""

    homepage = "http://www.homepages.ucl.ac.uk/~ucakche"
    url = "https://cran.r-project.org/src/contrib/trimcluster_0.1-2.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/trimcluster"

    # (release version, checksum of the source tarball)
    version('0.1-2', '7617920e224bd18f5b87db38a3116ec2')

    # Minimum R version required by the package.
    depends_on('r@1.9.0:')
| lgpl-2.1 |
michaelaye/vispy | vispy/ext/_bundled/husl.py | 21 | 7663 | """
HUSL colors python implementation.
Source: https://github.com/husl-colors/husl.py
Copyright (c) 2015 Alexei Boronine
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import operator
import math
__version__ = "4.0.2"
m = [
[3.240969941904521, -1.537383177570093, -0.498610760293],
[-0.96924363628087, 1.87596750150772, 0.041555057407175],
[0.055630079696993, -0.20397695888897, 1.056971514242878],
]
m_inv = [
[0.41239079926595, 0.35758433938387, 0.18048078840183],
[0.21263900587151, 0.71516867876775, 0.072192315360733],
[0.019330818715591, 0.11919477979462, 0.95053215224966],
]
refX = 0.95045592705167
refY = 1.0
refZ = 1.089057750759878
refU = 0.19783000664283
refV = 0.46831999493879
kappa = 903.2962962
epsilon = 0.0088564516
# Public API
def husl_to_rgb(h, s, l):
    """HUSL (hue 0-360, saturation 0-100, lightness 0-100) -> RGB in [0, 1]."""
    return lch_to_rgb(*husl_to_lch([h, s, l]))


def husl_to_hex(h, s, l):
    """HUSL -> '#rrggbb' hex string."""
    return rgb_to_hex(husl_to_rgb(h, s, l))


def rgb_to_husl(r, g, b):
    """RGB in [0, 1] -> HUSL."""
    return lch_to_husl(rgb_to_lch(r, g, b))


def hex_to_husl(hex):
    """'#rrggbb' (or 'rrggbb') hex string -> HUSL."""
    return rgb_to_husl(*hex_to_rgb(hex))


def huslp_to_rgb(h, s, l):
    """Pastel HUSL (HUSLp: hue-independent max chroma) -> RGB in [0, 1]."""
    return lch_to_rgb(*huslp_to_lch([h, s, l]))


def huslp_to_hex(h, s, l):
    """Pastel HUSL -> '#rrggbb' hex string."""
    return rgb_to_hex(huslp_to_rgb(h, s, l))


def rgb_to_huslp(r, g, b):
    """RGB in [0, 1] -> pastel HUSL."""
    return lch_to_huslp(rgb_to_lch(r, g, b))


def hex_to_huslp(hex):
    """'#rrggbb' (or 'rrggbb') hex string -> pastel HUSL."""
    return rgb_to_huslp(*hex_to_rgb(hex))


def lch_to_rgb(l, c, h):
    """LCH -> companded RGB in [0, 1], converting via LUV and XYZ."""
    return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))


def rgb_to_lch(r, g, b):
    """Companded RGB in [0, 1] -> LCH, converting via XYZ and LUV."""
    return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))
def get_bounds(L):
    """Return six RGB-gamut boundary lines, as (slope, intercept) pairs
    in the chroma plane, for a given lightness ``L``.

    Each of the three rows of ``m`` contributes two lines: one for the
    corresponding channel hitting 0 and one for it hitting 1
    (the ``t in [0, 1]`` loop).
    """
    sub1 = ((L + 16.0) ** 3.0) / 1560896.0
    # Piecewise: cubic form above the epsilon threshold, linear form below.
    sub2 = sub1 if sub1 > epsilon else L / kappa
    ret = []
    for [m1, m2, m3] in m:
        for t in [0, 1]:
            top1 = (284517.0 * m1 - 94839.0 * m3) * sub2
            top2 = ((838422.0 * m3 + 769860.0 * m2 + 731718.0 * m1)
                    * L * sub2 - 769860.0 * t * L)
            bottom = (632260.0 * m3 - 126452.0 * m2) * sub2 + 126452.0 * t
            ret.append((top1 / bottom, top2 / bottom))
    return ret
def intersect_line_line(line1, line2):
    """Return the x coordinate where two (slope, intercept) lines meet."""
    slope1, intercept1 = line1
    slope2, intercept2 = line2
    return (intercept1 - intercept2) / (slope2 - slope1)
def distance_from_pole(point):
    """Euclidean distance of *point* from the origin of the chroma plane."""
    x, y = point
    return math.sqrt(x ** 2 + y ** 2)
def length_of_ray_until_intersect(theta, line):
    """Length along the ray at angle *theta* (radians) from the origin to
    the given (slope, intercept) line, or None if the ray never reaches it.
    """
    slope, intercept = line
    length = intercept / (math.sin(theta) - slope * math.cos(theta))
    return None if length < 0 else length
def max_safe_chroma_for_L(L):
    """Largest chroma that stays inside the RGB gamut for lightness ``L``
    regardless of hue: distance from the origin to the nearest bound line.
    """
    def foot_distance(bound):
        slope, intercept = bound
        # x coordinate of the perpendicular foot from the origin.
        x = intersect_line_line((slope, intercept), (-1.0 / slope, 0.0))
        return distance_from_pole((x, intercept + x * slope))

    return min(foot_distance(bound) for bound in get_bounds(L))
def max_chroma_for_LH(L, H):
    """Largest in-gamut chroma for lightness ``L`` at hue ``H`` (degrees)."""
    theta = H / 360.0 * math.pi * 2.0
    hits = [length_of_ray_until_intersect(theta, bound)
            for bound in get_bounds(L)]
    return min(hit for hit in hits if hit is not None)
def dot_product(a, b):
    """Dot product of two sequences (truncated to the shorter one)."""
    return sum(x * y for x, y in zip(a, b))
def f(t):
    """CIE-style lightness function: map luminance (relative to refY) to L."""
    if t <= epsilon:
        # Linear segment near black.
        return (t / refY) * kappa
    return 116 * math.pow(t / refY, 1.0 / 3.0) - 16.0
def f_inv(t):
    """Inverse of ``f``: map lightness L back to luminance."""
    if t <= 8:
        # Linear segment near black.
        return refY * t / kappa
    return refY * math.pow((t + 16.0) / 116.0, 3.0)
def from_linear(c):
    """Apply the sRGB companding curve to a linear channel value."""
    if c > 0.0031308:
        return 1.055 * math.pow(c, 1.0 / 2.4) - 0.055
    # Linear toe segment near zero.
    return 12.92 * c
def to_linear(c):
    """Invert the sRGB companding curve back to a linear channel value."""
    a = 0.055
    if c <= 0.04045:
        # Linear toe segment near zero.
        return c / 12.92
    return math.pow((c + a) / (1.0 + a), 2.4)
def rgb_prepare(triple):
    """Scale float channels in [0, 1] to rounded 0-255 values.

    Raises Exception for channels outside [0, 1] beyond rounding noise.
    """
    prepared = []
    for channel in triple:
        channel = round(channel, 3)
        if channel < -0.0001 or channel > 1.0001:
            raise Exception("Illegal RGB value %f" % channel)
        # Clamp tiny rounding excursions back into [0, 1].
        channel = min(max(channel, 0), 1)
        # The +0.001 nudge compensates for Python 3's round-half-to-even,
        # which would otherwise round some .5 values down (Python 2
        # rounded them up); keeps the two versions in agreement.
        prepared.append(round(channel * 255 + 0.001, 0))
    return prepared
def hex_to_rgb(hex):
    """Parse a '#rrggbb' (or 'rrggbb') string into [r, g, b] floats in [0, 1]."""
    if hex.startswith('#'):
        hex = hex[1:]
    return [int(hex[i:i + 2], 16) / 255.0 for i in (0, 2, 4)]
def rgb_to_hex(triple):
    """Format an RGB triple of floats in [0, 1] as a '#rrggbb' string.

    Bug fix: ``rgb_prepare`` returns floats (``round(x, 0)`` yields a
    float on Python 3), and ``'%02x' % 128.0`` raises TypeError on
    Python 3, so each channel is cast to int before formatting.
    """
    [r, g, b] = triple
    return '#%02x%02x%02x' % tuple(int(ch) for ch in rgb_prepare([r, g, b]))
def xyz_to_rgb(triple):
    """Convert CIE XYZ to companded RGB via the matrix ``m``."""
    return [from_linear(dot_product(row, triple)) for row in m]
def rgb_to_xyz(triple):
    """Convert companded RGB to CIE XYZ via the inverse matrix ``m_inv``."""
    linear = [to_linear(channel) for channel in triple]
    return [dot_product(row, linear) for row in m_inv]
def xyz_to_luv(triple):
    """Convert CIE XYZ to CIE LUV."""
    X, Y, Z = triple
    # All-zero input is pure black; bail out before dividing by zero.
    if X == Y == Z == 0.0:
        return [0.0, 0.0, 0.0]
    denom = X + (15.0 * Y) + (3.0 * Z)
    varU = (4.0 * X) / denom
    varV = (9.0 * Y) / denom
    L = f(Y)
    # Black (L == 0) would create a divide-by-zero downstream.
    if L == 0.0:
        return [0.0, 0.0, 0.0]
    return [L, 13.0 * L * (varU - refU), 13.0 * L * (varV - refV)]
def luv_to_xyz(triple):
    """Convert CIE LUV back to CIE XYZ (inverse of ``xyz_to_luv``)."""
    L, U, V = triple
    if L == 0:
        # Pure black: U and V carry no information.
        return [0.0, 0.0, 0.0]
    varY = f_inv(L)
    varU = U / (13.0 * L) + refU
    varV = V / (13.0 * L) + refV
    Y = varY * refY
    X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)
    Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)
    return [X, Y, Z]
def luv_to_lch(triple):
    """Convert CIE LUV to cylindrical LCH; hue in degrees, [0, 360)."""
    L, U, V = triple
    C = math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0))
    H = math.degrees(math.atan2(V, U))
    if H < 0.0:
        H += 360.0
    return [L, C, H]
def lch_to_luv(triple):
    """Convert cylindrical LCH back to CIE LUV."""
    L, C, H = triple
    angle = math.radians(H)
    return [L, math.cos(angle) * C, math.sin(angle) * C]
def husl_to_lch(triple):
    """Convert HUSL to LCH, scaling S by the max in-gamut chroma at (L, H)."""
    H, S, L = triple
    if L > 99.9999999:
        return [100, 0.0, H]
    if L < 0.00000001:
        return [0.0, 0.0, H]
    return [L, max_chroma_for_LH(L, H) / 100.0 * S, H]
def lch_to_husl(triple):
    """Convert LCH to HUSL, expressing C as a percentage of max chroma."""
    L, C, H = triple
    if L > 99.9999999:
        return [H, 0.0, 100.0]
    if L < 0.00000001:
        return [H, 0.0, 0.0]
    return [H, C / max_chroma_for_LH(L, H) * 100.0, L]
def huslp_to_lch(triple):
    """Convert pastel HUSL to LCH using the hue-independent safe chroma."""
    H, S, L = triple
    if L > 99.9999999:
        return [100, 0.0, H]
    if L < 0.00000001:
        return [0.0, 0.0, H]
    return [L, max_safe_chroma_for_L(L) / 100.0 * S, H]
def lch_to_huslp(triple):
    """Convert LCH to pastel HUSL using the hue-independent safe chroma."""
    L, C, H = triple
    if L > 99.9999999:
        return [H, 0.0, 100.0]
    if L < 0.00000001:
        return [H, 0.0, 0.0]
    return [H, C / max_safe_chroma_for_L(L) * 100.0, L]
| bsd-3-clause |
ryanss/holidays.py | holidays/countries/argentina.py | 2 | 5803 | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2020
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, FR, TH
from holidays.constants import WEEKEND
from holidays.constants import JAN, MAR, APR, MAY, JUN, JUL, AUG, OCT, \
NOV, DEC
from holidays.holiday_base import HolidayBase
class Argentina(HolidayBase):
    """Argentina national holidays.

    References:
        https://www.argentina.gob.ar/interior/feriados
        https://es.wikipedia.org/wiki/Anexo:D%C3%ADas_feriados_en_Argentina
        http://servicios.lanacion.com.ar/feriados
        https://www.clarin.com/feriados/
    """

    def __init__(self, **kwargs):
        self.country = 'AR'
        HolidayBase.__init__(self, **kwargs)

    def _add_observed(self, dt, name):
        """Register *name* on *dt* unless observance is disabled and *dt*
        falls on a weekend (this guard was repeated for nearly every
        holiday in the original implementation).
        """
        if self.observed or dt.weekday() not in WEEKEND:
            self[dt] = name

    def _populate(self, year):
        easter_date = easter(year)

        # New Year's Day
        self._add_observed(date(year, JAN, 1), "Año Nuevo [New Year's Day]")

        # Carnival days (always added, regardless of `observed`)
        name = "Día de Carnaval [Carnival's Day]"
        self[easter_date - rd(days=48)] = name
        self[easter_date - rd(days=47)] = name

        # Memory's National Day for the Truth and Justice
        self._add_observed(
            date(year, MAR, 24),
            "Día Nacional de la Memoria por la Verdad y la Justicia "
            "[Memory's National Day for the Truth and Justice]")

        # Holy Week: Thursday and Friday are always added; Easter itself
        # is subject to the weekend-observance guard.
        self[easter_date + rd(weekday=TH(-1))] = \
            "Semana Santa (Jueves Santo) [Holy day (Holy Thursday)]"
        self[easter_date + rd(weekday=FR(-1))] = \
            "Semana Santa (Viernes Santo) [Holy day (Holy Friday)]"
        self._add_observed(easter_date, 'Día de Pascuas [Easter Day]')

        # Veterans Day and the Fallen in the Malvinas War
        self._add_observed(
            date(year, APR, 2),
            "Día del Veterano y de los Caidos en la Guerra de Malvinas "
            "[Veterans Day and the Fallen in the Malvinas War]")

        # Labor Day
        self._add_observed(date(year, MAY, 1), "Día del Trabajo [Labour Day]")

        # May Revolution Day
        self._add_observed(
            date(year, MAY, 25),
            "Día de la Revolucion de Mayo [May Revolution Day]")

        # Day Pass to the Immortality of General Martín Miguel de Güemes
        self._add_observed(
            date(year, JUN, 17),
            "Día Pase a la Inmortalidad "
            "del General Martín Miguel de Güemes [Day Pass "
            "to the Immortality of General Martín Miguel de Güemes]")

        # Day Pass to the Immortality of General D. Manuel Belgrano
        self._add_observed(
            date(year, JUN, 20),
            "Día Pase a la Inmortalidad "
            "del General D. Manuel Belgrano [Day Pass "
            "to the Immortality of General D. Manuel Belgrano]")

        # Independence Day
        self._add_observed(
            date(year, JUL, 9),
            "Día de la Independencia [Independence Day]")

        # Day Pass to the Immortality of General D. José de San Martin
        self._add_observed(
            date(year, AUG, 17),
            "Día Pase a la Inmortalidad "
            "del General D. José de San Martin [Day Pass "
            "to the Immortality of General D. José de San Martin]")

        # Respect for Cultural Diversity Day (named Columbus Day
        # before 2010, per the year guard below).
        if self.observed or date(year, OCT, 12).weekday() not in WEEKEND:
            if year < 2010:
                self[date(year, OCT, 12)] = "Día de la Raza [Columbus day]"
            else:
                self[date(year, OCT, 12)] = \
                    "Día del Respeto a la Diversidad Cultural " \
                    "[Respect for Cultural Diversity Day]"

        # National Sovereignty Day (only from 2010 onward)
        if year >= 2010:
            self._add_observed(
                date(year, NOV, 20),
                "Día Nacional de la Soberanía [National Sovereignty Day]")

        # Immaculate Conception
        self._add_observed(
            date(year, DEC, 8),
            "La Inmaculada Concepción [Immaculate Conception]")

        # Christmas (always added)
        self[date(year, DEC, 25)] = "Navidad [Christmas]"
class AR(Argentina):
    """Alias of :class:`Argentina` under its two-letter country code."""
    pass
class ARG(Argentina):
    """Alias of :class:`Argentina` under its three-letter country code."""
    pass
| mit |
ionrock/designate | functionaltests/api/v2/clients/transfer_accepts_client.py | 3 | 2619 | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functionaltests.api.v2.models.transfer_accepts_model import \
TransferAcceptsModel
from functionaltests.api.v2.models.transfer_accepts_model import \
TransferAcceptsListModel
from functionaltests.common.client import ClientMixin
class TransferAcceptClient(ClientMixin):
    """REST client for the /v2/zones/tasks/transfer_accepts API."""

    @classmethod
    def transfer_accepts_uri(cls, filters=None):
        """Collection URI, with optional query-string filters appended."""
        url = "/v2/zones/tasks/transfer_accepts"
        if filters:
            url = cls.add_filters(url, filters)
        return url

    @classmethod
    def transfer_accept_uri(cls, transfer_request_id):
        """Member URI for a single transfer accept.

        Bug fix: the format string used index ``{1}`` with a single
        positional argument, which raises IndexError; it must be ``{0}``.
        """
        return "/v2/zones/tasks/transfer_accepts/{0}".format(
            transfer_request_id)

    def list_transfer_accepts(self, zone_id, filters=None, **kwargs):
        """GET the transfer-accepts collection.

        ``zone_id`` is unused (the URI is not zone-scoped) but kept for
        signature parity with the other methods.
        """
        resp, body = self.client.get(
            self.transfer_accepts_uri(filters), **kwargs)
        return self.deserialize(resp, body, TransferAcceptsListModel)

    def get_transfer_accept(self, zone_id, transfer_request_id, **kwargs):
        """GET a single transfer accept by id."""
        resp, body = self.client.get(
            self.transfer_accept_uri(transfer_request_id), **kwargs)
        return self.deserialize(resp, body, TransferAcceptsModel)

    def post_transfer_accept(self, transfer_request_model, **kwargs):
        """POST a new transfer accept."""
        resp, body = self.client.post(
            self.transfer_accepts_uri(),
            body=transfer_request_model.to_json(),
            **kwargs)
        return self.deserialize(resp, body, TransferAcceptsModel)

    def put_transfer_accept(self, zone_id, transfer_request_id,
                            transfer_request_model, **kwargs):
        """PUT (update) an existing transfer accept."""
        resp, body = self.client.put(
            self.transfer_accept_uri(transfer_request_id),
            body=transfer_request_model.to_json(), **kwargs)
        return self.deserialize(resp, body, TransferAcceptsModel)

    def delete_transfer_accept(self, zone_id, transfer_request_id, **kwargs):
        """DELETE a transfer accept.

        Bug fix: ``transfer_accept_uri`` takes only the transfer request
        id; the original also passed ``zone_id``, raising TypeError.
        """
        resp, body = self.client.delete(
            self.transfer_accept_uri(transfer_request_id), **kwargs)
        return self.deserialize(resp, body, TransferAcceptsModel)
| apache-2.0 |
projectcalico/calico-nova | nova/tests/functional/v3/test_server_metadata.py | 16 | 3356 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.v3 import test_servers
class ServersMetadataJsonTest(test_servers.ServersSampleBase):
    """API-sample tests for the /servers/{id}/metadata endpoints."""
    extends_name = 'core_only'
    sample_dir = 'server-metadata'

    def _create_and_set(self, subs):
        """Boot a server, PUT all metadata from the sample, return its uuid."""
        uuid = self._post_server()
        # NOTE(review): this URL is prefixed with the project id while the
        # methods below use bare 'servers/...' paths — confirm which form
        # the sample framework expects.
        response = self._do_put('%s/servers/%s/metadata' %
                                (self.api.project_id, uuid),
                                'server-metadata-all-req',
                                subs)
        self._verify_response('server-metadata-all-resp', subs, response, 200)
        return uuid

    def generalize_subs(self, subs, vanilla_regexes):
        # Accept either sample value when matching recorded responses.
        subs['value'] = '(Foo|Bar) Value'
        return subs

    def test_metadata_put_all(self):
        # Test setting all metadata for a server.
        subs = {'value': 'Foo Value'}
        self._create_and_set(subs)

    def test_metadata_post_all(self):
        # Test updating all metadata for a server.
        subs = {'value': 'Foo Value'}
        uuid = self._create_and_set(subs)
        subs['value'] = 'Bar Value'
        response = self._do_post('servers/%s/metadata' % uuid,
                                 'server-metadata-all-req',
                                 subs)
        self._verify_response('server-metadata-all-resp', subs, response, 200)

    def test_metadata_get_all(self):
        # Test getting all metadata for a server.
        subs = {'value': 'Foo Value'}
        uuid = self._create_and_set(subs)
        response = self._do_get('servers/%s/metadata' % uuid)
        self._verify_response('server-metadata-all-resp', subs, response, 200)

    def test_metadata_put(self):
        # Test putting an individual metadata item for a server.
        subs = {'value': 'Foo Value'}
        uuid = self._create_and_set(subs)
        subs['value'] = 'Bar Value'
        response = self._do_put('servers/%s/metadata/foo' % uuid,
                                'server-metadata-req',
                                subs)
        self._verify_response('server-metadata-resp', subs, response, 200)

    def test_metadata_get(self):
        # Test getting an individual metadata item for a server.
        subs = {'value': 'Foo Value'}
        uuid = self._create_and_set(subs)
        response = self._do_get('servers/%s/metadata/foo' % uuid)
        self._verify_response('server-metadata-resp', subs, response, 200)

    def test_metadata_delete(self):
        # Test deleting an individual metadata item for a server.
        subs = {'value': 'Foo Value'}
        uuid = self._create_and_set(subs)
        response = self._do_delete('servers/%s/metadata/foo' % uuid)
        self.assertEqual(response.status_code, 204)
        self.assertEqual(response.content, '')
| apache-2.0 |
glovebx/odoo | addons/hr_payroll/res_config.py | 441 | 1294 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class human_resources_configuration(osv.osv_memory):
    """HR settings extension adding the payroll/accounting link toggle."""
    _inherit = 'hr.config.settings'
    _columns = {
        # Boolean settings field; enabling it installs hr_payroll_account.
        'module_hr_payroll_account': fields.boolean('Link your payroll to accounting system',
            help="""Create journal entries from payslips"""),
    }
| agpl-3.0 |
AWhetter/gyp | test/generator-output/gyptest-symlink.py | 41 | 1226 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target when the --generator-output= option is used to put
the build configuration files in a separate directory tree referenced by a
symlink.
"""
import TestGyp
import os
test = TestGyp.TestGyp()

# Symlinks are required for this scenario; skip on platforms without them.
if not hasattr(os, 'symlink'):
    test.skip_test('Missing os.symlink -- skipping test.\n')

# Make the source tree read-only except for the deeper build directory,
# so generated files can only land where --generator-output points.
test.writable(test.workpath('src'), False)
test.writable(test.workpath('src/subdir2/deeper/build'), True)

# Real output directory, reached through a symlink.
test.subdir(test.workpath('build'))
test.subdir(test.workpath('build/deeper'))
test.symlink('build/deeper', test.workpath('symlink'))

test.writable(test.workpath('build/deeper'), True)

test.run_gyp('deeper.gyp',
             '-Dset_symroot=2',
             '--generator-output=' + test.workpath('symlink'),
             chdir='src/subdir2/deeper')

chdir = 'symlink'
test.build('deeper.gyp', test.ALL, chdir=chdir)

# NOTE(review): the xcode generator appears to locate the built product
# relative to the original gyp directory rather than the generator-output
# tree — confirm against the other generator-output tests.
if test.format == 'xcode':
    chdir = 'src/subdir2/deeper'
test.run_built_executable('deeper',
                          chdir=chdir,
                          stdout="Hello from deeper.c\n")

test.pass_test()
jeffmahoney/crash-python | contrib/lsbtrfs-cmd.py | 1 | 1665 | #!/usr/bin/python3
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
path = "/lib/modules/4.4.20-6.gd2e08c5-default/kernel/fs/btrfs/btrfs.ko"
from crash.types.list import list_for_each_entry
import gdb
import uuid
from crash.commands import CrashCommand
class LSBtrfs(CrashCommand):
    """List mounted btrfs filesystems with superblock address, id and fsid.

    If gdb does not yet know ``struct btrfs_fs_info``, the btrfs module's
    symbol file is loaded on demand from the hard-coded ``path`` above.
    """

    def __init__(self):
        CrashCommand.__init__(self, "lsbtrfs", None)

    def execute(self, argv):
        super_blocks = gdb.lookup_symbol('super_blocks', None)[0].value()
        sbtype = gdb.lookup_type('struct super_block')
        try:
            btrfs_fs_info_type = gdb.lookup_type('struct btrfs_fs_info')
        except gdb.error:
            # btrfs symbols not loaded yet: find the module's load address
            # on the kernel module list, add its symbol file, then retry.
            module_type = gdb.lookup_type('struct module')
            modules = gdb.lookup_symbol('modules', None)[0].value()
            for module in list_for_each_entry(modules, module_type, 'list'):
                if module['name'].string() == "btrfs":
                    addr = module['module_core']
                    gdb.execute("add-symbol-file {} {}".format(path, addr))
            btrfs_fs_info_type = gdb.lookup_type('struct btrfs_fs_info')
        for sb in list_for_each_entry(super_blocks, sbtype, 's_list'):
            if sb['s_type']['name'].string() == "btrfs":
                fs_info = gdb.Value(sb['s_fs_info']).cast(
                    btrfs_fs_info_type.pointer())
                # Reassemble the 16-byte fsid array into a UUID.
                u = 0
                for i in range(0, 16):
                    u <<= 8
                    u += int(fs_info['fsid'][i])
                u = uuid.UUID(int=u)
                # Bug fix: a Python 2 `print` statement under the file's
                # python3 shebang is a SyntaxError; use the function form.
                print("{} -> {} {}".format(sb.address, sb['s_id'].string(), u))

LSBtrfs()
| gpl-2.0 |
a-parhom/edx-platform | openedx/core/djangoapps/ccxcon/tests/test_tasks.py | 4 | 1431 | """
Tests for the CCXCon celery tasks
"""
import mock
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.ccxcon import api, tasks
class CCXConTaskTestCase(TestCase):
    """
    Tests for CCXCon tasks.
    """
    shard = 2

    @mock.patch('openedx.core.djangoapps.ccxcon.api.course_info_to_ccxcon')
    def test_update_ccxcon_task_ok(self, mock_citc):
        """
        Test task with no problems
        """
        mock_response = mock.Mock()
        mock_citc.return_value = mock_response
        course_id = u'course-v1:OrgFoo+CN199+CR-FALL01'
        tasks.update_ccxcon.delay(course_id)
        # The task must forward the parsed CourseKey exactly once.
        mock_citc.assert_called_once_with(CourseKey.from_string(course_id))

    @mock.patch('openedx.core.djangoapps.ccxcon.api.course_info_to_ccxcon')
    def test_update_ccxcon_task_retry(self, mock_citc):
        """
        Test task with exception that triggers a retry
        """
        mock_citc.side_effect = api.CCXConnServerError()
        course_id = u'course-v1:OrgFoo+CN199+CR-FALL01'
        tasks.update_ccxcon.delay(course_id)
        # Six total invocations — presumably the initial attempt plus five
        # retries configured on the task; confirm against tasks.update_ccxcon.
        self.assertEqual(mock_citc.call_count, 6)
        course_key = CourseKey.from_string(course_id)
        # Every attempt must receive the same single positional CourseKey.
        for call in mock_citc.call_args_list:
            c_args, c_kwargs = call
            self.assertEqual(c_kwargs, {})
            self.assertEqual(len(c_args), 1)
            self.assertEqual(c_args[0], course_key)
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.