| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
aequitas/home-assistant | homeassistant/components/lightwave/switch.py | 7 | 1682 | """Support for LightwaveRF switches."""
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import CONF_NAME
from . import LIGHTWAVE_LINK
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Find and return LightWave switches."""
if not discovery_info:
return
switches = []
lwlink = hass.data[LIGHTWAVE_LINK]
for device_id, device_config in discovery_info.items():
name = device_config[CONF_NAME]
switches.append(LWRFSwitch(name, device_id, lwlink))
async_add_entities(switches)
class LWRFSwitch(SwitchDevice):
"""Representation of a LightWaveRF switch."""
def __init__(self, name, device_id, lwlink):
"""Initialize LWRFSwitch entity."""
self._name = name
self._device_id = device_id
self._state = None
self._lwlink = lwlink
@property
def should_poll(self):
"""No polling needed for a LightWave light."""
return False
@property
def name(self):
"""Lightwave switch name."""
return self._name
@property
def is_on(self):
"""Lightwave switch is on state."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the LightWave switch on."""
self._state = True
self._lwlink.turn_on_switch(self._device_id, self._name)
self.async_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the LightWave switch off."""
self._state = False
self._lwlink.turn_off(self._device_id, self._name)
self.async_schedule_update_ha_state()
| apache-2.0 |
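A minimal sketch of the on/off flow the entity above drives through the link object. The FakeLink stub and the standalone Switch class here are illustrative assumptions for running outside Home Assistant, not part of its API.
# Standalone sketch; FakeLink is a hypothetical stand-in for the lwlink object.
class FakeLink:
    def __init__(self):
        self.calls = []
    def turn_on_switch(self, device_id, name):
        self.calls.append(('on', device_id, name))
    def turn_off(self, device_id, name):
        self.calls.append(('off', device_id, name))

class Switch:
    """Mirrors LWRFSwitch's state handling without the hass base class."""
    def __init__(self, name, device_id, lwlink):
        self._name, self._device_id, self._lwlink = name, device_id, lwlink
        self._state = None
    def turn_on(self):
        self._state = True
        self._lwlink.turn_on_switch(self._device_id, self._name)
    def turn_off(self):
        self._state = False
        self._lwlink.turn_off(self._device_id, self._name)

link = FakeLink()
sw = Switch('lamp', 'R1D1', link)
sw.turn_on()
assert sw._state and link.calls[-1][0] == 'on'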
bdh1011/wau | venv/lib/python2.7/site-packages/celery/tests/backends/test_base.py | 6 | 15581 | from __future__ import absolute_import
import sys
import types
from contextlib import contextmanager
from celery.exceptions import ChordError
from celery.five import items, range
from celery.utils import serialization
from celery.utils.serialization import subclass_exception
from celery.utils.serialization import find_pickleable_exception as fnpe
from celery.utils.serialization import UnpickleableExceptionWrapper
from celery.utils.serialization import get_pickleable_exception as gpe
from celery import states
from celery import group
from celery.backends.base import (
BaseBackend,
KeyValueStoreBackend,
DisabledBackend,
)
from celery.result import result_from_tuple
from celery.utils import uuid
from celery.tests.case import AppCase, Mock, SkipTest, patch
class wrapobject(object):
def __init__(self, *args, **kwargs):
self.args = args
if sys.version_info[0] == 3 or getattr(sys, 'pypy_version_info', None):
Oldstyle = None
else:
Oldstyle = types.ClassType('Oldstyle', (), {})
Unpickleable = subclass_exception('Unpickleable', KeyError, 'foo.module')
Impossible = subclass_exception('Impossible', object, 'foo.module')
Lookalike = subclass_exception('Lookalike', wrapobject, 'foo.module')
class test_serialization(AppCase):
def test_create_exception_cls(self):
self.assertTrue(serialization.create_exception_cls('FooError', 'm'))
self.assertTrue(serialization.create_exception_cls('FooError', 'm',
KeyError))
class test_BaseBackend_interface(AppCase):
def setup(self):
self.b = BaseBackend(self.app)
def test__forget(self):
with self.assertRaises(NotImplementedError):
self.b._forget('SOMExx-N0Nex1stant-IDxx-')
def test_forget(self):
with self.assertRaises(NotImplementedError):
self.b.forget('SOMExx-N0nex1stant-IDxx-')
def test_on_chord_part_return(self):
self.b.on_chord_part_return(None, None, None)
def test_apply_chord(self, unlock='celery.chord_unlock'):
self.app.tasks[unlock] = Mock()
self.b.apply_chord(
group(app=self.app), (), 'dakj221', None,
result=[self.app.AsyncResult(x) for x in [1, 2, 3]],
)
self.assertTrue(self.app.tasks[unlock].apply_async.call_count)
class test_exception_pickle(AppCase):
def test_oldstyle(self):
if Oldstyle is None:
raise SkipTest('py3k does not support old style classes')
self.assertTrue(fnpe(Oldstyle()))
def test_BaseException(self):
self.assertIsNone(fnpe(Exception()))
def test_get_pickleable_exception(self):
exc = Exception('foo')
self.assertEqual(gpe(exc), exc)
def test_unpickleable(self):
self.assertIsInstance(fnpe(Unpickleable()), KeyError)
self.assertIsNone(fnpe(Impossible()))
class test_prepare_exception(AppCase):
def setup(self):
self.b = BaseBackend(self.app)
def test_unpickleable(self):
x = self.b.prepare_exception(Unpickleable(1, 2, 'foo'))
self.assertIsInstance(x, KeyError)
y = self.b.exception_to_python(x)
self.assertIsInstance(y, KeyError)
def test_impossible(self):
x = self.b.prepare_exception(Impossible())
self.assertIsInstance(x, UnpickleableExceptionWrapper)
self.assertTrue(str(x))
y = self.b.exception_to_python(x)
self.assertEqual(y.__class__.__name__, 'Impossible')
if sys.version_info < (2, 5):
self.assertTrue(y.__class__.__module__)
else:
self.assertEqual(y.__class__.__module__, 'foo.module')
def test_regular(self):
x = self.b.prepare_exception(KeyError('baz'))
self.assertIsInstance(x, KeyError)
y = self.b.exception_to_python(x)
self.assertIsInstance(y, KeyError)
class KVBackend(KeyValueStoreBackend):
mget_returns_dict = False
def __init__(self, app, *args, **kwargs):
self.db = {}
super(KVBackend, self).__init__(app)
def get(self, key):
return self.db.get(key)
def set(self, key, value):
self.db[key] = value
def mget(self, keys):
if self.mget_returns_dict:
return dict((key, self.get(key)) for key in keys)
else:
return [self.get(k) for k in keys]
def delete(self, key):
self.db.pop(key, None)
class DictBackend(BaseBackend):
def __init__(self, *args, **kwargs):
BaseBackend.__init__(self, *args, **kwargs)
self._data = {'can-delete': {'result': 'foo'}}
def _restore_group(self, group_id):
if group_id == 'exists':
return {'result': 'group'}
def _get_task_meta_for(self, task_id):
if task_id == 'task-exists':
return {'result': 'task'}
def _delete_group(self, group_id):
self._data.pop(group_id, None)
class test_BaseBackend_dict(AppCase):
def setup(self):
self.b = DictBackend(app=self.app)
def test_delete_group(self):
self.b.delete_group('can-delete')
self.assertNotIn('can-delete', self.b._data)
def test_prepare_exception_json(self):
x = DictBackend(self.app, serializer='json')
e = x.prepare_exception(KeyError('foo'))
self.assertIn('exc_type', e)
e = x.exception_to_python(e)
self.assertEqual(e.__class__.__name__, 'KeyError')
self.assertEqual(str(e), "'foo'")
def test_save_group(self):
b = BaseBackend(self.app)
b._save_group = Mock()
b.save_group('foofoo', 'xxx')
b._save_group.assert_called_with('foofoo', 'xxx')
def test_forget_interface(self):
b = BaseBackend(self.app)
with self.assertRaises(NotImplementedError):
b.forget('foo')
def test_restore_group(self):
self.assertIsNone(self.b.restore_group('missing'))
self.assertIsNone(self.b.restore_group('missing'))
self.assertEqual(self.b.restore_group('exists'), 'group')
self.assertEqual(self.b.restore_group('exists'), 'group')
self.assertEqual(self.b.restore_group('exists', cache=False), 'group')
def test_reload_group_result(self):
self.b._cache = {}
self.b.reload_group_result('exists')
self.b._cache['exists'] = {'result': 'group'}
def test_reload_task_result(self):
self.b._cache = {}
self.b.reload_task_result('task-exists')
self.b._cache['task-exists'] = {'result': 'task'}
def test_fail_from_current_stack(self):
self.b.mark_as_failure = Mock()
try:
raise KeyError('foo')
except KeyError as exc:
self.b.fail_from_current_stack('task_id')
self.assertTrue(self.b.mark_as_failure.called)
args = self.b.mark_as_failure.call_args[0]
self.assertEqual(args[0], 'task_id')
self.assertIs(args[1], exc)
self.assertTrue(args[2])
def test_prepare_value_serializes_group_result(self):
self.b.serializer = 'json'
g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')])
v = self.b.prepare_value(g)
self.assertIsInstance(v, (list, tuple))
self.assertEqual(result_from_tuple(v, app=self.app), g)
v2 = self.b.prepare_value(g[0])
self.assertIsInstance(v2, (list, tuple))
self.assertEqual(result_from_tuple(v2, app=self.app), g[0])
self.b.serializer = 'pickle'
self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult)
def test_is_cached(self):
b = BaseBackend(app=self.app, max_cached_results=1)
b._cache['foo'] = 1
self.assertTrue(b.is_cached('foo'))
self.assertFalse(b.is_cached('false'))
class test_KeyValueStoreBackend(AppCase):
def setup(self):
self.b = KVBackend(app=self.app)
def test_on_chord_part_return(self):
assert not self.b.implements_incr
self.b.on_chord_part_return(None, None, None)
def test_get_store_delete_result(self):
tid = uuid()
self.b.mark_as_done(tid, 'Hello world')
self.assertEqual(self.b.get_result(tid), 'Hello world')
self.assertEqual(self.b.get_status(tid), states.SUCCESS)
self.b.forget(tid)
self.assertEqual(self.b.get_status(tid), states.PENDING)
def test_strip_prefix(self):
x = self.b.get_key_for_task('x1b34')
self.assertEqual(self.b._strip_prefix(x), 'x1b34')
self.assertEqual(self.b._strip_prefix('x1b34'), 'x1b34')
def test_get_many(self):
for is_dict in True, False:
self.b.mget_returns_dict = is_dict
ids = dict((uuid(), i) for i in range(10))
for id, i in items(ids):
self.b.mark_as_done(id, i)
it = self.b.get_many(list(ids))
for i, (got_id, got_state) in enumerate(it):
self.assertEqual(got_state['result'], ids[got_id])
self.assertEqual(i, 9)
self.assertTrue(list(self.b.get_many(list(ids))))
def test_get_many_times_out(self):
tasks = [uuid() for _ in range(4)]
self.b._cache[tasks[1]] = {'status': 'PENDING'}
with self.assertRaises(self.b.TimeoutError):
list(self.b.get_many(tasks, timeout=0.01, interval=0.01))
def test_chord_part_return_no_gid(self):
self.b.implements_incr = True
task = Mock()
state = 'SUCCESS'
result = 10
task.request.group = None
self.b.get_key_for_chord = Mock()
self.b.get_key_for_chord.side_effect = AssertionError(
'should not get here',
)
self.assertIsNone(self.b.on_chord_part_return(task, state, result))
@contextmanager
def _chord_part_context(self, b):
@self.app.task(shared=False)
def callback(result):
pass
b.implements_incr = True
b.client = Mock()
with patch('celery.backends.base.GroupResult') as GR:
deps = GR.restore.return_value = Mock(name='DEPS')
deps.__len__ = Mock()
deps.__len__.return_value = 10
b.incr = Mock()
b.incr.return_value = 10
b.expire = Mock()
task = Mock()
task.request.group = 'grid'
cb = task.request.chord = callback.s()
task.request.chord.freeze()
callback.backend = b
callback.backend.fail_from_current_stack = Mock()
yield task, deps, cb
def test_chord_part_return_propagate_set(self):
with self._chord_part_context(self.b) as (task, deps, _):
self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=True)
self.assertFalse(self.b.expire.called)
deps.delete.assert_called_with()
deps.join_native.assert_called_with(propagate=True, timeout=3.0)
def test_chord_part_return_propagate_default(self):
with self._chord_part_context(self.b) as (task, deps, _):
self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=None)
self.assertFalse(self.b.expire.called)
deps.delete.assert_called_with()
deps.join_native.assert_called_with(
propagate=self.b.app.conf.CELERY_CHORD_PROPAGATES,
timeout=3.0,
)
def test_chord_part_return_join_raises_internal(self):
with self._chord_part_context(self.b) as (task, deps, callback):
deps._failed_join_report = lambda: iter([])
deps.join_native.side_effect = KeyError('foo')
self.b.on_chord_part_return(task, 'SUCCESS', 10)
self.assertTrue(self.b.fail_from_current_stack.called)
args = self.b.fail_from_current_stack.call_args
exc = args[1]['exc']
self.assertIsInstance(exc, ChordError)
self.assertIn('foo', str(exc))
def test_chord_part_return_join_raises_task(self):
b = KVBackend(serializer='pickle', app=self.app)
with self._chord_part_context(b) as (task, deps, callback):
deps._failed_join_report = lambda: iter([
self.app.AsyncResult('culprit'),
])
deps.join_native.side_effect = KeyError('foo')
b.on_chord_part_return(task, 'SUCCESS', 10)
self.assertTrue(b.fail_from_current_stack.called)
args = b.fail_from_current_stack.call_args
exc = args[1]['exc']
self.assertIsInstance(exc, ChordError)
self.assertIn('Dependency culprit raised', str(exc))
def test_restore_group_from_json(self):
b = KVBackend(serializer='json', app=self.app)
g = self.app.GroupResult(
'group_id',
[self.app.AsyncResult('a'), self.app.AsyncResult('b')],
)
b._save_group(g.id, g)
g2 = b._restore_group(g.id)['result']
self.assertEqual(g2, g)
def test_restore_group_from_pickle(self):
b = KVBackend(serializer='pickle', app=self.app)
g = self.app.GroupResult(
'group_id',
[self.app.AsyncResult('a'), self.app.AsyncResult('b')],
)
b._save_group(g.id, g)
g2 = b._restore_group(g.id)['result']
self.assertEqual(g2, g)
def test_chord_apply_fallback(self):
self.b.implements_incr = False
self.b.fallback_chord_unlock = Mock()
self.b.apply_chord(
group(app=self.app), (), 'group_id', 'body',
result='result', foo=1,
)
self.b.fallback_chord_unlock.assert_called_with(
'group_id', 'body', result='result', foo=1,
)
def test_get_missing_meta(self):
self.assertIsNone(self.b.get_result('xxx-missing'))
self.assertEqual(self.b.get_status('xxx-missing'), states.PENDING)
def test_save_restore_delete_group(self):
tid = uuid()
tsr = self.app.GroupResult(
tid, [self.app.AsyncResult(uuid()) for _ in range(10)],
)
self.b.save_group(tid, tsr)
self.b.restore_group(tid)
self.assertEqual(self.b.restore_group(tid), tsr)
self.b.delete_group(tid)
self.assertIsNone(self.b.restore_group(tid))
def test_restore_missing_group(self):
self.assertIsNone(self.b.restore_group('xxx-nonexistant'))
class test_KeyValueStoreBackend_interface(AppCase):
def test_get(self):
with self.assertRaises(NotImplementedError):
KeyValueStoreBackend(self.app).get('a')
def test_set(self):
with self.assertRaises(NotImplementedError):
KeyValueStoreBackend(self.app).set('a', 1)
def test_incr(self):
with self.assertRaises(NotImplementedError):
KeyValueStoreBackend(self.app).incr('a')
def test_cleanup(self):
self.assertFalse(KeyValueStoreBackend(self.app).cleanup())
def test_delete(self):
with self.assertRaises(NotImplementedError):
KeyValueStoreBackend(self.app).delete('a')
def test_mget(self):
with self.assertRaises(NotImplementedError):
KeyValueStoreBackend(self.app).mget(['a'])
def test_forget(self):
with self.assertRaises(NotImplementedError):
KeyValueStoreBackend(self.app).forget('a')
class test_DisabledBackend(AppCase):
def test_store_result(self):
DisabledBackend(self.app).store_result()
def test_is_disabled(self):
with self.assertRaises(NotImplementedError):
DisabledBackend(self.app).get_status('foo')
| mit |
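A standalone sketch of the mget duality the KVBackend test double above models: some stores return a dict, others a list, and get_many-style consumers must handle both. The Store class is an illustrative assumption, not celery's API.
# Hypothetical stand-in for the dict/list mget duality exercised above.
class Store:
    def __init__(self, returns_dict=False):
        self.db, self.returns_dict = {}, returns_dict
    def set(self, key, value):
        self.db[key] = value
    def mget(self, keys):
        if self.returns_dict:
            return {k: self.db.get(k) for k in keys}
        return [self.db.get(k) for k in keys]

def get_many(store, keys):
    # Normalize both return shapes into one dict.
    result = store.mget(keys)
    pairs = result.items() if isinstance(result, dict) else zip(keys, result)
    return dict(pairs)

for flag in (True, False):
    s = Store(returns_dict=flag)
    s.set('a', 1); s.set('b', 2)
    assert get_many(s, ['a', 'b']) == {'a': 1, 'b': 2}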
projecthamster/hamster-cli | hamster_cli/hamster_cli.py | 2 | 30015 | # -*- coding: utf-8 -*-
# This file is part of 'hamster_cli'.
#
# 'hamster_cli' is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'hamster_cli' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 'hamster_cli'. If not, see <http://www.gnu.org/licenses/>.
"""A time tracker for the command line. Utilizing the power of hamster-lib."""
from __future__ import absolute_import, unicode_literals
import datetime
import logging
import os
from collections import namedtuple
from gettext import gettext as _
import appdirs
import click
import hamster_lib
# Once we drop py2 support, we can use the builtin again but unicode support
# under python 2 is practically non-existent and manual encoding is not easily
# possible.
from backports.configparser import SafeConfigParser
from hamster_lib import Fact, HamsterControl, reports
from hamster_lib.helpers import time as time_helpers
from tabulate import tabulate
from . import help_strings
class HamsterAppDirs(appdirs.AppDirs):
"""Custom class that ensure appdirs exist."""
def __init__(self, *args, **kwargs):
"""Add create flag value to instance."""
super(HamsterAppDirs, self).__init__(*args, **kwargs)
self.create = True
@property
def user_data_dir(self):
"""Return ``user_data_dir``."""
directory = appdirs.user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
if self.create:
self._ensure_directory_exists(directory)
return directory
@property
def site_data_dir(self):
"""Return ``site_data_dir``."""
directory = appdirs.site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
if self.create:
self._ensure_directory_exists(directory)
return directory
@property
def user_config_dir(self):
"""Return ``user_config_dir``."""
directory = appdirs.user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
if self.create:
self._ensure_directory_exists(directory)
return directory
@property
def site_config_dir(self):
"""Return ``site_config_dir``."""
directory = appdirs.site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
if self.create:
self._ensure_directory_exists(directory)
return directory
@property
def user_cache_dir(self):
"""Return ``user_cache_dir``."""
directory = appdirs.user_cache_dir(self.appname, self.appauthor,
version=self.version)
if self.create:
self._ensure_directory_exists(directory)
return directory
@property
def user_log_dir(self):
"""Return ``user_log_dir``."""
directory = appdirs.user_log_dir(self.appname, self.appauthor,
version=self.version)
if self.create:
self._ensure_directory_exists(directory)
return directory
def _ensure_directory_exists(self, directory):
"""Ensure that the passed path exists."""
if not os.path.lexists(directory):
os.makedirs(directory)
return directory
class Controler(HamsterControl):
"""A custom controler that adds config handling on top of its regular functionality."""
def __init__(self):
"""Instantiate controler instance and adding client_config to it."""
lib_config, client_config = _get_config(_get_config_instance())
super(Controler, self).__init__(lib_config)
self.client_config = client_config
LOG_LEVELS = {
'info': logging.INFO,
'debug': logging.DEBUG,
'warning': logging.WARNING,
'error': logging.ERROR,
}
AppDirs = HamsterAppDirs('hamster_cli')
pass_controler = click.make_pass_decorator(Controler, ensure=True)
@click.group(help=help_strings.RUN_HELP)
@pass_controler
def run(controler):
"""General context run right before any of the commands."""
click.clear()
_show_greeting()
_run(controler)
def _run(controler):
"""Make sure that loggers are setup properly."""
_setup_logging(controler)
@run.command(help=help_strings.SEARCH_HELP)
@click.argument('search_term')
@click.argument('time_range', default='')
@pass_controler
def search(controler, search_term, time_range):
"""Fetch facts matching certain criteria."""
# [FIXME]
# Check what we actually match against.
_search(controler, search_term, time_range)
def _search(controler, search_term, time_range):
"""
Search facts matching the given timerange and search term. Both are optional.
Matching facts will be printed in a tabular representation.
Make sure that arguments are converted into appropriate types before passing
them on to the backend.
We leave it to the backend to first parse the timeinfo and then complete any
missing data based on the passed config settings.
Args:
search_term: Term that need to be matched by the fact in order to be considered a hit.
time_range: Only facts within this timerange will be considered.
"""
# [FIXME]
# As far as our backend is concerned search_term as well as time range are
# optional. Whether the same is true for legacy hamster-cli needs to be checked.
if not time_range:
start, end = (None, None)
else:
# [FIXME]
# This is a rather crude fix. Recent versions of ``hamster-lib`` do not
# provide a dedicated helper to parse *just* time(ranges) but expect a
# ``raw_fact`` text. In order to work around this we just append
# whitespaces to our time range argument which will qualify for the
# desired parsing.
# Once raw_fact/time parsing has been refactored in hamster-lib, this
# should no longer be needed.
time_range = time_range + ' '
timeinfo = time_helpers.extract_time_info(time_range)[0]
start, end = time_helpers.complete_timeframe(timeinfo, controler.config)
results = controler.facts.get_all(filter_term=search_term, start=start, end=end)
table, headers = _generate_facts_table(results)
click.echo(tabulate(table, headers=headers))
@run.command(help=help_strings.LIST_HELP)
@click.argument('time_range', default='')
@pass_controler
def list(controler, time_range):
"""List all facts within a timerange."""
_search(controler, search_term='', time_range=time_range)
@run.command(help=help_strings.START_HELP)
@click.argument('raw_fact')
@click.argument('start', default='')
@click.argument('end', default='')
@pass_controler
def start(controler, raw_fact, start, end):
"""Start or add a fact."""
# [FIXME]
# The original semantics do not work anymore, as we make a clear difference
# between *adding* a (complete) fact and *starting* an (ongoing) fact.
# This needs to be reflected in this command.
_start(controler, raw_fact, start, end)
def _start(controler, raw_fact, start, end):
"""
Start or add a fact.
Args:
raw_fact: ``raw_fact`` containing information about the Fact to be started. As an absolute
minimum this must be a string representing the 'activityname'.
start (optional): When does the fact start?
end (optional): When does the fact end?
Returns:
None: If everything went alright.
Note:
* Whilst it is possible to pass time information as part of the ``raw_fact`` as
well as dedicated ``start`` and ``end`` arguments, only the latter will be represented
in the resulting fact in such a case.
"""
fact = Fact.create_from_raw_fact(raw_fact)
# Explicit trumps implicit!
if start:
fact.start = time_helpers.parse_time(start)
if end:
fact.end = time_helpers.parse_time(end)
if not fact.end:
# We seem to want to start a new tmp fact
# Neither the raw fact string nor an additional optional end time have
# been passed.
# Until we decide whether to split this into start/add commands we use the
# presence of any 'end' information as an indication of the user's intent.
tmp_fact = True
else:
tmp_fact = False
# We complete the fact's times in both cases as even a new 'ongoing' fact
# may be in need of some time-completion for its start information.
# Complete missing fields with default values.
# legacy hamster_cli seems to have a different fallback behaviour than
# our regular backend, in particular the way 'day_start' is handled.
# For maximum consistency we use the backends unified ``complete_timeframe``
# helper instead. If behaviour similar to the legacy hamster-cli is desired,
# all that seems needed is to change ``day_start`` to '00:00'.
# The following is needed because start and end may be ``None``.
if not fact.start:
# No time information has been passed at all.
fact.start = datetime.datetime.now()
else:
# We got some time information, which may be incomplete however.
if not fact.end:
end_date = None
end_time = None
else:
end_date = fact.end.date()
end_time = fact.end.time()
timeframe = time_helpers.TimeFrame(
fact.start.date(), fact.start.time(), end_date, end_time, None)
fact.start, fact.end = time_helpers.complete_timeframe(timeframe, controler.config)
if tmp_fact:
# Quick fix for tmp facts. That way we can use the default helper
# function which will autocomplete the end info as well.
# Because of our use of ``complete_timeframe`` our 'ongoing fact' may have
# received an ``end`` value now. In that case we reset it to ``None``.
fact.end = None
controler.client_logger.debug(_(
"New fact instance created: {fact}".format(fact=fact)
))
fact = controler.facts.save(fact)
@run.command(help=help_strings.STOP_HELP)
@pass_controler
def stop(controler):
"""Stop tracking current fact. Saving the result."""
_stop(controler)
def _stop(controler):
"""
Stop current 'ongoing fact' and save it to the backend.
Returns:
None: If successful.
Raises:
ValueError: If no *ongoing fact* can be found.
"""
try:
fact = controler.facts.stop_tmp_fact()
except ValueError:
message = _(
"Unable to continue temporary fact. Are you sure there is one?"
"Try running *current*."
)
raise click.ClickException(message)
else:
message = '{fact} ({duration} minutes)'.format(fact=fact, duration=fact.get_string_delta())
controler.client_logger.info(_(message))
click.echo(_(message))
@run.command(help=help_strings.CANCEL_HELP)
@pass_controler
def cancel(controler):
"""Cancel 'ongoing fact'. E.g stop it without storing in the backend."""
_cancel(controler)
def _cancel(controler):
"""
Cancel tracking current temporary fact, discarding the result.
Returns:
None: If success.
Raises:
KeyError: No *ongoing fact* can be found.
"""
try:
controler.facts.cancel_tmp_fact()
except KeyError:
message = _("Nothing tracked right now. Not doing anything.")
controler.client_logger.info(message)
raise click.ClickException(message)
else:
message = _("Tracking canceled.")
click.echo(message)
controler.client_logger.debug(message)
@run.command(help=help_strings.EXPORT_HELP)
@click.argument('format', nargs=1, default='csv')
@click.argument('start', nargs=1, default='')
@click.argument('end', nargs=1, default='')
@pass_controler
def export(controler, format, start, end):
"""Export all facts of within a given timewindow to a file of specified format."""
_export(controler, format, start, end)
def _export(controler, format, start, end):
"""
Export all facts in the given timeframe in the format specified.
Args:
format (str): Format to export to. Valid options are: ``csv``, ``xml`` and ``ical``.
start (datetime.datetime): Consider only facts starting at this time or later.
end (datetime.datetime): Consider only facts starting no later than this time.
Returns:
None: If everything went alright.
Raises:
click.Exception: If format is not recognized.
"""
accepted_formats = ['csv', 'ical', 'xml']
# [TODO]
# Once hamster_lib has a proper 'export' register available we should be able
# to streamline this.
if format not in accepted_formats:
message = _("Unrecocgnized export format recieved")
controler.client_logger.info(message)
raise click.ClickException(message)
if not start:
start = None
if not end:
end = None
filepath = controler.client_config['export_path']
facts = controler.facts.get_all(start=start, end=end)
if format == 'csv':
writer = reports.TSVWriter(filepath)
writer.write_report(facts)
click.echo(_("Facts have been exported to: {path}".format(path=filepath)))
elif format == 'ical':
writer = reports.ICALWriter(filepath)
writer.write_report(facts)
click.echo(_("Facts have been exported to: {path}".format(path=filepath)))
elif format == 'xml':
writer = reports.XMLWriter(filepath)
writer.write_report(facts)
click.echo(_("Facts have been exported to: {path}".format(path=filepath)))
@run.command(help=help_strings.CATEGORIES_HELP)
@pass_controler
def categories(controler):
"""List all existing categories, ordered by name."""
_categories(controler)
def _categories(controler):
"""
List all existing categories, ordered by name.
Returns:
None: If success.
"""
result = controler.categories.get_all()
# [TODO]
# Provide nicer looking tabulated output.
for category in result:
click.echo(category.name)
@run.command(help=help_strings.CURRENT_HELP)
@pass_controler
def current(controler):
"""Display current *ongoing fact*."""
_current(controler)
def _current(controler):
"""
Return current *ongoing fact*.
Returns:
None: If everything went alright.
Raises:
click.ClickException: If we fail to fetch any *ongoing fact*.
"""
try:
fact = controler.facts.get_tmp_fact()
except KeyError:
message = _(
"There seems no be no activity beeing tracked right now."
" maybe you want to *start* tracking one right now?"
)
raise click.ClickException(message)
else:
fact.end = datetime.datetime.now()
string = '{fact} ({duration} minutes)'.format(fact=fact, duration=fact.get_string_delta())
click.echo(string)
@run.command(help=help_strings.ACTIVITIES_HELP)
@click.argument('search_term', default='')
@pass_controler
def activities(controler, search_term):
"""List all activities. Provide optional filtering by name."""
_activities(controler, search_term)
def _activities(controler, search_term):
"""
List all activities. Provide optional filtering by name.
Args:
search_term (str): String to match ``Activity.name`` against.
Returns:
None: If success.
"""
result = controler.activities.get_all(search_term=search_term)
table = []
headers = (_("Activity"), _("Category"))
for activity in result:
if activity.category:
category = activity.category.name
else:
category = None
table.append((activity.name, category))
click.echo(tabulate(table, headers=headers))
@run.command(help=help_strings.LICENSE_HELP)
def license():
"""Show license information."""
_license()
def _license():
"""Show license information."""
license = """
'hamster_cli' is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
'hamster_cli' is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with 'hamster_cli'. If not, see <http://www.gnu.org/licenses/>.
"""
click.echo(license)
@run.command(help=help_strings.DETAILS_HELP)
@pass_controler
def details(controler):
"""List details about the runtime environment."""
_details(controler)
def _details(controler):
"""List details about the runtime environment."""
def get_db_info():
result = None
def get_sqlalchemy_info():
engine = controler.config['db_engine']
if engine == 'sqlite':
sqlalchemy_string = _("Using 'sqlite' with database stored under: {}".format(
controler.config['db_path']))
else:
port = controler.config.get('db_port', '')
if port:
port = ':{}'.format(port)
sqlalchemy_string = _(
"Using '{engine}' connecting to database {name} on {host}{port}"
" as user {username}.".format(
engine=engine, host=controler.config['db_host'], port=port,
username=controler.config['db_user'], name=controler.config['db_name'])
)
return sqlalchemy_string
# For now we do not need to check for various store option as we allow
# only one anyway.
result = get_sqlalchemy_info()
return result
from hamster_cli import __version__, __appname__
click.echo(_("You are running {name} version {version}.".format(
name=__appname__, version=__version__)))
click.echo("Configuration found under: {}.".format(_get_config_path()))
click.echo("Logfile stored under: {}.".format(controler.client_config['logfile_path']))
click.echo("Reports exported to: {}.".format(controler.client_config['export_path']))
click.echo(get_db_info())
# Helper functions
def _setup_logging(controler):
"""Setup logging for the lib_logger as well as client specific logging."""
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s %(name)s %(funcName)s: %(message)s')
lib_logger = controler.lib_logger
client_logger = logging.getLogger('hamster_cli')
# Clear any existing (null)Handlers
lib_logger.handlers = []
client_logger.handlers = []
client_logger.setLevel(controler.client_config['log_level'])
lib_logger.setLevel(controler.client_config['log_level'])
controler.client_logger = client_logger
if controler.client_config['log_console']:
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
lib_logger.addHandler(console_handler)
client_logger.addHandler(console_handler)
if controler.client_config['logfile_path']:
filename = controler.client_config['logfile_path']
file_handler = logging.FileHandler(filename, encoding='utf-8')
file_handler.setFormatter(formatter)
lib_logger.addHandler(file_handler)
client_logger.addHandler(file_handler)
def _get_config(config_instance):
"""
Retrieve config dictionaries for backend and client setup.
Raises:
ValueError: Raised if we fail to process the user supplied config information.
Please note that there will be no log entry, as at this point logging has not
been set up yet.
Returns:
tuple: ``(backend_config, client_config)`` tuple, where each element is a
dictionary storing relevant config data.
"""
# [TODO]
# We probably can make better use of configparser's default config option,
# but for now this will do.
def get_client_config(config):
"""
Process client section of provided config and turn it into proper config dictionary.
Make sure config values are of proper type and provide basic
sanity checks (e.g. make sure we got a filename if we want to log to
file and such..).
Not all key/values returned here need to be user configurable!
It is worth noting that this is where we turn our user provided config information
into the actual dictionaries to be consumed by our backend and client objects.
A particular consequence is that the division of "Client/Backend" in the config
file is purely cosmetic. Another consequence is that not all user provided config
information has to be processed at all. We just take what we need and can safely
ignore the rest. That way we can improve the config file layout without having to
adjust our code all the time. It also means our main code does not have to deal with
turning ``path`` plus ``name`` into a full location and such.
"""
def get_logfile_path():
log_dir = AppDirs.user_log_dir
return os.path.join(log_dir, config.get('Client', 'log_filename'))
def get_log_level():
try:
log_level = LOG_LEVELS[config.get('Client', 'log_level').lower()]
except KeyError:
raise ValueError(_("Unrecognized log level value in config"))
return log_level
def get_log_console():
return config.getboolean('Client', 'log_console')
def get_export_dir():
"""Return path to save exports to. Filenextension will be added by export method."""
return os.path.join(AppDirs.user_data_dir, 'export')
return {
'log_level': get_log_level(),
'log_console': get_log_console(),
'logfile_path': get_logfile_path(),
'export_path': get_export_dir(),
}
def get_backend_config(config):
"""
Return properly populated config dictionaries for consumption by our application.
Make sure config values are of proper type and provide basic
sanity checks (e.g. make sure we got a filename if we want to log to
file and such..).
Setting of config values that are not actually derived from our config file but by
inspecting our runtime environment (e.g. path information) happens here as well.
Note:
At least the validation code/sanity checks may be relevant to other
clients as well. So maybe this qualifies for inclusion into
hamster-lib?
"""
def get_day_start():
try:
day_start = datetime.datetime.strptime(config.get('Backend',
'daystart'), '%H:%M:%S').time()
except ValueError:
raise ValueError(_("We encountered an error when parsing configs"
"'day_start' value! Aborting ..."))
return day_start
def get_store():
store = config.get('Backend', 'store')
if store not in hamster_lib.lib.REGISTERED_BACKENDS.keys():
raise ValueError(_("Unrecognized store option."))
return store
def get_db_path():
return config.get('Backend', 'db_path')
def get_fact_min_delta():
return config.get('Backend', 'fact_min_delta')
def get_tmpfile_path():
"""Return path to file used to store *ongoing fact*."""
return os.path.join(AppDirs.user_data_dir, 'hamster_cli.fact')
def get_db_config():
"""Provide a dict with db-specifiy key/value to be added to the backend config."""
result = {}
engine = config.get('Backend', 'db_engine')
result = {'db_engine': engine}
if engine == 'sqlite':
result.update({'db_path': config.get('Backend', 'db_path')})
else:
try:
result.update({'db_port': config.get('Backend', 'db_port')})
except KeyError:
pass
result.update({
'db_host': config.get('Backend', 'db_host'),
'db_name': config.get('Backend', 'db_name'),
'db_user': config.get('Backend', 'db_user'),
'db_password': config.get('Backend', 'db_password'),
})
return result
backend_config = {
'store': get_store(),
'day_start': get_day_start(),
'fact_min_delta': get_fact_min_delta(),
'tmpfile_path': get_tmpfile_path(),
}
backend_config.update(get_db_config())
return backend_config
return (get_backend_config(config_instance), get_client_config(config_instance))
def _get_config_instance():
"""
Return a SafeConfigParser instance.
If we can not find a config file under its expected location, we trigger creation
of a new default file and return its instance.
Returns:
SafeConfigParser: Either the config loaded from file or an instance representing
the content of our newly creating default config.
"""
config = SafeConfigParser()
configfile_path = _get_config_path()
if not config.read(configfile_path):
click.echo(_("No valid config file found. Trying to create a new default config"
" at: '{}'.".format(configfile_path)))
config = _write_config_file(configfile_path)
click.echo(_("A new default config file has been successfully created."))
return config
def _get_config_path():
"""Show general information upon client launch."""
config_dir = AppDirs.user_config_dir
config_filename = 'hamster_cli.conf'
return os.path.join(config_dir, config_filename)
def _write_config_file(file_path):
"""
Write a default config file to the specified location.
Returns:
SafeConfigParser: Instance written to file.
"""
# [FIXME]
# This may be useful to turn into a proper command, so users can restore to
# factory settings easily.
def get_db_path():
return os.path.join(str(AppDirs.user_data_dir), 'hamster_cli.sqlite')
def get_tmp_file_path():
return os.path.join(str(AppDirs.user_data_dir), 'hamster_cli.fact')
config = SafeConfigParser()
# Backend
config.add_section('Backend')
config.set('Backend', 'store', 'sqlalchemy')
config.set('Backend', 'daystart', '00:00:00')
config.set('Backend', 'fact_min_delta', '60')
config.set('Backend', 'db_engine', 'sqlite')
config.set('Backend', 'db_host', '')
config.set('Backend', 'db_port', '')
config.set('Backend', 'db_name', '')
config.set('Backend', 'db_path', get_db_path())
config.set('Backend', 'db_user', '')
config.set('Backend', 'db_password', '')
# Client
config.add_section('Client')
config.set('Client', 'unsorted_localized', 'Unsorted')
config.set('Client', 'log_level', 'debug')
config.set('Client', 'log_console', 'False')
config.set('Client', 'log_filename', 'hamster_cli.log')
configfile_path = os.path.dirname(file_path)
if not os.path.lexists(configfile_path):
os.makedirs(configfile_path)
with open(file_path, 'w') as fobj:
config.write(fobj)
return config
def _generate_facts_table(facts):
"""
Create a nice looking table representing a set of fact instances.
Returns a (table, header) tuple. 'table' is a list of ``TableRow``
instances representing a single fact.
"""
# If you want to change the order, just adjust the ``columns`` tuple.
headers = {
'start': _("Start"),
'end': _("End"),
'activity': _("Activity"),
'category': _("Category"),
'description': _("Description"),
'delta': _("Duration")
}
columns = ('start', 'end', 'activity', 'category', 'description',
'delta')
header = [headers[column] for column in columns]
TableRow = namedtuple('TableRow', columns)
table = []
for fact in facts:
if fact.category:
category = fact.category.name
else:
category = ''
table.append(TableRow(
activity=fact.activity.name,
category=category,
description=fact.description,
start=fact.start.strftime('%Y-%m-%d %H:%M'),
end=fact.end.strftime('%Y-%m-%d %H:%M'),
# [TODO]
# Use ``Fact.get_string_delta`` instead!
delta='{minutes} min.'.format(minutes=(int(fact.delta.total_seconds() / 60))),
))
return (table, header)
def _show_greeting():
"""Display a greeting message providing basic set of information."""
click.echo(_("Welcome to 'hamster_cli', your friendly time tracker for the command line."))
click.echo("Copyright (C) 2015-2016, Eric Goller <elbenfreund@DenkenInEchtzeit.net>")
click.echo(_(
"'hamster_cli' is published under the terms of the GPL3, for details please use"
"the 'license' command."
))
click.echo()
| gpl-3.0 |
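_setup_logging above attaches one shared formatter to both the lib and the client logger; here is a minimal stdlib-only sketch of that dual-logger wiring. The logger names are illustrative, not hamster_cli's.
import logging

formatter = logging.Formatter(
    '[%(levelname)s] %(asctime)s %(name)s %(funcName)s: %(message)s')
lib_logger = logging.getLogger('example_lib')       # hypothetical names
client_logger = logging.getLogger('example_client')
for logger in (lib_logger, client_logger):
    logger.handlers = []                 # clear any existing handlers
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

client_logger.debug('both loggers now share one console format')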
adw0rd/lettuce | tests/integration/lib/Django-1.3/django/contrib/sessions/backends/cache.py | 268 | 1881 | from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import cache
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = cache
super(SessionStore, self).__init__(session_key)
def load(self):
session_data = self._cache.get(self.session_key)
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self.session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError("Unable to create a new session key.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.session_key, self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
if self._cache.has_key(session_key):
return True
return False
def delete(self, session_key=None):
if session_key is None:
if self._session_key is None:
return
session_key = self._session_key
self._cache.delete(session_key)
| gpl-3.0 |
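The create() retry loop above leans on the atomic add/set distinction: add() only succeeds on a fresh key, so a collision just retries. A self-contained sketch of that pattern with a dict-backed fake cache (not Django's cache API):
import random

class FakeCache:
    """Illustrative stand-in: add() fails on existing keys, set() overwrites."""
    def __init__(self):
        self.db = {}
    def add(self, key, value):
        if key in self.db:
            return False
        self.db[key] = value
        return True
    def set(self, key, value):
        self.db[key] = value

def create_session(cache, tries=10000):
    for _ in range(tries):
        key = 'session:%030x' % random.getrandbits(120)
        if cache.add(key, {}):          # only succeeds on a fresh key
            return key
    raise RuntimeError('Unable to create a new session key.')

cache = FakeCache()
print(create_session(cache))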
jkorell/PTVS | Python/Tests/TestData/DebugAttach/Simple.py | 18 | 1635 | from threading import Thread, current_thread, Lock
from time import sleep
report_progress_now = []
progress_lock = Lock()
def check_report_progress(me, id):
global report_progress_now, progress_lock
if report_progress_now[id]:
progress_lock.acquire()
print("{} [{}] is making progress.".format(me.name, me.ident))
report_progress_now[id] = False
progress_lock.release()
def exception_spam(id):
me = current_thread()
while True:
try:
raise Exception()
except Exception:
pass
check_report_progress(me, id)
def sleep_forever(id):
me = current_thread()
while True:
sleep(10)
check_report_progress(me, id)
def busy_loop(id):
me = current_thread()
i = 0
while True:
i = (i % 100000000) + 1
check_report_progress(me, id)
# if i % 10000000 == 0: raise Exception()
if __name__ == '__main__':
num_threads = 10
thread_list = []
thread_fun, main_fun = exception_spam, busy_loop
for i in range(num_threads):
thread_list.append(Thread(target=thread_fun,args=(i,)))
report_progress_now.append(True)
for t in thread_list:
t.start()
report_progress_now.append(True)
me, id = current_thread(), num_threads
while True:
try:
main_fun(id)
except KeyboardInterrupt:
progress_lock.acquire()
for i, _ in enumerate(report_progress_now):
report_progress_now[i] = True
progress_lock.release()
| apache-2.0 |
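The script above toggles per-thread progress flags under a lock from the main thread's interrupt handler. A compact, hedged sketch of that flag-under-lock pattern, simplified to use context managers and a bounded loop:
from threading import Lock, Thread
from time import sleep

flags = [False] * 3
lock = Lock()

def worker(idx):
    # Each worker polls its flag and resets it after reporting once.
    for _ in range(5):
        with lock:
            if flags[idx]:
                print('worker %d reporting' % idx)
                flags[idx] = False
        sleep(0.01)

threads = [Thread(target=worker, args=(i,)) for i in range(3)]
for t in threads:
    t.start()
with lock:
    for i in range(3):
        flags[i] = True
for t in threads:
    t.join()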
rbelzile/python-myfitnesspal | myfitnesspal/entry.py | 1 | 1558 | import re
from myfitnesspal.base import MFPBase
class Entry(MFPBase):
def __init__(self, name, nutrition):
self._name = name
self._nutrition = nutrition
#split out quantity and measuring unit out of entry name
regex = r'(?P<short_name>.+), (?P<quantity>\d[\d\.]*) (?P<unit>[\w\(\)]+)(?: \(.*\))?'
match = re.search(regex, name)
self._quantity = None
self._unit = None
self._short_name = None
if match:
self._quantity = match.group('quantity')
self._unit = match.group('unit')
self._short_name = match.group('short_name')
def __getitem__(self, value):
return self.totals[value]
def keys(self):
return self.totals.keys()
@property
def name(self):
return self._name.strip()
@property
def nutrition_information(self):
return self._nutrition
@property
def totals(self):
return self.nutrition_information
def get_as_dict(self):
return {
'name': self.name,
'nutrition_information': self.nutrition_information,
}
def __unicode__(self):
return u'%s %s' % (
self.name,
self.nutrition_information,
)
@property
def short_name(self):
if self._short_name:
return self._short_name.strip()
return self._short_name
@property
def unit(self):
return self._unit
@property
def quantity(self):
return self._quantity
| mit |
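The quantity/unit split in Entry.__init__ above hinges on one regex; a quick demonstration on a sample entry name (the sample string is an assumption):
import re

regex = r'(?P<short_name>.+), (?P<quantity>\d[\d\.]*) (?P<unit>[\w\(\)]+)(?: \(.*\))?'
match = re.search(regex, 'Banana, raw, 1 medium (7" to 7-7/8" long)')
assert match is not None
# Greedy short_name absorbs internal commas; quantity/unit come from the tail.
print(match.group('short_name'), match.group('quantity'), match.group('unit'))
# -> Banana, raw 1 medium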
asdfsx/chardet | chardet/charsetgroupprober.py | 53 | 3534 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import ProbingState
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self, lang_filter=None):
super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
self._active_num = 0
self.probers = []
self._best_guess_prober = None
def reset(self):
super(CharSetGroupProber, self).reset()
self._active_num = 0
for prober in self.probers:
if prober:
prober.reset()
prober.active = True
self._active_num += 1
self._best_guess_prober = None
@property
def charset_name(self):
if not self._best_guess_prober:
self.get_confidence()
if not self._best_guess_prober:
return None
return self._best_guess_prober.charset_name
def feed(self, byte_str):
for prober in self.probers:
if not prober:
continue
if not prober.active:
continue
state = prober.feed(byte_str)
if not state:
continue
if state == ProbingState.found_it:
self._best_guess_prober = prober
return self.state
elif state == ProbingState.not_me:
prober.active = False
self._active_num -= 1
if self._active_num <= 0:
self._state = ProbingState.not_me
return self.state
return self.state
def get_confidence(self):
state = self.state
if state == ProbingState.found_it:
return 0.99
elif state == ProbingState.not_me:
return 0.01
best_conf = 0.0
self._best_guess_prober = None
for prober in self.probers:
if not prober:
continue
if not prober.active:
self.logger.debug('%s not active', prober.charset_name)
continue
conf = prober.get_confidence()
self.logger.debug('%s confidence = %s', prober.charset_name, conf)
if best_conf < conf:
best_conf = conf
self._best_guess_prober = prober
if not self._best_guess_prober:
return 0.0
return best_conf
| lgpl-2.1 |
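A stripped-down sketch of the confidence aggregation in get_confidence above: skip inactive probers, keep the highest-confidence one as the best guess. The StubProber class is an illustrative assumption, not chardet's real prober.
class StubProber:
    """Hypothetical prober: fixed confidence, always active."""
    def __init__(self, charset_name, confidence):
        self.charset_name, self.confidence = charset_name, confidence
        self.active = True
    def get_confidence(self):
        return self.confidence

def best_guess(probers):
    best_conf, best = 0.0, None
    for prober in probers:
        if not prober.active:
            continue
        conf = prober.get_confidence()
        if best_conf < conf:
            best_conf, best = conf, prober
    return best

probers = [StubProber('utf-8', 0.7), StubProber('latin-1', 0.4)]
assert best_guess(probers).charset_name == 'utf-8'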
XiaosongWei/chromium-crosswalk | tools/telemetry/telemetry/timeline/event_container.py | 15 | 5229 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.timeline import async_slice as async_slice_module
from telemetry.timeline import flow_event as flow_event_module
from telemetry.timeline import slice as slice_module
class TimelineEventContainer(object):
"""Represents a container for events.
"""
def __init__(self, name, parent):
self.parent = parent
self.name = name
@staticmethod
def IsAsyncSlice(t):
return t == async_slice_module.AsyncSlice
# Basic functions that subclasses of TimelineEventContainer should implement
# in order to expose their events. New methods should be added to this part of
# the code only when absolutely certain they're needed.
def IterChildContainers(self):
raise NotImplementedError()
def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
"""Iterates all the TimelineEvents in this container.
Only events with a type matching event_type_predicate AND matching event
event_predicate will be yielded.
event_type_predicate is given an actual type object, e.g.:
event_type_predicate(slice_module.Slice)
event_predicate is given actual events:
event_predicate(thread.slices[7])
DO NOT ASSUME that the event_type_predicate will be called for every event
found. The relative calling order of the two is left up to the implementer
of the method.
"""
del event_type_predicate, event_predicate # unused
return
yield # pylint: disable=unreachable
def IterAllEvents(self,
recursive=True,
event_type_predicate=lambda t: True,
event_predicate=lambda e: True):
"""Iterates all events in this container, pre-filtered by two predicates.
Only events with a type matching event_type_predicate AND matching event
event_predicate will be yielded.
event_type_predicate is given an actual type object, e.g.:
event_type_predicate(slice_module.Slice)
event_predicate is given actual events:
event_predicate(thread.slices[7])
"""
if not recursive:
for e in self.IterEventsInThisContainer(
event_type_predicate, event_predicate):
yield e
return
# TODO(nduca): Write this as a proper iterator instead of one that creates a
# list and then iterates it.
containers = []
def GetContainersRecursive(container):
containers.append(container)
for container in container.IterChildContainers():
GetContainersRecursive(container)
GetContainersRecursive(self)
# Actually create the iterator.
for c in containers:
for e in c.IterEventsInThisContainer(event_type_predicate,
event_predicate):
yield e
# Helper functions for finding common kinds of events. Must always take an
# optional recursive parameter and be implemented in terms of IterAllEvents.
def IterAllEventsOfName(self, name, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=lambda t: True,
event_predicate=lambda e: e.name == name)
def IterAllSlices(self, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=lambda t: t == slice_module.Slice)
def IterAllSlicesInRange(self, start, end, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=lambda t: t == slice_module.Slice,
event_predicate=lambda s: s.start >= start and s.end <= end)
def IterAllSlicesOfName(self, name, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=lambda t: t == slice_module.Slice,
event_predicate=lambda e: e.name == name)
def IterAllToplevelSlicesOfName(self, name, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=lambda t: t == slice_module.Slice,
event_predicate=lambda e: e.name == name and e.parent_slice == None)
def IterAllAsyncSlicesOfName(self, name, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=self.IsAsyncSlice,
event_predicate=lambda e: e.name == name)
def IterAllAsyncSlicesStartsWithName(self, name, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=self.IsAsyncSlice,
event_predicate=lambda e: e.name.startswith(name))
def IterAllFlowEvents(self, recursive=True):
return self.IterAllEvents(
recursive=recursive,
event_type_predicate=lambda t: t == flow_event_module.FlowEvent)
# List versions. These should always be simple expressions that list() on
# an underlying iter method.
def GetAllEvents(self, recursive=True):
return list(self.IterAllEvents(recursive=recursive))
def GetAllEventsOfName(self, name, recursive=True):
return list(self.IterAllEventsOfName(name, recursive))
def GetAllToplevelSlicesOfName(self, name, recursive=True):
return list(self.IterAllToplevelSlicesOfName(name, recursive))
| bsd-3-clause |
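The container above filters everything through two predicates, one on the event's type and one on the event itself. A flat, runnable sketch of that idea (the real class recurses into child containers; Event and Container here are illustrative):
class Event:
    def __init__(self, name, start, end):
        self.name, self.start, self.end = name, start, end

class Container:
    """Illustrative flat container; the real class recurses into children."""
    def __init__(self, events):
        self.events = events
    def iter_events(self, type_pred=lambda t: True, pred=lambda e: True):
        for e in self.events:
            if type_pred(type(e)) and pred(e):
                yield e

c = Container([Event('load', 0, 5), Event('paint', 6, 9)])
in_range = list(c.iter_events(pred=lambda e: e.start >= 0 and e.end <= 5))
assert [e.name for e in in_range] == ['load']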
leansoft/edx-platform | cms/djangoapps/contentstore/views/tests/test_unit_page.py | 222 | 2775 | """
Unit tests for the unit page.
"""
from contentstore.views.tests.utils import StudioPageTestCase
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.x_module import STUDENT_VIEW
class UnitPageTestCase(StudioPageTestCase):
"""
Unit tests for the unit page.
"""
def setUp(self):
super(UnitPageTestCase, self).setUp()
self.vertical = ItemFactory.create(parent_location=self.sequential.location,
category='vertical', display_name='Unit')
self.video = ItemFactory.create(parent_location=self.vertical.location,
category="video", display_name="My Video")
self.store = modulestore()
def test_public_component_preview_html(self):
"""
Verify that a public xblock's preview returns the expected HTML.
"""
published_video = self.store.publish(self.video.location, self.user.id)
self.validate_preview_html(published_video, STUDENT_VIEW, can_add=False)
def test_draft_component_preview_html(self):
"""
Verify that a draft xblock's preview returns the expected HTML.
"""
self.validate_preview_html(self.video, STUDENT_VIEW, can_add=False)
def test_public_child_container_preview_html(self):
"""
Verify that a public child container rendering on the unit page (which shows a View arrow
to the container page) returns the expected HTML.
"""
child_container = ItemFactory.create(parent_location=self.vertical.location,
category='split_test', display_name='Split Test')
ItemFactory.create(parent_location=child_container.location,
category='html', display_name='grandchild')
published_child_container = self.store.publish(child_container.location, self.user.id)
self.validate_preview_html(published_child_container, STUDENT_VIEW, can_add=False)
def test_draft_child_container_preview_html(self):
"""
Verify that a draft child container rendering on the unit page (which shows a View arrow
to the container page) returns the expected HTML.
"""
child_container = ItemFactory.create(parent_location=self.vertical.location,
category='split_test', display_name='Split Test')
ItemFactory.create(parent_location=child_container.location,
category='html', display_name='grandchild')
draft_child_container = self.store.get_item(child_container.location)
self.validate_preview_html(draft_child_container, STUDENT_VIEW, can_add=False)
| agpl-3.0 |
bratsche/Neutron-Drive | neutron-drive/django/db/models/fields/subclassing.py | 104 | 1819 | """
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the __metaclass__ for your Field subclass, implement
to_python() and the other necessary methods and everything will work seamlessly.
"""
class SubfieldBase(type):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name):
if func:
func(self, cls, name)
else:
super(superclass, self).contribute_to_class(cls, name)
setattr(cls, self.name, Creator(self))
return contribute_to_class
| bsd-3-clause |
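A runnable toy version of the SubfieldBase/Creator pattern above: a descriptor routes every assignment through the field's to_python, so model attributes are always coerced. This simplified variant passes the attribute name explicitly instead of reading field.name; the IntField and Model names are illustrative.
class Creator:
    def __init__(self, field, name):
        self.field, self.name = field, name
    def __get__(self, obj, objtype=None):
        if obj is None:
            raise AttributeError('Can only be accessed via an instance.')
        return obj.__dict__[self.name]
    def __set__(self, obj, value):
        # Coerce on assignment, exactly like the Creator class above.
        obj.__dict__[self.name] = self.field.to_python(value)

class IntField:
    """Hypothetical field: coerces anything assigned to int."""
    def to_python(self, value):
        return int(value)

class Model:
    age = Creator(IntField(), 'age')

m = Model()
m.age = '42'          # descriptor routes through to_python
assert m.age == 42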
R4stl1n/allianceauth | allianceauth/eveonline/autogroups/tests/test_models.py | 4 | 11512 | from django.test import TestCase
from django.contrib.auth.models import Group
from allianceauth.tests.auth_utils import AuthUtils
from allianceauth.eveonline.models import EveCharacter, EveCorporationInfo, EveAllianceInfo
from ..models import AutogroupsConfig, get_users_for_state
from . import patch, connect_signals, disconnect_signals
class AutogroupsConfigTestCase(TestCase):
def setUp(self):
# Disconnect signals
disconnect_signals()
state = AuthUtils.get_member_state()
self.alliance = EveAllianceInfo.objects.create(
alliance_id='3456',
alliance_name='alliance name',
alliance_ticker='TIKR',
executor_corp_id='2345',
)
self.corp = EveCorporationInfo.objects.create(
corporation_id='2345',
corporation_name='corp name',
corporation_ticker='TIKK',
member_count=10,
alliance=self.alliance,
)
state.member_alliances.add(self.alliance)
state.member_corporations.add(self.corp)
self.member = AuthUtils.create_member('test user')
def tearDown(self):
# Reconnect signals
connect_signals()
def test_get_users_for_state(self):
result = get_users_for_state(self.member.profile.state)
self.assertIn(self.member, result)
self.assertEqual(len(result), 1)
@patch('.models.AutogroupsConfig.update_alliance_group_membership')
@patch('.models.AutogroupsConfig.update_corp_group_membership')
def test_update_group_membership(self, update_corp, update_alliance):
agc = AutogroupsConfig.objects.create()
agc.update_group_membership_for_user(self.member)
self.assertTrue(update_corp.called)
self.assertTrue(update_alliance.called)
args, kwargs = update_corp.call_args
self.assertEqual(args[0], self.member)
args, kwargs = update_alliance.call_args
self.assertEqual(args[0], self.member)
def test_update_alliance_group_membership(self):
obj = AutogroupsConfig.objects.create(alliance_groups=True)
obj.states.add(AuthUtils.get_member_state())
char = EveCharacter.objects.create(
character_id='1234',
character_name='test character',
corporation_id='2345',
corporation_name='test corp',
corporation_ticker='tickr',
alliance_id='3456',
alliance_name='alliance name',
)
self.member.profile.main_character = char
self.member.profile.save()
pre_groups = self.member.groups.all()
# Act
obj.update_alliance_group_membership(self.member)
obj.update_corp_group_membership(self.member) # check for no side effects
group = obj.create_alliance_group(self.alliance)
group_qs = Group.objects.filter(pk=group.pk)
self.assertIn(group, self.member.groups.all())
self.assertQuerysetEqual(self.member.groups.all(), map(repr, pre_groups | group_qs), ordered=False)
def test_update_alliance_group_membership_no_main_character(self):
obj = AutogroupsConfig.objects.create()
obj.states.add(AuthUtils.get_member_state())
# Act
obj.update_alliance_group_membership(self.member)
group = obj.get_alliance_group(self.alliance)
self.assertNotIn(group, self.member.groups.all())
def test_update_alliance_group_membership_no_alliance_model(self):
obj = AutogroupsConfig.objects.create()
obj.states.add(AuthUtils.get_member_state())
char = EveCharacter.objects.create(
character_id='1234',
character_name='test character',
corporation_id='2345',
corporation_name='test corp',
corporation_ticker='tickr',
alliance_id='3459',
alliance_name='alliance name',
)
self.member.profile.main_character = char
self.member.profile.save()
# Act
obj.update_alliance_group_membership(self.member)
group = obj.get_alliance_group(self.alliance)
self.assertNotIn(group, self.member.groups.all())
def test_update_corp_group_membership(self):
obj = AutogroupsConfig.objects.create(corp_groups=True)
obj.states.add(AuthUtils.get_member_state())
char = EveCharacter.objects.create(
character_id='1234',
character_name='test character',
corporation_id='2345',
corporation_name='test corp',
corporation_ticker='tickr',
alliance_id='3456',
alliance_name='alliance name',
)
self.member.profile.main_character = char
self.member.profile.save()
pre_groups = self.member.groups.all()
# Act
obj.update_corp_group_membership(self.member)
group = obj.get_corp_group(self.corp)
group_qs = Group.objects.filter(pk=group.pk)
self.assertIn(group, self.member.groups.all())
self.assertQuerysetEqual(self.member.groups.all(), map(repr, pre_groups | group_qs), ordered=False)
def test_update_corp_group_membership_no_state(self):
obj = AutogroupsConfig.objects.create(corp_groups=True)
char = EveCharacter.objects.create(
character_id='1234',
character_name='test character',
corporation_id='2345',
corporation_name='test corp',
corporation_ticker='tickr',
alliance_id='3456',
alliance_name='alliance name',
)
self.member.profile.main_character = char
self.member.profile.save()
pre_groups = list(self.member.groups.all())
# Act
obj.update_corp_group_membership(self.member)
group = obj.get_corp_group(self.corp)
post_groups = list(self.member.groups.all())
self.assertNotIn(group, post_groups)
self.assertListEqual(pre_groups, post_groups)
def test_update_corp_group_membership_no_main_character(self):
obj = AutogroupsConfig.objects.create()
obj.states.add(AuthUtils.get_member_state())
# Act
obj.update_corp_group_membership(self.member)
group = obj.get_corp_group(self.corp)
self.assertNotIn(group, self.member.groups.all())
def test_update_corp_group_membership_no_corp_model(self):
obj = AutogroupsConfig.objects.create()
obj.states.add(AuthUtils.get_member_state())
char = EveCharacter.objects.create(
character_id='1234',
character_name='test character',
corporation_id='2348',
corporation_name='test corp',
corporation_ticker='tickr',
alliance_id='3456',
alliance_name='alliance name',
)
self.member.profile.main_character = char
self.member.profile.save()
# Act
obj.update_corp_group_membership(self.member)
group = obj.get_corp_group(self.corp)
self.assertNotIn(group, self.member.groups.all())
def test_remove_user_from_alliance_groups(self):
obj = AutogroupsConfig.objects.create()
result = obj.get_alliance_group(self.alliance)
result.user_set.add(self.member)
self.assertIn(result, self.member.groups.all())
# Act
obj.remove_user_from_alliance_groups(self.member)
self.assertNotIn(result, self.member.groups.all())
def test_remove_user_from_corp_groups(self):
obj = AutogroupsConfig.objects.create()
result = obj.get_corp_group(self.corp)
result.user_set.add(self.member)
self.assertIn(result, self.member.groups.all())
# Act
obj.remove_user_from_corp_groups(self.member)
self.assertNotIn(result, self.member.groups.all())
def test_get_alliance_group(self):
obj = AutogroupsConfig.objects.create()
result = obj.get_alliance_group(self.alliance)
group = Group.objects.get(name='Alliance alliance name')
self.assertEqual(result, group)
self.assertEqual(obj.get_alliance_group_name(self.alliance), result.name)
self.assertTrue(obj.alliance_managed_groups.filter(pk=result.pk).exists())
def test_get_corp_group(self):
obj = AutogroupsConfig.objects.create()
result = obj.get_corp_group(self.corp)
group = Group.objects.get(name='Corp corp name')
self.assertEqual(result, group)
self.assertEqual(obj.get_corp_group_name(self.corp), group.name)
self.assertTrue(obj.corp_managed_groups.filter(pk=group.pk).exists())
def test_create_alliance_group(self):
obj = AutogroupsConfig.objects.create()
result = obj.create_alliance_group(self.alliance)
group = Group.objects.get(name='Alliance alliance name')
self.assertEqual(result, group)
self.assertEqual(obj.get_alliance_group_name(self.alliance), group.name)
self.assertTrue(obj.alliance_managed_groups.filter(pk=group.pk).exists())
def test_create_corp_group(self):
obj = AutogroupsConfig.objects.create()
result = obj.create_corp_group(self.corp)
group = Group.objects.get(name='Corp corp name')
self.assertEqual(result, group)
self.assertEqual(obj.get_corp_group_name(self.corp), group.name)
self.assertTrue(obj.corp_managed_groups.filter(pk=group.pk).exists())
def test_delete_alliance_managed_groups(self):
obj = AutogroupsConfig.objects.create()
obj.create_alliance_group(self.alliance)
self.assertTrue(obj.alliance_managed_groups.all().exists())
obj.delete_alliance_managed_groups()
self.assertFalse(obj.alliance_managed_groups.all().exists())
def test_delete_corp_managed_groups(self):
obj = AutogroupsConfig.objects.create()
obj.create_corp_group(self.corp)
self.assertTrue(obj.corp_managed_groups.all().exists())
obj.delete_corp_managed_groups()
self.assertFalse(obj.corp_managed_groups.all().exists())
def test_get_alliance_group_name(self):
obj = AutogroupsConfig()
obj.replace_spaces = True
obj.replace_spaces_with = '_'
result = obj.get_alliance_group_name(self.alliance)
self.assertEqual(result, 'Alliance_alliance_name')
def test_get_alliance_group_name_ticker(self):
obj = AutogroupsConfig()
obj.replace_spaces = True
obj.replace_spaces_with = '_'
obj.alliance_name_source = obj.OPT_TICKER
result = obj.get_alliance_group_name(self.alliance)
self.assertEqual(result, 'Alliance_TIKR')
def test_get_corp_group_name(self):
obj = AutogroupsConfig()
obj.replace_spaces = True
obj.replace_spaces_with = '_'
result = obj.get_corp_group_name(self.corp)
self.assertEqual(result, 'Corp_corp_name')
def test_get_corp_group_name_ticker(self):
obj = AutogroupsConfig()
obj.replace_spaces = True
obj.replace_spaces_with = '_'
obj.corp_name_source = obj.OPT_TICKER
result = obj.get_corp_group_name(self.corp)
self.assertEqual(result, 'Corp_TIKK')
def test__replace_spaces(self):
obj = AutogroupsConfig()
obj.replace_spaces = True
obj.replace_spaces_with = '*'
name = ' test name '
result = obj._replace_spaces(name)
self.assertEqual(result, 'test*name')
| gpl-2.0 |
NTesla/pattern | pattern/web/cache/__init__.py | 21 | 4199 | #### PATTERN | CACHE ###############################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
try:
import hashlib; md5=hashlib.md5
except:
import md5; md5=md5.new
#### UNICODE #######################################################################################
def decode_string(v, encoding="utf-8"):
""" Returns the given value as a Unicode string (if possible).
"""
if isinstance(encoding, basestring):
encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
if isinstance(v, str):
for e in encoding:
try: return v.decode(*e)
except:
pass
return v
return unicode(v)
def encode_string(v, encoding="utf-8"):
""" Returns the given value as a Python byte string (if possible).
"""
if isinstance(encoding, basestring):
encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
if isinstance(v, unicode):
for e in encoding:
try: return v.encode(*e)
except:
pass
return v
return str(v)
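# Illustrative behavior (the byte string below is assumed to be UTF-8 encoded):
# decode_string("caf\xc3\xa9") == u"caf\xe9"; byte strings that are not valid
# UTF-8 are retried as windows-1252 before the permissive ("utf-8", "ignore") pass.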
decode_utf8 = decode_string
encode_utf8 = encode_string
#### CACHE #########################################################################################
# Caching is implemented in URL.download(), which is used by all other downloaders.
import os
import glob
import tempfile
import codecs
import datetime
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
TMP = os.path.join(tempfile.gettempdir(), "pattern_web")
def date_now():
return datetime.datetime.today()
def date_modified(path):
    return datetime.datetime.fromtimestamp(os.stat(path)[8]) # index 8 = st_mtime (last modification time)
class Cache(object):
def __init__(self, path=os.path.join(MODULE, "tmp")):
""" Cache with data stored as files with hashed filenames.
        Content retrieved from URLs and search engines is stored in the cache for performance.
The path where the cache is stored can be given. This way you can manage persistent
sets of downloaded data. If path=TMP, cached items are stored in a temporary folder.
"""
self.path = path
def _get_path(self):
return self._path
def _set_path(self, path):
if not os.path.isdir(path):
os.makedirs(path)
self._path = path
path = property(_get_path, _set_path)
def _hash(self, k):
k = encode_utf8(k) # MD5 works on Python byte strings.
return os.path.join(self.path, md5(k).hexdigest())
def __len__(self):
return len(glob.glob(os.path.join(self.path, "*")))
def __contains__(self, k):
return os.path.exists(self._hash(k))
def __getitem__(self, k):
return self.get(k)
def __setitem__(self, k, v):
f = open(self._hash(k), "wb")
f.write(codecs.BOM_UTF8)
f.write(encode_utf8(v))
f.close()
def __delitem__(self, k):
try: os.unlink(self._hash(k))
except OSError:
pass
def get(self, k, unicode=True):
""" Returns the data stored with the given id.
With unicode=True, returns a Unicode string.
"""
if k in self:
f = open(self._hash(k), "rb"); v=f.read().lstrip(codecs.BOM_UTF8)
f.close()
if unicode is True:
return decode_utf8(v)
else:
return v
raise KeyError(k)
def age(self, k):
""" Returns the age of the cached item, in days.
"""
p = self._hash(k)
return os.path.exists(p) and (date_now() - date_modified(p)).days or 0
def clear(self, age=None):
""" Clears all items from the cache (whose age is the given amount of days or older).
"""
n = date_now()
for p in glob.glob(os.path.join(self.path, "*")):
if age is None or (n - date_modified(p)).days >= age:
os.unlink(p)
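# Usage sketch (illustrative): cache a downloaded page under its URL.
# Keys are hashed with MD5, so any string (e.g., a full URL) is a valid id.
#
#     c = Cache(path=TMP)
#     c["http://example.com"] = u"<html>...</html>"
#     if "http://example.com" in c and c.age("http://example.com") < 7:
#         html = c["http://example.com"]
#     c.clear(age=30) # drop items older than 30 days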
cache = Cache()
| bsd-3-clause |
lpryszcz/bin | fastq2fasta.py | 3 | 4418 | #!/usr/bin/env python
desc="""Convert FastQ to FASTA.
Note, reads are reported in arbitrary order due to multiprocessing!
"""
epilog="""
l.p.pryszcz+git@gmail.com
Barcelona, 10/05/2013
"""
import argparse, gzip, os, sys, subprocess
from datetime import datetime
from multiprocessing import Pool
def init_args(*args):
global minLen, qualityTh, offset
minLen, qualityTh, offset = args
def worker(read):
    """Clip the read at the first N or low-quality base; return (length, fasta entry)."""
    global minLen, qualityTh, offset
    id, seq, spacer, quals = read
    for i, (s, q) in enumerate(zip(seq, quals)):
        if s == "N" or qualityTh and ord(q)-offset < qualityTh:
            #clip just before the first offending base
            seq = seq[:i]
            break
    #skip if too short
    if len(seq) < minLen:
        return 0, ''
    return len(seq), '>%s\n%s\n' % (id[1:], seq)
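#Worked example (hypothetical read, assuming offset=33 and qualityTh=20):
# id, seq, quals = "@read1", "ACGTNACGT", "IIIII!III"
#the loop in worker() stops at the "N" (i=4), so the read is clipped to "ACGT";
#with minLen <= 4 it is reported as ">read1\nACGT\n", otherwise it is skipped.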
def process(reads, minLen, qualityTh, offset):
    """Process reads on a single core."""
    for id, seq, spacer, quals in reads:
        for i, (s, q) in enumerate(zip(seq, quals)):
            if s == "N" or qualityTh and ord(q)-offset < qualityTh:
                #clip just before the first offending base
                seq = seq[:i]
                break
        #skip if too short
        if len(seq) < minLen:
            yield 0, ''
        else:
            yield len(seq), '>%s\n%s\n' % (id[1:], seq)
def fastq2rec(handle):
"""Yield fastq records as tuple"""
read = []
for line in handle:
#skip empty lines
line = line[:-1] #.strip()
if not line:
continue
#store read info
read.append(line)
#report reads
if len(read)==4:
yield read
read = []
def fastq2fasta(handle, output, minLen, qualityTh, offset, bases, nproc=4, verbose=1):
""" """
if nproc>1:
p = Pool(nproc, initializer=init_args, initargs=(minLen, qualityTh, offset))
parser = enumerate(p.imap_unordered(worker, fastq2rec(handle), chunksize=100), 1)
else:
parser = enumerate(process(fastq2rec(handle), minLen, qualityTh, offset), 1)
#parse fastq
i = totsize = 0
for i, (seqlen, fasta) in parser:
if not i%1e5:
sys.stderr.write(' %s \r'%i)
if not seqlen:
continue
# store
output.write(fasta)
totsize += seqlen
# process up to bases
if totsize > bases:
if nproc>1: p.terminate()
break
sys.stderr.write('Reported %s bases from %s reads.\n'%(totsize, i))
def main():
usage = "%(prog)s [options] -v "
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
parser.add_argument("-v", action="store_true", dest="verbose")
parser.add_argument('--version', action='version', version='1.1a')
parser.add_argument("-i", "--input", nargs="+", default=[sys.stdin], type=file, help="input file [stdin]")
parser.add_argument("-o", "--output", default=sys.stdout, help="output file [stdout]")
parser.add_argument("-l", "--minLen", default=0, type=int,
help="skip reads shorter than [%(default)s]" )
parser.add_argument("-q", "--qualityTh", default=0, type=int,
help="read is clipped @ first base having PHRED quality lower than [%(default)s]" )
parser.add_argument("--offset", default=33, type=int,
help="quality encoding; PHRED+33 (Sanger) or PHRED+64 (Illumina/Solexa) [%(default)s]")
parser.add_argument("-b", "--bases", default=float('inf'), type=float,
help="process up to b bases [%(default)s]" )
parser.add_argument("-t", "--threads", default=4, type=int,
help="no. of cores to use [%(default)s]" )
o = parser.parse_args()
for handle in o.input:
# open gzip file as subprocess
if handle.name.endswith('.gz'):
zcat = subprocess.Popen(['zcat', handle.name], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
handle = zcat.stdout
# convert
fastq2fasta(handle, o.output, o.minLen, o.qualityTh, o.offset, o.bases, o.threads, o.verbose)
if __name__=='__main__':
t0=datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
except IOError as e:
sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt=datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n" % dt)
| gpl-3.0 |
diogocs1/comps | web/addons/account/account_move_line.py | 11 | 78177 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp import workflow
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
from openerp.report import report_sxw
import openerp
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
fiscalyear_obj = self.pool.get('account.fiscalyear')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
fiscalyear_ids = []
context = dict(context or {})
initial_bal = context.get('initial_bal', False)
company_clause = " "
if context.get('company_id', False):
company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
if not context.get('fiscalyear', False):
if context.get('all_fiscalyear', False):
#this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
#for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
fiscalyear_ids = [context['fiscalyear']]
fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
state = context.get('state', False)
where_move_state = ''
where_move_lines_by_date = ''
if context.get('date_from', False) and context.get('date_to', False):
if initial_bal:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
else:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
if state:
if state.lower() not in ['all']:
where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods', False):
if initial_bal:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
if period_ids and period_ids[0]:
first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND date_start <= '%s' AND id NOT IN (%s)) %s %s" % (fiscalyear_clause, first_period.date_start, ids, where_move_state, where_move_lines_by_date)
else:
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
if initial_bal and not context.get('periods', False) and not where_move_lines_by_date:
#we didn't pass any filter in the context, and the initial balance can't be computed using only the fiscalyear otherwise entries will be summed twice
#so we have to invalidate this query
raise osv.except_osv(_('Warning!'),_("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
if context.get('journal_ids', False):
query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
if context.get('chart_account_id', False):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
query += company_clause
return query
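    # Illustrative example (hypothetical ids): with
    #     context = {'fiscalyear': 1, 'state': 'posted', 'journal_ids': [1, 2]}
    # _query_get() returns a WHERE fragment along the lines of:
    #     l.state <> 'draft' AND l.period_id IN (SELECT id FROM account_period
    #     WHERE fiscalyear_id IN (1)) AND l.move_id IN (SELECT id FROM account_move
    #     WHERE account_move.state = 'posted') AND l.journal_id IN (1,2)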
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
This function returns the residual amount on a receivable or payable account.move.line.
        By default, it returns an amount in the currency of this journal entry (which may differ
        from the company currency), but if you pass 'residual_in_company_currency' = True in the
        context then the returned amount will be in the company currency.
"""
res = {}
if context is None:
context = {}
cur_obj = self.pool.get('res.currency')
for move_line in self.browse(cr, uid, ids, context=context):
res[move_line.id] = {
'amount_residual': 0.0,
'amount_residual_currency': 0.0,
}
if move_line.reconcile_id:
continue
if not move_line.account_id.reconcile:
                #this function does not support move lines that are not related to a reconcilable account
continue
if move_line.currency_id:
move_line_total = move_line.amount_currency
sign = move_line.amount_currency < 0 and -1 or 1
else:
move_line_total = move_line.debit - move_line.credit
sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
line_total_in_company_currency = move_line.debit - move_line.credit
context_unreconciled = context.copy()
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
move_line_total += payment_line.amount_currency
else:
if move_line.currency_id:
context_unreconciled.update({'date': payment_line.date})
amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
move_line_total += amount_in_foreign_currency
else:
move_line_total += (payment_line.debit - payment_line.credit)
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
result = move_line_total
res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
return res
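    # Worked example (hypothetical amounts): a receivable line with debit=100.0
    # that is partially reconciled with a payment line of credit=40.0 yields
    # amount_residual == 60.0 (and the same amount in currency when no secondary
    # currency is involved).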
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
"""
        Prepare the values used to create() an account.analytic.line upon validation of a journal
        item that has an analytic account. This method is intended to be extended in other modules.
:param obj_line: browse record of the account.move.line that triggered the analytic line creation
"""
return {'name': obj_line.name,
'date': obj_line.date,
'account_id': obj_line.analytic_account_id.id,
'unit_amount': obj_line.quantity,
'product_id': obj_line.product_id and obj_line.product_id.id or False,
'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
'general_account_id': obj_line.account_id.id,
'journal_id': obj_line.journal_id.analytic_journal_id.id,
'ref': obj_line.ref,
'move_id': obj_line.id,
'user_id': uid,
}
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_lines:
acc_ana_line_obj.unlink(cr,uid,[obj.id for obj in obj_line.analytic_lines])
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
acc_ana_line_obj.create(cr, uid, vals_line)
return True
def _default_get_move_form_hook(self, cursor, user, data):
        '''Called at the end of the default_get method for manual entries in the account_move form'''
        if 'analytic_account_id' in data:
            del data['analytic_account_id']
        if 'account_tax_id' in data:
            del data['account_tax_id']
return data
def convert_to_period(self, cr, uid, context=None):
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
if type(period_id) == str:
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context = dict(context, period_id=ids and ids[0] or False)
return context
def _default_get(self, cr, uid, fields, context=None):
#default_get should only do the following:
# -propose the next amount in debit/credit in order to balance the move
# -propose the next account from the journal (default debit/credit account) accordingly
context = dict(context or {})
account_obj = self.pool.get('account.account')
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
tax_obj = self.pool.get('account.tax')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
if not context.get('journal_id', False):
context['journal_id'] = context.get('search_default_journal_id', False)
if not context.get('period_id', False):
context['period_id'] = context.get('search_default_period_id', False)
context = self.convert_to_period(cr, uid, context)
# Compute simple values
data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
if context.get('journal_id'):
total = 0.0
#in account.move form view, it is not possible to compute total debit and credit using
#a browse record. So we must use the context to pass the whole one2many field and compute the total
if context.get('line_id'):
for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
data['name'] = data.get('name') or move_line_dict.get('name')
data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
elif context.get('period_id'):
#find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
move_id = False
cr.execute('''SELECT move_id, date FROM account_move_line
WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
res = cr.fetchone()
move_id = res and res[0] or False
data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
data['move_id'] = move_id
if move_id:
#if there exist some unbalanced accounting entries that match the journal and the period,
#we propose to continue the same move by copying the ref, the name, the partner...
move = move_obj.browse(cr, uid, move_id, context=context)
data.setdefault('name', move.line_id[-1].name)
for l in move.line_id:
data['partner_id'] = data.get('partner_id') or l.partner_id.id
data['ref'] = data.get('ref') or l.ref
total += (l.debit or 0.0) - (l.credit or 0.0)
#compute the total of current move
data['debit'] = total < 0 and -total or 0.0
data['credit'] = total > 0 and total or 0.0
#pick the good account on the journal accordingly if the next proposed line will be a debit or a credit
journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
#map the account using the fiscal position of the partner, if needed
part = data.get('partner_id') and partner_obj.browse(cr, uid, data['partner_id'], context=context) or False
if account and data.get('partner_id'):
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
account = account_obj.browse(cr, uid, account, context=context)
data['account_id'] = account and account.id or False
#compute the amount in secondary currency of the account, if needed
if account and account.currency_id:
data['currency_id'] = account.currency_id.id
#set the context for the multi currency change
compute_ctx = context.copy()
compute_ctx.update({
                    #the following 2 parameters are used to choose the currency rate, in case the account
                    #doesn't use the 'at date' currency rate method but the 'average' one
'res.currency.compute.account': account,
'res.currency.compute.account_invert': True,
})
if data.get('date'):
compute_ctx.update({'date': data['date']})
data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
data = self._default_get_move_form_hook(cr, uid, data)
return data
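    # Illustrative flow (hypothetical amounts): after encoding a first line with
    # debit=100.0 in a draft move, the next call proposes credit=100.0 together
    # with the journal's default credit account, so the entry can be balanced
    # with a minimum of typing.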
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
return map(lambda x: x.id, ml.move_id.line_id)
def _balance(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
c = context.copy()
        c['initial_bal'] = True
sql = """SELECT l1.id, COALESCE(SUM(l2.debit-l2.credit), 0)
FROM account_move_line l1 LEFT JOIN account_move_line l2
ON (l1.account_id = l2.account_id
AND l2.id <= l1.id
AND """ + \
self._query_get(cr, uid, obj='l2', context=c) + \
") WHERE l1.id IN %s GROUP BY l1.id"
cr.execute(sql, [tuple(ids)])
return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = invoice_id and (invoice_id, invoice_names[invoice_id]) or False
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
GROUP BY id, debit, credit having '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _invoice_search(self, cursor, user, obj, name, args, context=None):
if not args:
return []
invoice_obj = self.pool.get('account.invoice')
i = 0
while i < len(args):
fargs = args[i][0].split('.', 1)
if len(fargs) > 1:
args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
[(fargs[1], args[i][1], args[i][2])]))
i += 1
continue
if isinstance(args[i][2], basestring):
res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
args[i][1])
args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
i += 1
qu1, qu2 = [], []
for x in args:
if x[1] != 'in':
if (x[2] is False) and (x[1] == '='):
qu1.append('(i.id IS NULL)')
elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
qu1.append('(i.id IS NOT NULL)')
else:
qu1.append('(i.id %s %s)' % (x[1], '%s'))
qu2.append(x[2])
elif x[1] == 'in':
if len(x[2]) > 0:
qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
qu2 += x[2]
else:
qu1.append(' (False)')
if qu1:
qu1 = ' AND' + ' AND'.join(qu1)
else:
qu1 = ''
cursor.execute('SELECT l.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' + qu1, qu2)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
def _get_reconcile(self, cr, uid, ids,name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('journal_id','in',move.keys())], context=context)
return move_line_ids
_columns = {
'name': fields.char('Name', required=True),
        'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, e.g. number of products sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1, copy=False),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_ref': fields.function(_get_reconcile, type='char', string='Reconcile Ref', oldname='reconcile', store={
'account.move.line': (lambda self, cr, uid, ids, c={}: ids, ['reconcile_id','reconcile_partial_id'], 50),'account.move.reconcile': (_get_move_from_reconcile, None, 50)}),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
        'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (which may differ from the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True, copy=False),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
        'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount. If the tax account is a base tax code, "\
                    "this field will contain the base amount (without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax', copy=False),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
if context is None:
            context = {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
def _get_journal(self, cr, uid, context=None):
"""
Return journal based on the journal type
"""
context = context or {}
if context.get('journal_id', False):
return context['journal_id']
journal_id = False
journal_pool = self.pool.get('account.journal')
if context.get('journal_type', False):
jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
if not jids:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_journal_form')
                msg = _("""Cannot find any account journal of "%s" type for this company. You should create one.\n Please go to Journal Configuration""") % context.get('journal_type').replace('_', ' ').title()
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
journal_id = jids[0]
return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
def _auto_init(self, cr, context=None):
res = super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_date_id_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_date_id_index ON account_move_line (date DESC, id desc)')
return res
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type in ('view', 'consolidation'):
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
raise osv.except_osv(_('Error!'), _('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view or consolidation.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
(_check_currency_amount, 'The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.', ['amount_currency']),
(_check_currency_company, "You cannot provide a secondary currency if it is the same than the company one." , ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context = dict(context)
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False, context=None):
partner_obj = self.pool.get('res.partner')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
val['date_maturity'] = False
if not partner_id:
return {'value':val}
if not date:
date = datetime.now().strftime('%Y-%m-%d')
jt = False
if journal:
jt = journal_obj.browse(cr, uid, journal, context=context).type
part = partner_obj.browse(cr, uid, partner_id, context=context)
payment_term_id = False
if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
payment_term_id = part.property_supplier_payment_term.id
elif jt and part.property_payment_term:
payment_term_id = part.property_payment_term.id
if payment_term_id:
res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
if res:
val['date_maturity'] = res[0][0]
if not account_id:
id1 = part.property_account_payable.id
id2 = part.property_account_receivable.id
if jt:
if jt in ('sale', 'purchase_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif jt in ('purchase', 'sale_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
elif jt in ('general', 'bank', 'cash'):
if part.customer:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif part.supplier:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
if val.get('account_id', False):
d = self.onchange_account_id(cr, uid, ids, account_id=val['account_id'], partner_id=part.id, context=context)
val.update(d['value'])
return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False, context=None):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id, context=context)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id, context=context)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
    #
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
    #
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('fiscalyear'):
args.append(('period_id.fiscalyear_id', '=', context.get('fiscalyear', False)))
if context and context.get('next_partner_only', False):
if not context.get('partner_id', False):
partner = self.list_partners_to_reconcile(cr, uid, context=context)
if partner:
partner = partner[0]
else:
partner = context.get('partner_id', False)
if not partner:
return []
args.append(('partner_id', '=', partner[0]))
return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
def prepare_move_lines_for_reconciliation_widget(self, cr, uid, lines, target_currency=False, target_date=False, context=None):
""" Returns move lines formatted for the manual/bank reconciliation widget
            :param target_currency: currency you want the move line debit/credit converted into
:param target_date: date to use for the monetary conversion
"""
if not lines:
return []
if context is None:
context = {}
ctx = context.copy()
currency_obj = self.pool.get('res.currency')
company_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_aml', context=context)
ret = []
for line in lines:
partial_reconciliation_siblings_ids = []
if line.reconcile_partial_id:
partial_reconciliation_siblings_ids = self.search(cr, uid, [('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)
partial_reconciliation_siblings_ids.remove(line.id)
ret_line = {
'id': line.id,
'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
'ref': line.move_id.ref,
'account_code': line.account_id.code,
'account_name': line.account_id.name,
'account_type': line.account_id.type,
'date_maturity': line.date_maturity,
'date': line.date,
'period_name': line.period_id.name,
'journal_name': line.journal_id.name,
'partner_id': line.partner_id.id,
'partner_name': line.partner_id.name,
'is_partially_reconciled': bool(line.reconcile_partial_id),
'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,
}
# Amount residual can be negative
debit = line.debit
credit = line.credit
total_amount = abs(debit - credit)
total_amount_currency = line.amount_currency
amount_residual = line.amount_residual
amount_residual_currency = line.amount_residual_currency
if line.amount_residual < 0:
debit, credit = credit, debit
amount_residual = -amount_residual
amount_residual_currency = -amount_residual_currency
# Get right debit / credit:
line_currency = line.currency_id or company_currency
amount_currency_str = ""
total_amount_currency_str = ""
if line.currency_id and line.amount_currency:
amount_currency_str = rml_parser.formatLang(amount_residual_currency, currency_obj=line.currency_id)
total_amount_currency_str = rml_parser.formatLang(total_amount_currency, currency_obj=line.currency_id)
if target_currency and line_currency == target_currency and target_currency != company_currency:
debit = debit > 0 and amount_residual_currency or 0.0
credit = credit > 0 and amount_residual_currency or 0.0
amount_currency_str = rml_parser.formatLang(amount_residual, currency_obj=company_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=company_currency)
amount_str = rml_parser.formatLang(debit or credit, currency_obj=target_currency)
total_amount_str = rml_parser.formatLang(total_amount_currency, currency_obj=target_currency)
else:
debit = debit > 0 and amount_residual or 0.0
credit = credit > 0 and amount_residual or 0.0
amount_str = rml_parser.formatLang(debit or credit, currency_obj=company_currency)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=company_currency)
if target_currency and target_currency != company_currency:
amount_currency_str = rml_parser.formatLang(debit or credit, currency_obj=line_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)
ctx = context.copy()
if target_date:
ctx.update({'date': target_date})
debit = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, debit, context=ctx)
credit = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, credit, context=ctx)
amount_str = rml_parser.formatLang(debit or credit, currency_obj=target_currency)
total_amount = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, total_amount, context=ctx)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)
ret_line['credit'] = credit
ret_line['debit'] = debit
ret_line['amount_str'] = amount_str
ret_line['amount_currency_str'] = amount_currency_str
ret_line['total_amount_str'] = total_amount_str # For partial reconciliations
ret_line['total_amount_currency_str'] = total_amount_currency_str
ret.append(ret_line)
return ret
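    # Illustrative result entry (hypothetical values):
    #     {'id': 42, 'name': 'INV/2014/0001: line', 'account_code': '411100',
    #      'debit': 100.0, 'credit': 0.0, 'amount_str': '100.00 €',
    #      'is_partially_reconciled': False, ...}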
def list_partners_to_reconcile(self, cr, uid, context=None):
cr.execute(
"""SELECT partner_id FROM (
SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit, MAX(l.create_date) AS max_date
FROM account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND l.reconcile_id IS NULL
AND l.state <> 'draft'
GROUP BY l.partner_id, p.last_reconciliation_date
) AS s
WHERE debit > 0 AND credit > 0 AND (last_reconciliation_date IS NULL OR max_date > last_reconciliation_date)
ORDER BY last_reconciliation_date""")
ids = [x[0] for x in cr.fetchall()]
if not ids:
return []
# To apply the ir_rules
partner_obj = self.pool.get('res.partner')
ids = partner_obj.search(cr, uid, [('id', 'in', ids)], context=context)
return partner_obj.name_get(cr, uid, ids, context=context)
def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
move_rec_obj = self.pool.get('account.move.reconcile')
merges = []
unmerge = []
total = 0.0
merges_rec = []
company_list = []
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries, the company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in self.browse(cr, uid, ids, context=context):
if line.account_id.currency_id:
currency_id = line.account_id.currency_id
else:
currency_id = line.company_id.currency_id
if line.reconcile_id:
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if line2.state != 'valid':
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s) cannot be used in a reconciliation as it is not balanced!") % (line2.name, line2.id))
if not line2.reconcile_id:
if line2.id not in merges:
merges.append(line2.id)
if line2.account_id.currency_id:
total += line2.amount_currency
else:
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
return res
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
}, context=reconcile_context)
move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
return r_id
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
account_obj = self.pool.get('account.account')
move_obj = self.pool.get('account.move')
move_rec_obj = self.pool.get('account.move.reconcile')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
lines = self.browse(cr, uid, ids, context=context)
unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
credit = debit = 0.0
currency = 0.0
account_id = False
partner_id = False
if context is None:
context = {}
company_list = []
for line in self.browse(cr, uid, ids, context=context):
if company_list and not line.company_id.id in company_list:
raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in unrec_lines:
            if line.state != 'valid':
raise osv.except_osv(_('Error!'),
_('Entry "%s" is not valid !') % line.name)
credit += line['credit']
debit += line['debit']
currency += line['amount_currency'] or 0.0
account_id = line['account_id']['id']
partner_id = (line['partner_id'] and line['partner_id']['id']) or False
writeoff = debit - credit
        # If date_p is given in the context, use it as the reconciliation date
        if context.get('date_p'):
            date = context['date_p']
else:
date = time.strftime('%Y-%m-%d')
cr.execute('SELECT account_id, reconcile_id '\
'FROM account_move_line '\
'WHERE id IN %s '\
'GROUP BY account_id,reconcile_id',
(tuple(ids), ))
r = cr.fetchall()
#TODO: move this check to a constraint in the account_move_reconcile object
if len(r) != 1:
raise osv.except_osv(_('Error'), _('Entries are not of the same account or already reconciled ! '))
if not unrec_lines:
raise osv.except_osv(_('Error!'), _('Entry is already reconciled.'))
account = account_obj.browse(cr, uid, account_id, context=context)
if not account.reconcile:
raise osv.except_osv(_('Error'), _('The account is not defined to be reconciled !'))
        if r[0][1] is not None:
raise osv.except_osv(_('Error!'), _('Some entries are already reconciled.'))
if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
(account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
if not writeoff_acc_id:
raise osv.except_osv(_('Warning!'), _('You have to provide an account for the write off/exchange difference entry.'))
if writeoff > 0:
debit = writeoff
credit = 0.0
self_credit = writeoff
self_debit = 0.0
else:
debit = 0.0
credit = -writeoff
self_credit = 0.0
self_debit = -writeoff
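            # Sign convention (as implied by the branches above): a positive
            # residual (debit > credit) is written off with a debit on the
            # write-off account and a credit on the reconciled account; a
            # negative residual is the mirror image.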
            # If a comment is given in the context, use it as the write-off label
if 'comment' in context and context['comment']:
libelle = context['comment']
else:
libelle = _('Write-Off')
cur_obj = self.pool.get('res.currency')
cur_id = False
amount_currency_writeoff = 0.0
if context.get('company_currency_id',False) != context.get('currency_id',False):
cur_id = context.get('currency_id',False)
for line in unrec_lines:
if line.currency_id and line.currency_id.id == context.get('currency_id',False):
amount_currency_writeoff += line.amount_currency
else:
tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
writeoff_lines = [
(0, 0, {
'name': libelle,
'debit': self_debit,
'credit': self_credit,
'account_id': account_id,
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
}),
(0, 0, {
'name': libelle,
'debit': debit,
'credit': credit,
'account_id': writeoff_acc_id,
'analytic_account_id': context.get('analytic_id', False),
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
})
]
writeoff_move_id = move_obj.create(cr, uid, {
'period_id': writeoff_period_id,
'journal_id': writeoff_journal_id,
'date':date,
'state': 'draft',
'line_id': writeoff_lines
})
writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
if account_id == writeoff_acc_id:
writeoff_line_ids = [writeoff_line_ids[1]]
ids += writeoff_line_ids
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_id': map(lambda x: (4, x, False), ids),
'line_partial_ids': map(lambda x: (3, x, False), ids)
}, context=reconcile_context)
# the id of the move.reconcile is written in the move.line (self) by the create method above
# because of the way the line_id are defined: (4, x, False)
for id in ids:
workflow.trg_trigger(uid, 'account.move.line', id, cr)
if lines and lines[0]:
partner_id = lines[0].partner_id and lines[0].partner_id.id or False
if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
if context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
        @param ids: IDs of the records being edited
        @param date: latest value from user input for field date
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.find(cr, user, date, context=context)
if pids:
res.update({'period_id':pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('User Error!'),
_('The account move (%s) for centralisation ' \
'has been confirmed.') % res[2])
return res
def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the move reconcile ids related to the given moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
all_moves = obj_move_line.search(cr, uid, ['|',('reconcile_id', 'in', unlink_ids),('reconcile_partial_id', 'in', unlink_ids)])
all_moves = list(set(all_moves) - set(move_ids))
if unlink_ids:
if opening_reconciliation:
raise osv.except_osv(_('Warning!'),
_('Opening Entries have already been generated. Please run "Cancel Closing Entries" wizard to cancel those entries and then run this wizard.'))
obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
obj_move_rec.unlink(cr, uid, unlink_ids)
if len(all_moves) >= 2:
obj_move_line.reconcile_partial(cr, uid, all_moves, 'auto',context=context)
return True
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
context['journal_id'] = line.journal_id.id
context['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=context)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('account_tax_id', False):
raise osv.except_osv(_('Unable to change tax!'), _('You cannot change the tax, you should remove and recreate lines.'))
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if update_check:
if ('account_id' in vals) or ('journal_id' in vals) or ('period_id' in vals) or ('move_id' in vals) or ('debit' in vals) or ('credit' in vals) or ('date' in vals):
self._update_check(cr, uid, ids, context)
todo_date = None
if vals.get('date', False):
todo_date = vals['date']
del vals['date']
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
if not ctx.get('journal_id'):
if line.move_id:
ctx['journal_id'] = line.move_id.journal_id.id
else:
ctx['journal_id'] = line.journal_id.id
if not ctx.get('period_id'):
if line.move_id:
ctx['period_id'] = line.move_id.period_id.id
else:
ctx['period_id'] = line.period_id.id
#Check for centralisation
journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
if journal.centralisation:
self._check_moves(cr, uid, context=ctx)
result = super(account_move_line, self).write(cr, uid, ids, vals, context)
if check:
done = []
for line in self.browse(cr, uid, ids):
if line.move_id.id not in done:
done.append(line.move_id.id)
move_obj.validate(cr, uid, [line.move_id.id], context)
if todo_date:
move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
                raise osv.except_osv(_('Error!'), _('You can not add/modify entries in a closed period %s of journal %s.') % (period.name, journal.name))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
            if line.move_id.state != 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a confirmed entry. You can just change some non legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
if line.reconcile_id:
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
context = dict(context or {})
if vals.get('move_id', False):
move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
if move.company_id:
vals['company_id'] = move.company_id.id
if move.date and not vals.get('date'):
vals['date'] = move.date
if ('account_id' in vals) and not account_obj.read(cr, uid, [vals['account_id']], ['active'])[0]['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if 'journal_id' in vals and vals['journal_id']:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals and vals['period_id']:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
#we need to treat the case where a value is given in the context for period_id as a string
if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
if len(period_candidate_ids) != 1:
raise osv.except_osv(_('Error!'), _('No period found or more than one period found for the given date.'))
context['period_id'] = period_candidate_ids[0][0]
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
vals['period_id'] = vals.get('period_id') or context.get('period_id')
vals['date'] = vals.get('date') or context.get('date')
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No Piece Number!'), _('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
# Automatically convert in the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise osv.except_osv(_('Bad Account!'), _('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
result = super(account_move_line, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
if journal.type in ('purchase_refund', 'sale_refund') or (journal.type in ('cash', 'bank') and total < 0):
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
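            # Refund journals (and negative cash/bank totals) book against the
            # refund base/tax codes and the "paid" account instead of the
            # "collected" one, each with its own sign.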
tmp_cnt = 0
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=False).get('taxes'):
#create the base movement
if tmp_cnt == 0:
if tax[base_code]:
tmp_cnt += 1
if tax_id.price_include:
total = tax['price_unit']
newvals = {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
}
if tax_id.price_include:
if tax['price_unit'] < 0:
newvals['credit'] = abs(tax['price_unit'])
else:
newvals['debit'] = tax['price_unit']
self.write(cr, uid, [result], newvals, context=context)
else:
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id', False),
'ref': vals.get('ref', False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
#create the Tax movement
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
del vals['account_tax_id']
if check and not context.get('novalidate') and (context.get('recompute', True) or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr,uid, [vals['move_id']], context)
return result
def list_periods(self, cr, uid, context=None):
ids = self.pool.get('account.period').search(cr,uid,[])
return self.pool.get('account.period').name_get(cr, uid, ids, context=context)
def list_journals(self, cr, uid, context=None):
ng = dict(self.pool.get('account.journal').name_search(cr,uid,'',[]))
ids = ng.keys()
result = []
for journal in self.pool.get('account.journal').browse(cr, uid, ids, context=context):
result.append((journal.id,ng[journal.id],journal.type,
bool(journal.currency),bool(journal.analytic_journal_id)))
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 |
django-nonrel/django-nonrel | django/db/backends/oracle/compiler.py | 347 | 2830 | from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
# If this query has limit/offset information, then we expect the
# first column to be an extra "_RN" column that we need to throw
# away.
if self.query.high_mark is not None or self.query.low_mark:
rn_offset = 1
else:
rn_offset = 0
index_start = rn_offset + len(self.query.extra_select.keys())
values = [self.query.convert_values(v, None, connection=self.connection)
for v in row[rn_offset:index_start]]
for value, field in map(None, row[index_start:], fields):
values.append(self.query.convert_values(value, field, connection=self.connection))
return tuple(values)
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list
        of parameters. This is overridden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset with Oracle.
do_offset = with_limits and (self.query.high_mark is not None
or self.query.low_mark)
if not do_offset:
sql, params = super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
else:
sql, params = super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=True)
# Wrap the base query in an outer SELECT * with boundaries on
# the "_RN" column. This is the canonical way to emulate LIMIT
# and OFFSET on Oracle.
high_where = ''
if self.query.high_mark is not None:
high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
sql = 'SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (%s) "_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
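            # Illustrative shape only (the inner query and bind parameters
            # vary): with low_mark=10 and high_mark=20 this produces
            #   SELECT * FROM (
            #       SELECT ROWNUM AS "_RN", "_SUB".* FROM (<inner query>) "_SUB"
            #       WHERE ROWNUM <= 20
            #   ) WHERE "_RN" > 10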
return sql, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
| bsd-3-clause |
vanant/googleads-dfa-reporting-samples | python/v2.1/get_content_categories.py | 1 | 2155 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example displays all available content categories.
Tags: contentCategories.list
"""
__author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to look up content categories for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
try:
# Construct the request.
request = service.contentCategories().list(profileId=profile_id)
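    # Page through the results: list_next() builds each follow-up request
    # from the previous request/response pair until no nextPageToken remains.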
while True:
# Execute request and print response.
response = request.execute()
for category in response['contentCategories']:
print ('Found content category with ID %s and name "%s".'
% (category['id'], category['name']))
if response['contentCategories'] and response['nextPageToken']:
request = service.contentCategories().list_next(request, response)
else:
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
knewmanTE/FrameworkBenchmarks | toolset/benchmark/test_types/framework_test_type.py | 27 | 4710 | import copy
import sys
import subprocess
from subprocess import PIPE
import requests
# Requests is built on top of urllib3;
# here we prevent general request logging
import logging
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
from pprint import pprint
class FrameworkTestType:
'''
Interface between a test type (json, query, plaintext, etc) and
the rest of TFB. A test type defines a number of keys it expects
to find in the benchmark_config.json, and this base class handles extracting
those keys and injecting them into the test. For example, if
    benchmark_config.json contains a line `"spam" : "foobar"` and a subclass X
passes an argument list of ['spam'], then after parsing there will
exist a member `X.spam = 'foobar'`.
'''
def __init__(self, name, requires_db=False, accept_header=None, args=[]):
self.name = name
self.requires_db = requires_db
self.args = args
self.out = sys.stdout
self.err = sys.stderr
if accept_header is None:
self.accept_header = self.accept('json')
else:
self.accept_header = accept_header
self.passed = None
self.failed = None
self.warned = None
def accept(self, content_type):
return {
'json': 'application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7',
'html': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'plaintext': 'text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7'
}[content_type]
def setup_out(self, out):
'''
Sets up file-like objects for logging. Used in
        cases where it is hard to just return the output. Any
output sent to these file objects is also printed to
the console
NOTE: I detest this. It would be much better to use
logging like it's intended
'''
self.out = out
def parse(self, test_keys):
'''
Takes the dict of key/value pairs describing a FrameworkTest
and collects all variables needed by this FrameworkTestType
Raises AttributeError if required keys are missing
'''
if all(arg in test_keys for arg in self.args):
self.__dict__.update({arg: test_keys[arg] for arg in self.args})
return self
else: # This is quite common - most tests don't support all types
raise AttributeError(
"A %s requires the benchmark_config.json to contain %s" % (self.name, self.args))
def request_headers_and_body(self, url):
'''
Downloads a URL and returns the HTTP response headers
and body content as a tuple
'''
print "Accessing URL %s:" % url
self.out.write("Accessing URL %s \n" % url)
headers = {'Accept': self.accept_header}
r = requests.get(url, timeout=15, headers=headers)
headers = r.headers
body = r.content
self.out.write(str(headers))
self.out.write(body)
b = 40
print " Response (trimmed to %d bytes): \"%s\"" % (b, body.strip()[:b])
return headers, body
def verify(self, base_url):
'''
Accesses URL used by this test type and checks the return
values for correctness. Most test types run multiple checks,
so this returns a list of results. Each result is a 3-tuple
of (String result, String reason, String urlTested).
- result : 'pass','warn','fail'
- reason : Short human-readable reason if result was
warn or fail. Please do not print the response as part of this,
other parts of TFB will do that based upon the current logging
settings if this method indicates a failure happened
- urlTested: The exact URL that was queried
Subclasses should make a best-effort attempt to report as many
failures and warnings as they can to help users avoid needing
to run TFB repeatedly while debugging
'''
# TODO make String result into an enum to enforce
raise NotImplementedError("Subclasses must provide verify")
def get_url(self):
        """Returns the URL for this test, like '/json'."""
# This is a method because each test type uses a different key
# for their URL so the base class can't know which arg is the URL
raise NotImplementedError("Subclasses must provide get_url")
def copy(self):
'''
Returns a copy that can be safely modified.
Use before calling parse
'''
return copy.copy(self)
| bsd-3-clause |
v1bri/gnuradio | grc/core/Connection.py | 14 | 5933 | """
Copyright 2008-2015 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from . import Constants
from .Element import Element
from .utils import odict
class Connection(Element):
is_connection = True
def __init__(self, flow_graph, porta, portb):
"""
Make a new connection given the parent and 2 ports.
Args:
flow_graph: the parent of this element
porta: a port (any direction)
portb: a port (any direction)
@throws Error cannot make connection
Returns:
a new connection
"""
Element.__init__(self, flow_graph)
source = sink = None
# Separate the source and sink
for port in (porta, portb):
if port.is_source:
source = port
else:
sink = port
if not source:
raise ValueError('Connection could not isolate source')
if not sink:
raise ValueError('Connection could not isolate sink')
        busses = len(filter(lambda a: a.get_type() == 'bus', [source, sink])) % 2
        if busses != 0:
            raise ValueError('bus ports must be connected to bus ports')
if not len(source.get_associated_ports()) == len(sink.get_associated_ports()):
raise ValueError('port connections must have same cardinality')
# Ensure that this connection (source -> sink) is unique
for connection in flow_graph.connections:
if connection.get_source() is source and connection.get_sink() is sink:
raise LookupError('This connection between source and sink is not unique.')
self._source = source
self._sink = sink
if source.get_type() == 'bus':
sources = source.get_associated_ports()
sinks = sink.get_associated_ports()
for i in range(len(sources)):
try:
flow_graph.connect(sources[i], sinks[i])
                except Exception:
                    pass
def __str__(self):
return 'Connection (\n\t{0}\n\t\t{1}\n\t{2}\n\t\t{3}\n)'.format(
self.get_source().get_parent(),
self.get_source(),
self.get_sink().get_parent(),
self.get_sink(),
)
def is_msg(self):
return self.get_source().get_type() == self.get_sink().get_type() == 'msg'
def is_bus(self):
return self.get_source().get_type() == self.get_sink().get_type() == 'bus'
    def validate(self):
        """
        Validate the connection.
        The ports must match in type and in io size.
        """
Element.validate(self)
platform = self.get_parent().get_parent()
source_domain = self.get_source().get_domain()
sink_domain = self.get_sink().get_domain()
if (source_domain, sink_domain) not in platform.connection_templates:
self.add_error_message('No connection known for domains "{0}", "{1}"'.format(
source_domain, sink_domain))
too_many_other_sinks = (
not platform.domains.get(source_domain, []).get('multiple_sinks', False) and
len(self.get_source().get_enabled_connections()) > 1
)
too_many_other_sources = (
not platform.domains.get(sink_domain, []).get('multiple_sources', False) and
len(self.get_sink().get_enabled_connections()) > 1
)
if too_many_other_sinks:
self.add_error_message(
'Domain "{0}" can have only one downstream block'.format(source_domain))
if too_many_other_sources:
self.add_error_message(
'Domain "{0}" can have only one upstream block'.format(sink_domain))
source_size = Constants.TYPE_TO_SIZEOF[self.get_source().get_type()] * self.get_source().get_vlen()
sink_size = Constants.TYPE_TO_SIZEOF[self.get_sink().get_type()] * self.get_sink().get_vlen()
if source_size != sink_size:
self.add_error_message('Source IO size "{0}" does not match sink IO size "{1}".'.format(source_size, sink_size))
def get_enabled(self):
"""
Get the enabled state of this connection.
Returns:
true if source and sink blocks are enabled
"""
return self.get_source().get_parent().get_enabled() and \
self.get_sink().get_parent().get_enabled()
#############################
# Access Ports
#############################
def get_sink(self):
return self._sink
def get_source(self):
return self._source
##############################################
# Import/Export Methods
##############################################
def export_data(self):
"""
Export this connection's info.
Returns:
a nested data odict
"""
n = odict()
n['source_block_id'] = self.get_source().get_parent().get_id()
n['sink_block_id'] = self.get_sink().get_parent().get_id()
n['source_key'] = self.get_source().get_key()
n['sink_key'] = self.get_sink().get_key()
return n
| gpl-3.0 |
svn2github/cef1 | tools/repack_locales.py | 4 | 5714 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to repack paks for a list of locales.
Gyp doesn't have any built-in looping capability, so this just provides a way to
loop over a list of locales when repacking pak files, thus avoiding a
proliferation of mostly duplicate, cut-n-paste gyp actions.
"""
import getopt
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'tools', 'grit'))
from grit.format import data_pack
# Some build paths defined by gyp.
GRIT_DIR = None
SHARE_INT_DIR = None
INT_DIR = None
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def calc_output(locale):
"""Determine the file that will be generated for the given locale."""
#e.g. '<(INTERMEDIATE_DIR)/repack/da.pak',
# For Fake Bidi, generate it at a fixed path so that tests can safely
# reference it.
if locale == 'fake-bidi':
return '%s/%s.pak' % (INT_DIR, locale)
if sys.platform in ('darwin',):
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
if locale == 'en-US':
locale = 'en'
return '%s/repack/%s.lproj/locale.pak' % (INT_DIR, locale.replace('-', '_'))
else:
return os.path.join(INT_DIR, 'repack', locale + '.pak')
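# Illustrative outputs (actual paths depend on the gyp-provided directories):
#   calc_output('da')    -> '<INT_DIR>/repack/da.pak' on Linux/Windows
#   calc_output('en-US') -> '<INT_DIR>/repack/en.lproj/locale.pak' on Mac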
def calc_inputs(locale):
"""Determine the files that need processing for the given locale."""
inputs = []
#e.g. '<(grit_out_dir)/generated_resources_da.pak'
#inputs.append(os.path.join(GRIT_DIR, 'generated_resources_%s.pak' % locale))
#e.g. '<(grit_out_dir)/locale_settings_da.pak'
#inputs.append(os.path.join(GRIT_DIR, 'locale_settings_%s.pak' % locale))
#e.g. '<(grit_out_dir)/platform_locale_settings_da.pak'
#inputs.append(os.path.join(GRIT_DIR,
# 'platform_locale_settings_%s.pak' % locale))
#e.g. '<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_strings_da.pak'
inputs.append(os.path.join(SHARE_INT_DIR, 'webkit',
'webkit_strings_%s.pak' % locale))
#e.g. '<(SHARED_INTERMEDIATE_DIR)/ui/ui_strings_da.pak',
inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'ui_strings',
'ui_strings_%s.pak' % locale))
#e.g. '<(SHARED_INTERMEDIATE_DIR)/ui/app_locale_settings_da.pak',
inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'app_locale_settings',
'app_locale_settings_%s.pak' % locale))
return inputs
def list_outputs(locales):
"""Returns the names of files that will be generated for the given locales.
This is to provide gyp the list of output files, so build targets can
properly track what needs to be built.
"""
outputs = []
for locale in locales:
outputs.append(calc_output(locale))
# Quote each element so filename spaces don't mess up gyp's attempt to parse
# it into a list.
return " ".join(['"%s"' % x for x in outputs])
def list_inputs(locales):
"""Returns the names of files that will be processed for the given locales.
This is to provide gyp the list of input files, so build targets can properly
track their prerequisites.
"""
inputs = []
for locale in locales:
inputs += calc_inputs(locale)
# Quote each element so filename spaces don't mess up gyp's attempt to parse
# it into a list.
return " ".join(['"%s"' % x for x in inputs])
def repack_locales(locales):
""" Loop over and repack the given locales."""
for locale in locales:
inputs = []
inputs += calc_inputs(locale)
output = calc_output(locale)
data_pack.DataPack.RePack(output, inputs)
def DoMain(argv):
global GRIT_DIR
global SHARE_INT_DIR
global INT_DIR
short_options = 'iog:s:x:b:h'
long_options = 'help'
print_inputs = False
print_outputs = False
usage_msg = ''
helpstr = """\
Usage: %s [-h] [-i | -o] -g <DIR> -x <DIR> -s <DIR> <locale> [...]
-h, --help Print this help, then exit.
-i Print the expected input file list, then exit.
-o Print the expected output file list, then exit.
-g DIR GRIT build files output directory.
-x DIR Intermediate build files output directory.
-s DIR Shared intermediate build files output directory.
locale [...] One or more locales to repack.""" % (
os.path.basename(__file__))
try:
opts, locales = getopt.getopt(argv, short_options, long_options)
except getopt.GetoptError, msg:
raise Usage(str(msg))
if not locales:
    usage_msg = 'Please specify at least one locale to process.\n'
for o, a in opts:
    if o == '-i':
      print_inputs = True
    elif o == '-o':
      print_outputs = True
    elif o == '-g':
      GRIT_DIR = a
    elif o == '-s':
      SHARE_INT_DIR = a
    elif o == '-x':
      INT_DIR = a
elif o in ('-h', '--help'):
raise Usage(helpstr)
if not (GRIT_DIR and INT_DIR and SHARE_INT_DIR):
usage_msg += 'Please specify all of "-g" and "-x" and "-s".\n'
if print_inputs and print_outputs:
usage_msg += 'Please specify only one of "-i" or "-o".\n'
if usage_msg:
raise Usage(usage_msg)
if print_inputs:
return list_inputs(locales)
if print_outputs:
return list_outputs(locales)
return repack_locales(locales)
if __name__ == '__main__':
results = DoMain(sys.argv[1:])
if results:
print results
| bsd-3-clause |
kyuupichan/electrum | gui/qt/network_dialog.py | 1 | 20239 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import PyQt5.QtCore as QtCore
from electrum.i18n import _
from electrum import constants
from electrum.util import print_error
from electrum.network import serialize_server, deserialize_server
from .util import *
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog):
def __init__(self, network, config, network_updated_signal_obj):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 20)
self.nlayout = NetworkChoiceLayout(network, config)
self.network_updated_signal_obj = network_updated_signal_obj
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal_obj.network_updated_signal.connect(
self.on_update)
network.register_callback(self.on_network, ['updated', 'interfaces'])
def on_network(self, event, *args):
self.network_updated_signal_obj.network_updated_signal.emit(event, args)
def on_update(self):
self.nlayout.update()
class NodesListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Connected node'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
is_server = not bool(item.data(0, Qt.UserRole))
menu = QMenu()
if is_server:
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
else:
index = item.data(1, Qt.UserRole)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(index))
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, network):
self.clear()
self.addChild = self.addTopLevelItem
chains = network.get_blockchains()
n_chains = len(chains)
for k, items in chains.items():
b = network.blockchains[k]
name = b.get_name()
            if n_chains > 1:
x = QTreeWidgetItem([name + '@%d'%b.get_checkpoint(), '%d'%b.height()])
x.setData(0, Qt.UserRole, 1)
x.setData(1, Qt.UserRole, b.checkpoint)
else:
x = self
for i in items:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([i.host + star, '%d'%i.tip])
item.setData(0, Qt.UserRole, 0)
item.setData(1, Qt.UserRole, i.server)
x.addChild(item)
            if n_chains > 1:
self.addTopLevelItem(x)
x.setExpanded(True)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class ServerListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Host'), _('Port')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
menu = QMenu()
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.set_server(server))
menu.exec_(self.viewport().mapToGlobal(position))
def set_server(self, s):
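        # deserialize_server splits a serialized string -- e.g. the
        # illustrative value 'electrum.example.org:50002:s' -- back into
        # (host, port, protocol).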
host, port, protocol = deserialize_server(s)
self.parent.server_host.setText(host)
self.parent.server_port.setText(port)
self.parent.set_server()
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, servers, protocol, use_tor):
self.clear()
for _host, d in sorted(servers.items()):
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
x = QTreeWidgetItem([_host, port])
server = serialize_server(_host, port, protocol)
x.setData(1, Qt.UserRole, server)
self.addTopLevelItem(x)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class NetworkChoiceLayout(object):
def __init__(self, network, config, wizard=False):
self.network = network
self.config = config
self.protocol = None
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
server_tab = QWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(server_tab, _('Server'))
tabs.addTab(proxy_tab, _('Proxy'))
# server tab
grid = QGridLayout(server_tab)
grid.setSpacing(8)
self.server_host = QLineEdit()
self.server_host.setFixedWidth(200)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(60)
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.server_host.editingFinished.connect(self.set_server)
self.server_port.editingFinished.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 0, 0, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_host, 1, 1, 1, 2)
grid.addWidget(self.server_port, 1, 3)
label = _('Server peers') if network.is_connected() else _('Default Servers')
grid.addWidget(QLabel(label), 2, 0, 1, 5)
self.servers_list = ServerListWidget(self)
grid.addWidget(self.servers_list, 3, 0, 1, 5)
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5', 'HTTP'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(200)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(60)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = QLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setEchoMode(QLineEdit.Password)
self.proxy_password.setFixedWidth(60)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(QIcon(":icons/tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Electrum servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(7, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.server_label = QLabel('')
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_label, 1, 1, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 2, 0)
grid.addWidget(self.height_label, 2, 1)
grid.addWidget(HelpButton(msg), 2, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 3, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 5, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.fill_in_proxy_settings()
self.update()
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_host.setEnabled(enabled)
self.server_port.setEnabled(enabled)
self.servers_list.setEnabled(enabled)
else:
for w in [self.autoconnect_cb, self.server_host, self.server_port, self.servers_list]:
w.setEnabled(False)
def update(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host.setText(host)
self.server_port.setText(port)
self.autoconnect_cb.setChecked(auto_connect)
host = self.network.interface.host if self.network.interface else _('None')
self.server_label.setText(host)
self.set_protocol(protocol)
self.servers = self.network.get_servers()
self.servers_list.update(self.servers, self.protocol, self.tor_cb.isChecked())
self.enable_set_server()
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to {0} nodes.").format(n) if n else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains)>1:
chain = self.network.blockchain()
checkpoint = chain.get_checkpoint()
name = chain.get_name()
msg = _('Chain split detected at block {0}').format(checkpoint) + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(self.network)
def fill_in_proxy_settings(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
def change_protocol(self, use_ssl):
p = 's' if use_ssl else 't'
host = self.server_host.text()
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if p not in pp.keys():
p = list(pp.keys())[0]
port = pp[p]
self.server_host.setText(host)
self.server_port.setText(port)
self.set_protocol(p)
self.set_server()
def follow_branch(self, index):
self.network.follow_chain(index)
self.update()
def follow_server(self, server):
self.network.switch_to_interface(server)
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host, port, protocol = deserialize_server(server)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
self.update()
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if protocol and protocol not in protocol_letters:
protocol = None
if protocol:
port = pp.get(protocol)
if port is None:
protocol = None
if not protocol:
if 's' in pp.keys():
protocol = 's'
port = pp.get(protocol)
else:
protocol = list(pp.keys())[0]
port = pp.get(protocol)
self.server_host.setText(host)
self.server_port.setText(port)
def accept(self):
pass
def set_server(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host = str(self.server_host.text())
port = str(self.server_port.text())
auto_connect = self.autoconnect_cb.isChecked()
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def set_proxy(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def suggest_proxy(self, found_proxy):
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5') \
and self.proxy_host.text() == "127.0.0.1" \
and self.proxy_port.text() == str(found_proxy[1]):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
print_error("[network_dialog] can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
def run(self):
# Probable ports for Tor to listen at
ports = [9050, 9150]
for p in ports:
if TorDetector.is_tor_port(p):
self.found_proxy.emit(("127.0.0.1", p))
return
@staticmethod
def is_tor_port(port):
try:
s = (socket._socketobject if hasattr(socket, "_socketobject") else socket.socket)(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect(("127.0.0.1", port))
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
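# Usage sketch (assumes a local Tor daemon): TorDetector.is_tor_port(9050)
# returns True only if something on 127.0.0.1:9050 answers a raw "GET" with
# Tor's characteristic "Tor is not an HTTP Proxy" error page.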
| mit |
stephenshank/taed-pv | pv/doc/conf.py | 3 | 8170 | # -*- coding: utf-8 -*-
#
# pv documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 15 08:27:09 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import shutil
pdb_structures = [ '1r6a', '1nmr', '1crn' ]
for pdb_id in pdb_structures:
shutil.copyfile('../pdbs/%s.pdb' % pdb_id, '_static/%s.pdb' % pdb_id)
shutil.copyfile('../bio-pv.min.js', '_static/bio-pv.min.js')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [ 'pvsample' ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
nitpicky = True
# General information about the project.
project = u'PV'
copyright = u'2013-2015, Marco Biasini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9'
# The full version, including alpha/beta/rc tags.
release = '1.9.0dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [ '_static' ]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pvdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pv.tex', u'pv Documentation',
u'Marco Biasini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pv', u'pv Documentation',
[u'Marco Biasini'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pv', u'pv Documentation',
u'Marco Biasini', 'pv', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
primary_domain = 'js'
| mit |
andrewyoung1991/abjad | abjad/tools/pitchtools/test/test_pitchtools_PitchArray__get_composite_offset_difference_series_from_leavs_in_expr.py | 2 | 1027 | # -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_PitchArray__get_composite_offset_difference_series_from_leavs_in_expr_01():
staff_1 = Staff(r"\times 4/3 { c'8 d'8 e'8 }")
staff_2 = Staff("f'8 g'8 a'8 b'8")
score = Score([staff_1, staff_2])
assert systemtools.TestManager.compare(
score,
r'''
\new Score <<
\new Staff {
\tweak #'text #tuplet-number::calc-fraction-text
\times 4/3 {
c'8
d'8
e'8
}
}
\new Staff {
f'8
g'8
a'8
b'8
}
>>
'''
)
result = pitchtools.PitchArray._get_composite_offset_difference_series_from_leaves_in_expr(score)
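    # Staff 1's tuplet stretches each eighth note to 1/6, so the composite
    # offsets of the two staves are 0, 1/8, 1/6, 1/4, 1/3, 3/8, 1/2; the
    # durations asserted below are the successive differences.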
assert result == [
Duration(1, 8),
Duration(1, 24),
Duration(1, 12),
Duration(1, 12),
Duration(1, 24),
Duration(1, 8),
] | gpl-3.0 |
explosion/catalogue | catalogue/_importlib_metadata/__init__.py | 1 | 19681 | import os
import re
import abc
import csv
import sys
import zipp
import email
import pathlib
import operator
import functools
import itertools
import posixpath
import collections
from ._compat import (
NullFinder,
PyPy_repr,
install,
Protocol,
)
from configparser import ConfigParser
from contextlib import suppress
from importlib import import_module
from importlib.abc import MetaPathFinder
from itertools import starmap
from typing import Any, List, TypeVar, Union
__all__ = [
'Distribution',
'DistributionFinder',
'PackageNotFoundError',
'distribution',
'distributions',
'entry_points',
'files',
'metadata',
'requires',
'version',
]
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
def __str__(self):
tmpl = "No package metadata was found for {self.name}"
return tmpl.format(**locals())
@property
def name(self):
(name,) = self.args
return name
class EntryPoint(
PyPy_repr, collections.namedtuple('EntryPointBase', 'name value group')
):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def module(self):
match = self.pattern.match(self.value)
return match.group('module')
@property
def attr(self):
match = self.pattern.match(self.value)
return match.group('attr')
@property
def extras(self):
match = self.pattern.match(self.value)
return list(re.finditer(r'\w+', match.group('extras') or ''))
@classmethod
def _from_config(cls, config):
return [
cls(name, value, group)
for group in config.sections()
for name, value in config.items(group)
]
@classmethod
def _from_text(cls, text):
config = ConfigParser(delimiters='=')
# case sensitive: https://stackoverflow.com/q/1611799/812183
config.optionxform = str
config.read_string(text)
return EntryPoint._from_config(config)
def __iter__(self):
"""
Supply iter so one may construct dicts of EntryPoints easily.
"""
return iter((self.name, self))
def __reduce__(self):
return (
self.__class__,
(self.name, self.value, self.group),
)
class PackagePath(pathlib.PurePosixPath):
"""A reference to a path in a package"""
def read_text(self, encoding='utf-8'):
with self.locate().open(encoding=encoding) as stream:
return stream.read()
def read_binary(self):
with self.locate().open('rb') as stream:
return stream.read()
def locate(self):
"""Return a path-like object for this path"""
return self.dist.locate_file(self)
class FileHash:
def __init__(self, spec):
self.mode, _, self.value = spec.partition('=')
def __repr__(self):
return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
_T = TypeVar("_T")
class PackageMetadata(Protocol):
def __len__(self) -> int:
... # pragma: no cover
def __contains__(self, item: str) -> bool:
... # pragma: no cover
def __getitem__(self, key: str) -> str:
... # pragma: no cover
def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
"""
Return all values associated with a possibly multi-valued key.
"""
class Distribution:
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
dist = next(iter(dists), None)
if dist is not None:
return dist
else:
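            # for/else: reached only when no resolver yielded a distribution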
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context) for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, '_catalogue_find_distributions', None) for finder in sys.meta_path
)
return filter(None, declared)
@classmethod
def _local(cls, root='.'):
from pep517 import build, meta
system = build.compat_system(root)
builder = functools.partial(
meta.build,
source_dir=root,
system=system,
)
return PathDistribution(zipp.Path(meta.build_as_zip(builder)))
@property
def metadata(self) -> PackageMetadata:
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return email.message_from_string(text)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoint._from_text(self.read_text('entry_points.txt'))
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
file_lines = self._read_files_distinfo() or self._read_files_egginfo()
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
return file_lines and list(starmap(make_file, csv.reader(file_lines)))
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return source and self._deps_from_requires_text(source)
@classmethod
def _deps_from_requires_text(cls, source):
section_pairs = cls._read_sections(source.splitlines())
sections = {
section: list(map(operator.itemgetter('line'), results))
for section, results in itertools.groupby(
section_pairs, operator.itemgetter('section')
)
}
return cls._convert_egg_info_reqs_to_simple_reqs(sections)
@staticmethod
def _read_sections(lines):
section = None
for line in filter(None, lines):
section_match = re.match(r'\[(.*)\]$', line)
if section_match:
section = section_match.group(1)
continue
            yield locals()  # dict exposing 'line' and 'section' (plus loop temporaries)
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and 'extra == "{name}"'.format(name=name)
def parse_condition(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = '({markers})'.format(markers=markers)
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
for section, deps in sections.items():
for dep in deps:
yield dep + parse_condition(section)
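    # Illustrative conversion (hypothetical requires.txt sections):
    #   {'docs:python_version<"3"': ['sphinx']} yields
    #   'sphinx; (python_version<"3") and extra == "docs"'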
class DistributionFinder(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
"""
class Context:
"""
Keyword arguments presented by the caller to
``distributions()`` or ``Distribution.discover()``
to narrow the scope of a search for distributions
in all DistributionFinders.
Each DistributionFinder may expect any parameters
and should attempt to honor the canonical
parameters defined below when appropriate.
"""
name = None
"""
Specific name for which a distribution finder should match.
A name of ``None`` matches all distributions.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self):
"""
The path that a distribution finder should search.
Typically refers to Python package paths and defaults
to ``sys.path``.
"""
return vars(self).get('path', sys.path)
@abc.abstractmethod
def _catalogue_find_distributions(self, context=Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
class FastPath:
"""
Micro-optimized class for searching a path for
children.
"""
def __init__(self, root):
self.root = str(root)
self.base = os.path.basename(self.root).lower()
def joinpath(self, child):
return pathlib.Path(self.root, child)
def children(self):
with suppress(Exception):
return os.listdir(self.root or '')
with suppress(Exception):
return self.zip_children()
return []
def zip_children(self):
zip_path = zipp.Path(self.root)
names = zip_path.root.namelist()
        self.joinpath = zip_path.joinpath  # later joins resolve inside the zip
return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
def search(self, name):
return (
self.joinpath(child)
for child in self.children()
if name.matches(child, self.base)
)
class Prepared:
"""
A prepared search for metadata on a possibly-named package.
"""
normalized = None
suffixes = '.dist-info', '.egg-info'
    exact_matches = [''][:0]  # empty list, spelled so the element type is str
def __init__(self, name):
self.name = name
if name is None:
return
self.normalized = self.normalize(name)
self.exact_matches = [self.normalized + suffix for suffix in self.suffixes]
@staticmethod
def normalize(name):
"""
PEP 503 normalization plus dashes as underscores.
"""
return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
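    # e.g. normalize("Foo.Bar-baz") -> "foo_bar_baz"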
@staticmethod
def legacy_normalize(name):
"""
Normalize the package name as found in the convention in
older packaging tools versions and specs.
"""
return name.lower().replace('-', '_')
def matches(self, cand, base):
low = cand.lower()
pre, ext = os.path.splitext(low)
name, sep, rest = pre.partition('-')
return (
low in self.exact_matches
or ext in self.suffixes
and (not self.normalized or name.replace('.', '_') == self.normalized)
# legacy case:
or self.is_egg(base)
and low == 'egg-info'
)
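        # 'and' binds tighter than 'or' above, so this groups as: exact match,
        # OR (known suffix AND normalized-name match), OR the legacy egg case.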
def is_egg(self, base):
normalized = self.legacy_normalize(self.name or '')
prefix = normalized + '-' if normalized else ''
versionless_egg_name = normalized + '.egg' if self.name else ''
return (
base == versionless_egg_name
or base.startswith(prefix)
and base.endswith('.egg')
)
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
"""A degenerate finder for distribution packages on the file system.
This finder supplies only a find_distributions() method for versions
of Python that do not have a PathFinder find_distributions().
"""
def _catalogue_find_distributions(self, context=DistributionFinder.Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
found = self._search_paths(context.name, context.path)
return map(PathDistribution, found)
@classmethod
def _search_paths(cls, name, paths):
"""Find metadata directories in paths heuristically."""
return itertools.chain.from_iterable(
path.search(Prepared(name)) for path in map(FastPath, paths)
)
class PathDistribution(Distribution):
def __init__(self, path):
"""Construct a distribution from a path to the metadata directory.
:param path: A pathlib.Path or similar object supporting
.joinpath(), __div__, .parent, and .read_text().
"""
self._path = path
def read_text(self, filename):
with suppress(
FileNotFoundError,
IsADirectoryError,
KeyError,
NotADirectoryError,
PermissionError,
):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
def distribution(distribution_name):
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name) -> PackageMetadata:
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: A PackageMetadata containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
def entry_points():
"""Return EntryPoint objects for all installed packages.
:return: EntryPoint objects for all installed packages.
"""
eps = itertools.chain.from_iterable(dist.entry_points for dist in distributions())
by_group = operator.attrgetter('group')
ordered = sorted(eps, key=by_group)
grouped = itertools.groupby(ordered, by_group)
return {group: tuple(eps) for group, eps in grouped}
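# Illustrative result shape (distribution names hypothetical):
#   {'console_scripts': (EntryPoint(name='pip', value='pip._internal:main',
#                                   group='console_scripts'), ...), ...}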
def files(distribution_name):
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name):
"""
Return a list of requirements for the named package.
:return: An iterator of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
| mit |
PepperPD/edx-pepper-platform | common/djangoapps/mitxmako/makoloader.py | 15 | 3081 | import logging
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import make_origin, get_template_from_string
from django.template.loaders.filesystem import Loader as FilesystemLoader
from django.template.loaders.app_directories import Loader as AppDirectoriesLoader
from mitxmako.template import Template
import tempdir
log = logging.getLogger(__name__)
class MakoLoader(object):
"""
This is a Django loader object which will load the template as a
    Mako template if the first line is "## mako". It is based on BaseLoader
in django.template.loader.
"""
is_usable = False
def __init__(self, base_loader):
# base_loader is an instance of a BaseLoader subclass
self.base_loader = base_loader
module_directory = getattr(settings, 'MAKO_MODULE_DIR', None)
if module_directory is None:
log.warning("For more caching of mako templates, set the MAKO_MODULE_DIR in settings!")
module_directory = tempdir.mkdtemp_clean()
self.module_directory = module_directory
def __call__(self, template_name, template_dirs=None):
return self.load_template(template_name, template_dirs)
def load_template(self, template_name, template_dirs=None):
source, file_path = self.load_template_source(template_name, template_dirs)
if source.startswith("## mako\n"):
# This is a mako template
template = Template(filename=file_path,
module_directory=self.module_directory,
input_encoding='utf-8',
output_encoding='utf-8',
uri=template_name)
return template, None
else:
# This is a regular template
origin = make_origin(file_path, self.load_template_source, template_name, template_dirs)
try:
template = get_template_from_string(source, origin, template_name)
return template, None
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist, back off to
# returning the source and display name for the template we were asked to load.
# This allows for correct identification (later) of the actual template that does
# not exist.
return source, file_path
def load_template_source(self, template_name, template_dirs=None):
# Just having this makes the template load as an instance, instead of a class.
return self.base_loader.load_template_source(template_name, template_dirs)
def reset(self):
self.base_loader.reset()
class MakoFilesystemLoader(MakoLoader):
is_usable = True
def __init__(self):
MakoLoader.__init__(self, FilesystemLoader())
class MakoAppDirectoriesLoader(MakoLoader):
is_usable = True
def __init__(self):
MakoLoader.__init__(self, AppDirectoriesLoader())
| agpl-3.0 |
crepererum/invenio | invenio/legacy/bibmerge/engine.py | 15 | 26586 | # This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""Invenio BibMerge Engine."""
import os
import random
import re
from invenio.config import \
CFG_BIBUPLOAD_INTERNAL_DOI_PATTERN, \
CFG_BIBEDIT_INTERNAL_DOI_PROTECTION_LEVEL, CFG_SITE_RECORD
from invenio.legacy.bibmerge.merger import merge_field_group, replace_field, \
add_field, delete_field, merge_field, \
add_subfield, replace_subfield, \
delete_subfield, copy_R2_to_R1, merge_record
from invenio.legacy.search_engine import print_record, perform_request_search, \
record_exists
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.bibedit.utils import cache_exists, cache_expired, \
create_cache, delete_cache, get_cache_contents, \
get_cache_mtime, latest_record_revision, record_locked_by_other_user, \
record_locked_by_queue, save_xml_record, touch_cache, \
update_cache_contents, _get_file_path, \
get_record_revision_ids, revision_format_valid_p, split_revid, \
get_marcxml_of_revision_id
from invenio.utils.html import remove_html_markup
from invenio.legacy.bibrecord import create_record, record_xml_output, record_add_field, \
record_order_subfields, \
record_extract_dois
from invenio.base.globals import cfg
import invenio.legacy.template
bibmerge_templates = invenio.legacy.template.load('bibmerge')
def perform_request_init():
"""Handle the initial request.
"""
errors = []
warnings = []
body = ''
# Add script data.
data = {'gSITE_RECORD': '"' + CFG_SITE_RECORD + '"'}
body += '<script type="text/javascript">\n'
for key in data:
body += ' var %s = %s;\n' % (key, data[key])
body += ' </script>\n'
# Build page structure and control panel.
body += bibmerge_templates.controlpanel()
body += """
<div id="bibMergeContent">
</div>"""
return body, errors, warnings
def perform_request_ajax(req, uid, data):
"""Ajax request dispatcher.\
"""
requestType = data['requestType']
if requestType in ('getRecordCompare', 'submit', 'cancel', 'recCopy', \
'recMerge', 'recMergeNC'):
return perform_request_record(requestType, uid, data)
elif requestType in ('getFieldGroup', 'getFieldGroupDiff', \
'mergeFieldGroup', 'mergeNCFieldGroup', 'replaceField', 'addField', \
'deleteField', 'mergeField'):
return perform_request_update_record(requestType, uid, data)
elif requestType in ('deleteSubfield', 'addSubfield', 'replaceSubfield', \
'diffSubfield'):
return perform_small_request_update_record(requestType, uid, data)
elif requestType == "searchCandidates" or requestType == "searchRevisions":
return perform_candidate_record_search(requestType, data)
else:
return { 'resultCode': 1, 'resultText': 'Error unknown' }
def perform_candidate_record_search(requestType, data):
"""Handle search requests.
"""
max_results = 999
too_many = False
result = {
'resultCode': 0,
'resultText': ''
}
if requestType == "searchCandidates":
recids = perform_request_search( p=data['query'] )
if len(recids) > max_results:
too_many = True
else:
captions = [ search_result_info(x) for x in recids ]
alternative_titles = [ remove_html_markup(print_record(x, "hs")) for x in recids ]
search_results = [recids, captions, alternative_titles]
elif requestType == "searchRevisions":
revisions = get_record_revision_ids( data['recID1'] )
captions = [ split_revid(x, 'datetext')[1] for x in revisions ]
search_results = [revisions, captions]
    if too_many:
result['resultCode'] = 1
result['resultText'] = 'Too many results'
else:
result['results'] = search_results
result['resultText'] = '%s results' % len(search_results[0])
return result
def search_result_info(recid):
"""Return report number of a record or if it doen't exist return the recid
itself.
"""
report_numbers = get_fieldvalues(recid, '037__a')
if len(report_numbers) == 0:
return "#"+str(recid)
else:
return report_numbers[0]
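# e.g. search_result_info(123) returns '#123' when no 037__a value exists,
# otherwise the first report number (value hypothetical): 'CERN-THESIS-2010-042'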
def perform_request_record(requestType, uid, data):
"""Handle 'major' record related requests.
Handle retrieving, submitting or cancelling the merging session.
"""
#TODO add checks before submission and cancel, replace get_bibrecord call
result = {
'resultCode': 0,
'resultText': ''
}
recid1 = data["recID1"]
record1 = _get_record(recid1, uid, result)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'submit':
if 'duplicate' in data:
recid2 = data['duplicate']
record2 = _get_record_slave(recid2, result, 'recid', uid)
if result['resultCode'] != 0: #return in case of error
return result
(errcode, message) = check_doi_status_after_merge(data["recID1"], data['duplicate'],
record1, record2,
record2_marked_as_duplicate_p=data.has_key('duplicate'),
submit_confirmed_p=data.get('additional_data', {'confirmed_submit': False}).get('confirmed_submit', False))
if errcode:
result['resultCode'] = errcode
result['resultText'] = message
return result
# mark record2 as deleted
record_add_field(record2, '980', ' ', ' ', '', [('c', 'DELETED')])
# mark record2 as duplicate of record1
record_add_field(record2, '970', ' ', ' ', '', [('d', str(recid1))])
# add recid of deleted record to master record
record_add_field(record1, '981', ' ', ' ', '', [('a', str(recid2))])
# To ensure updates happen in order, use a seq id
sequence_id = str(random.randrange(1, 4294967296))
# submit record2 to be deleted
xml_record2 = record_xml_output(record2)
save_xml_record(recid2, uid, xml_record2, task_name="bibmerge",
sequence_id=sequence_id)
# submit record1
xml_record1 = record_xml_output(record1)
save_xml_record(recid1, uid, xml_record1, task_name="bibmerge",
sequence_id=sequence_id)
# Delete cache file if it exists
if cache_exists(recid1, uid):
delete_cache(recid1, uid)
result['resultText'] = 'Records submitted'
return result
(errcode, message) = check_doi_status_after_merge(data["recID1"], data["recID2"],
record1, None,
submit_confirmed_p=data.get('additional_data', {'confirmed_submit': False}).get('confirmed_submit', False))
if errcode:
result['resultCode'] = errcode
result['resultText'] = message
return result
#submit record1 from cache
save_xml_record(recid1, uid, task_name="bibmerge")
# Delete cache file if it exists
if cache_exists(recid1, uid):
delete_cache(recid1, uid)
result['resultText'] = 'Record submitted'
return result
elif requestType == 'cancel':
delete_cache(recid1, uid)
result['resultText'] = 'Cancelled'
return result
recid2 = data["recID2"]
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'getRecordCompare':
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records compared'
elif requestType == 'recCopy':
copy_R2_to_R1(record1, record2)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Record copied'
elif requestType == 'recMerge':
merge_record(record1, record2, merge_conflicting_fields=True)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records merged'
elif requestType == 'recMergeNC':
merge_record(record1, record2, merge_conflicting_fields=False)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records merged'
else:
result['resultCode'], result['resultText'] = 1, 'Wrong request type'
return result
def perform_request_update_record(requestType, uid, data):
"""Handle record update requests for actions on a field level.
Handle merging, adding, or replacing of fields.
"""
result = {
'resultCode': 0,
'resultText': ''
}
recid1 = data["recID1"]
recid2 = data["recID2"]
record_content = get_cache_contents(recid1, uid)
cache_dirty = record_content[0]
rec_revision = record_content[1]
record1 = record_content[2]
pending_changes = record_content[3]
disabled_hp_changes = record_content[4]
# We will not be able to Undo/Redo correctly after any modifications
# from the level of bibmerge are performed ! We clear all the undo/redo
# lists
undo_list = []
redo_list = []
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'getFieldGroup':
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'])
result['resultText'] = 'Field group retrieved'
return result
elif requestType == 'getFieldGroupDiff':
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'], True)
result['resultText'] = 'Fields compared'
return result
elif requestType == 'mergeFieldGroup' or requestType == 'mergeNCFieldGroup':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
if requestType == 'mergeNCFieldGroup':
merge_field_group(record1, record2, fnum, ind1, ind2, False)
else:
merge_field_group(record1, record2, fnum, ind1, ind2, True)
resultText = 'Field group merged'
elif requestType == 'replaceField' or requestType == 'addField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
findex2 = _field_info( data['fieldCode2'] )[1]
        if findex2 is None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
if requestType == 'replaceField':
replace_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field replaced'
else: # requestType == 'addField'
add_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field added'
elif requestType == 'deleteField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
        if findex1 is None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
delete_field(record1, fnum, findex1)
resultText = 'Field deleted'
elif requestType == 'mergeField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
findex2 = _field_info( data['fieldCode2'] )[1]
        if findex2 is None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
merge_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field merged'
else:
result['resultCode'], result['resultText'] = 1, 'Wrong request type'
return result
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'])
result['resultText'] = resultText
update_cache_contents(recid1, uid, rec_revision, record1, pending_changes, disabled_hp_changes, undo_list, redo_list)
return result
def perform_small_request_update_record(requestType, uid, data):
"""Handle record update requests for actions on a subfield level.
Handle adding, replacing or deleting of subfields.
"""
result = {
'resultCode': 0,
'resultText': '',
'resultHtml': ''
}
recid1 = data["recID1"]
recid2 = data["recID2"]
cache_content = get_cache_contents(recid1, uid) #TODO: check mtime, existence
cache_dirty = cache_content[0]
rec_revision = cache_content[1]
record1 = cache_content[2]
pending_changes = cache_content[3]
disabled_hp_changes = cache_content[4]
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
ftag, findex1 = _field_info(data['fieldCode1'])
fnum = ftag[:3]
findex2 = _field_info(data['fieldCode2'])[1]
sfindex1 = data['sfindex1']
sfindex2 = data['sfindex2']
if requestType == 'deleteSubfield':
delete_subfield(record1, fnum, findex1, sfindex1)
result['resultText'] = 'Subfield deleted'
elif requestType == 'addSubfield':
add_subfield(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfield added'
elif requestType == 'replaceSubfield':
replace_subfield(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfield replaced'
elif requestType == 'diffSubfield':
result['resultHtml'] = bibmerge_templates.BM_html_subfield_row_diffed(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfields diffed'
update_cache_contents(recid1, uid, rec_revision, record1, pending_changes, disabled_hp_changes, [], [])
return result
def _get_record(recid, uid, result, fresh_record=False):
"""Retrieve record structure.
"""
record = None
mtime = None
cache_dirty = None
record_status = record_exists(recid)
existing_cache = cache_exists(recid, uid)
if record_status == 0:
result['resultCode'], result['resultText'] = 1, 'Non-existent record: %s' % recid
elif record_status == -1:
result['resultCode'], result['resultText'] = 1, 'Deleted record: %s' % recid
elif not existing_cache and record_locked_by_other_user(recid, uid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by user' % recid
elif existing_cache and cache_expired(recid, uid) and \
record_locked_by_other_user(recid, uid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by user' % recid
elif record_locked_by_queue(recid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by queue' % recid
else:
if fresh_record:
delete_cache(recid, uid)
existing_cache = False
if not existing_cache:
record_revision, record = create_cache(recid, uid)
mtime = get_cache_mtime(recid, uid)
cache_dirty = False
else:
tmpRes = get_cache_contents(recid, uid)
cache_dirty, record_revision, record = tmpRes[0], tmpRes[1], tmpRes[2]
touch_cache(recid, uid)
mtime = get_cache_mtime(recid, uid)
if not latest_record_revision(recid, record_revision):
result['cacheOutdated'] = True
result['resultCode'], result['resultText'], result['cacheDirty'], result['cacheMTime'] = 0, 'Record OK', cache_dirty, mtime
record_order_subfields(record)
return record
def _get_record_slave(recid, result, mode=None, uid=None):
"""Check if record exists and return it in dictionary format.
If any kind of error occurs returns None.
If mode=='revision' then recid parameter is considered as revid."""
record = None
if recid == 'none':
mode = 'none'
if mode == 'recid':
record_status = record_exists(recid)
#check for errors
if record_status == 0:
result['resultCode'], result['resultText'] = 1, 'Non-existent record: %s' % recid
elif record_status == -1:
result['resultCode'], result['resultText'] = 1, 'Deleted record: %s' % recid
elif record_locked_by_queue(recid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by queue' % recid
else:
record = create_record( print_record(recid, 'xm') )[0]
elif mode == 'tmpfile':
file_path = '%s_%s.xml' % (_get_file_path(recid, uid),
cfg['CFG_BIBEDIT_TO_MERGE_SUFFIX'])
if not os.path.isfile(file_path): #check if file doesn't exist
            result['resultCode'], result['resultText'] = 1, 'Temporary file does not exist'
else: #open file
tmpfile = open(file_path, 'r')
record = create_record( tmpfile.read() )[0]
tmpfile.close()
elif mode == 'revision':
if revision_format_valid_p(recid):
marcxml = get_marcxml_of_revision_id(recid)
if marcxml:
record = create_record(marcxml)[0]
else:
result['resultCode'], result['resultText'] = 1, 'The specified revision does not exist'
else:
result['resultCode'], result['resultText'] = 1, 'Invalid revision id'
elif mode == 'none':
return {}
else:
result['resultCode'], result['resultText'] = 1, 'Invalid record mode for record2'
record_order_subfields(record)
return record
def _field_info(fieldIdCode):
"""Returns a tuple: (field-tag, field-index)
eg.: _field_info('R1-8560_-2') --> ('8560_', 2) """
info = fieldIdCode.split('-')
if info[2] == 'None':
info[2] = None
else:
info[2] = int(info[2])
return tuple( info[1:] )
def _fieldtagNum_and_indicators(fieldTag):
"""Separate a 5-char field tag to a 3-character field-tag number and two
indicators"""
fnum, ind1, ind2 = fieldTag[:3], fieldTag[3], fieldTag[4]
if ind1 == '_':
ind1 = ' '
if ind2 == '_':
ind2 = ' '
return (fnum, ind1, ind2)
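# e.g. _fieldtagNum_and_indicators('8560_') returns ('856', '0', ' ')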
def get_dois(record, internal_only_p=False):
"""
Return the list of DOIs in the given record. If C{internal_only_p}
is set to True, only those DOIs that are considered owned/managed
by this installation (as defined in
CFG_BIBUPLOAD_INTERNAL_DOI_PATTERN) will be returned.
@param record: the record we want to get DOIs from
@type record: BibRecord object
@param internal_only_p: if True, returns only DOIs managed/owned by the system
@type internal_only_p: bool
@rtype: list(string)
"""
return [doi for doi in record_extract_dois(record) if \
not internal_only_p or re.compile(CFG_BIBUPLOAD_INTERNAL_DOI_PATTERN).match(doi)]
def check_doi_status_after_merge(original_recid1, original_recid2, final_record1, final_record_2, record2_marked_as_duplicate_p=False, submit_confirmed_p=False):
"""
Check that the result of the merge does not removed DOIs managed
by the system, and that not duplicate DOI would be
created. Returns a tuple(error_code, message).
@param original_recid1: the record ID of the original record 1 (master)
@type original_recid1: int
@param original_recid2: the record ID of the original record 2 (slave)
@type original_recid2: int
@param final_record1: the resulting merged record
@type final_record1: BibRecord object
@param final_record_2: the resulting slave "merged" record (optional when record2_marked_as_duplicate_p is False)
@type final_record_2: BibRecord object
@param record2_marked_as_duplicate_p: True if the record 2 will be marked as duplicate (and deleted)
@type record2_marked_as_duplicate_p: bool
@param submit_confirmed_p: if the user has already confirmed to proceed with submission, according to previous messages displayed. If True, do not ask again confirmation and proceed if all tests pass.
@type submit_confirmed_p: bool
"""
errcode = 0
message = ''
new_record1_dois = get_dois(final_record1)
new_record1_managed_dois = get_dois(final_record1, internal_only_p=True)
original_record1_managed_dois = get_dois(create_record(print_record(original_recid1, 'xm'))[0],
internal_only_p=True)
original_record2_dois = get_dois(create_record(print_record(original_recid2, 'xm'))[0])
# Are there any DOI from record 1 (master) lost in the merging?
lost_dois_in_record1 = [doi for doi in original_record1_managed_dois \
if not doi in new_record1_managed_dois]
# Enough to check for duplicate DOI creation in this record,
# not whole DB
duplicate_dois_after_merge = [doi for doi in new_record1_dois if new_record1_dois.count(doi) > 1]
if record2_marked_as_duplicate_p:
new_record2_managed_dois = get_dois(final_record_2, internal_only_p=True)
original_record2_managed_dois = get_dois(create_record(print_record(original_recid2, 'xm'))[0],
internal_only_p=True)
# Are there any DOI from record 2 (slave) lost in the merging?
lost_dois_in_record2 = [doi for doi in original_record2_managed_dois \
if not doi in new_record1_managed_dois]
else:
lost_dois_in_record2 = []
duplicate_dois_after_merge += [doi for doi in new_record1_dois if doi in original_record2_dois]
if ((lost_dois_in_record1 or lost_dois_in_record2) and \
CFG_BIBEDIT_INTERNAL_DOI_PROTECTION_LEVEL > 0) or \
duplicate_dois_after_merge:
if CFG_BIBEDIT_INTERNAL_DOI_PROTECTION_LEVEL == 1 and \
not duplicate_dois_after_merge and \
not submit_confirmed_p:
errcode = 1
message = 'The resulting merged record misses DOI(s) managed by the system.<script type="text/javascript">%(check_duplicate_box)sif (confirm(\'The resulting merged record will lose DOI(s) managed by the system.\\n' + \
'The following DOI(s) were in the original record (#1) but are not in the final merged one:\\n' + '\\n'.join(lost_dois_in_record1) + \
'\\nAre you sure that you want to submit the merged records without the DOI(s)?\')) {onclickSubmitButton(confirm_p=false, additional_data={\'confirmed_submit\': true})}</script>'
elif duplicate_dois_after_merge and lost_dois_in_record1:
errcode = 1
message = 'The changes cannot be submitted because the resulting merged record (a) misses DOI(s) managed by the system and/or (b) will create duplicate DOIs.<script type="text/javascript">%(check_duplicate_box)salert(\'The changes cannot be submitted because the resulting merged record (a) misses DOI(s) managed by the system and (b) will create duplicate DOIs.\\n' + \
'The following DOI(s) were in the original record (#1) but are not in the final merged one:\\n' + '\\n'.join(lost_dois_in_record1) + \
'\\nThe following DOI(s) would be duplicate after merge:\\n' + '\\n'.join(duplicate_dois_after_merge) + \
                      '\\nMake sure that the mentioned DOI(s) are included in the final merged record and/or no duplicate DOIs are created (suggestion: merge in the other way around).\');</script>'
elif duplicate_dois_after_merge:
errcode = 1
message = 'The changes cannot be submitted because the resulting merged record will create a duplicate DOI.<script type="text/javascript">%(check_duplicate_box)salert(\'The changes cannot be submitted because the resulting merged record will create a duplicate DOI.\\n' + \
'The following DOI(s) would be duplicate after merge:\\n' + '\\n'.join(duplicate_dois_after_merge) + \
                      '\\nMake sure that the mentioned DOI(s) are not duplicated (suggestion: merge in the other way around).\');</script>'
elif not (CFG_BIBEDIT_INTERNAL_DOI_PROTECTION_LEVEL == 1 and submit_confirmed_p):
# lost DOIs after merge
errcode = 1
message = 'The changes cannot be submitted because the resulting merged record misses DOI(s) managed by the system.<script type="text/javascript">%(check_duplicate_box)salert(\'The changes cannot be submitted because the resulting merged record misses the DOI(s) managed by the system.\\n' + \
'The following DOI(s) were in the original record (#1) but are not in the final merged one:\\n' + '\\n'.join(lost_dois_in_record1) + \
                      '\\nMake sure that the mentioned DOI(s) are included in the final merged record.\');</script>'
message = message % {'check_duplicate_box': record2_marked_as_duplicate_p and '$(\'#bibMergeDupeCheckbox\').attr(\'checked\', true);' or ''}
return (errcode, message)
| gpl-2.0 |
sanbinabu/Wox | PythonHome/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py | 979 | 4306 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
if not data:
raise LintError(_("%(type)s token with empty data") % {"type": type})
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %(type)s") % {"type": type})
yield token
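# Usage sketch (assumes an html5lib tree-walker token stream):
#     walker = html5lib.getTreeWalker("etree")
#     for token in Filter(walker(tree)):
#         pass  # LintError is raised at the first malformed token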
| mit |
philanthropy-u/edx-platform | common/djangoapps/student/tests/test_helpers.py | 3 | 5600 | """ Test Student helpers """
import logging
import ddt
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.urls import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from mock import patch
from testfixtures import LogCapture
from student.helpers import get_next_url_for_login_page
from openedx.core.djangoapps.site_configuration.tests.test_util import with_site_configuration_context
LOGGER_NAME = "student.helpers"
@ddt.ddt
class TestLoginHelper(TestCase):
"""Test login helper methods."""
static_url = settings.STATIC_URL
def setUp(self):
super(TestLoginHelper, self).setUp()
self.request = RequestFactory()
@staticmethod
def _add_session(request):
"""Annotate the request object with a session"""
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
@ddt.data(
(logging.WARNING, "WARNING", "https://www.amazon.com", "text/html", None,
"Unsafe redirect parameter detected after login page: u'https://www.amazon.com'"),
(logging.WARNING, "WARNING", "testserver/edx.org/images/logo", "text/html", None,
"Redirect to theme content detected after login page: u'testserver/edx.org/images/logo'"),
(logging.INFO, "INFO", "favicon.ico", "image/*", "test/agent",
"Redirect to non html content 'image/*' detected from 'test/agent' after login page: u'favicon.ico'"),
(logging.WARNING, "WARNING", "https://www.test.com/test.jpg", "image/*", None,
"Unsafe redirect parameter detected after login page: u'https://www.test.com/test.jpg'"),
(logging.INFO, "INFO", static_url + "dummy.png", "image/*", "test/agent",
"Redirect to non html content 'image/*' detected from 'test/agent' after login page: u'" + static_url +
"dummy.png" + "'"),
(logging.WARNING, "WARNING", "test.png", "text/html", None,
"Redirect to url path with specified filed type 'image/png' not allowed: u'test.png'"),
(logging.WARNING, "WARNING", static_url + "dummy.png", "text/html", None,
"Redirect to url path with specified filed type 'image/png' not allowed: u'" + static_url + "dummy.png" + "'"),
)
@ddt.unpack
def test_next_failures(self, log_level, log_name, unsafe_url, http_accept, user_agent, expected_log):
""" Test unsafe next parameter """
with LogCapture(LOGGER_NAME, level=log_level) as logger:
req = self.request.get(reverse("login") + "?next={url}".format(url=unsafe_url))
req.META["HTTP_ACCEPT"] = http_accept # pylint: disable=no-member
req.META["HTTP_USER_AGENT"] = user_agent # pylint: disable=no-member
get_next_url_for_login_page(req)
logger.check(
(LOGGER_NAME, log_name, expected_log)
)
@ddt.data(
('/dashboard', 'testserver'),
('https://edx.org/courses', 'edx.org'),
('https://test.edx.org/courses', 'edx.org'),
('https://test2.edx.org/courses', 'edx.org'),
)
@ddt.unpack
@override_settings(LOGIN_REDIRECT_WHITELIST=['test.edx.org', 'test2.edx.org'])
def test_safe_next(self, next_url, host):
""" Test safe next parameter """
req = self.request.get(reverse("login") + "?next={url}".format(url=next_url), HTTP_HOST=host)
req.META["HTTP_ACCEPT"] = "text/html" # pylint: disable=no-member
next_page = get_next_url_for_login_page(req)
self.assertEqual(next_page, next_url)
@patch('student.helpers.third_party_auth.pipeline.get')
@ddt.data(
# Test requests outside the TPA pipeline - tpa_hint should be added.
(None, '/dashboard', '/dashboard', False),
('', '/dashboard', '/dashboard', False),
('', '/dashboard?tpa_hint=oa2-google-oauth2', '/dashboard?tpa_hint=oa2-google-oauth2', False),
('saml-idp', '/dashboard', '/dashboard?tpa_hint=saml-idp', False),
# THIRD_PARTY_AUTH_HINT can be overridden via the query string
('saml-idp', '/dashboard?tpa_hint=oa2-google-oauth2', '/dashboard?tpa_hint=oa2-google-oauth2', False),
# Test requests inside the TPA pipeline - tpa_hint should not be added, preventing infinite loop.
(None, '/dashboard', '/dashboard', True),
('', '/dashboard', '/dashboard', True),
('', '/dashboard?tpa_hint=oa2-google-oauth2', '/dashboard?tpa_hint=oa2-google-oauth2', True),
('saml-idp', '/dashboard', '/dashboard', True),
# OK to leave tpa_hint overrides in place.
('saml-idp', '/dashboard?tpa_hint=oa2-google-oauth2', '/dashboard?tpa_hint=oa2-google-oauth2', True),
)
@ddt.unpack
def test_third_party_auth_hint(self, tpa_hint, next_url, expected_url, running_pipeline, mock_running_pipeline):
mock_running_pipeline.return_value = running_pipeline
def validate_login():
req = self.request.get(reverse("login") + "?next={url}".format(url=next_url))
req.META["HTTP_ACCEPT"] = "text/html" # pylint: disable=no-member
self._add_session(req)
next_page = get_next_url_for_login_page(req)
self.assertEqual(next_page, expected_url)
with override_settings(FEATURES=dict(settings.FEATURES, THIRD_PARTY_AUTH_HINT=tpa_hint)):
validate_login()
with with_site_configuration_context(configuration=dict(THIRD_PARTY_AUTH_HINT=tpa_hint)):
validate_login()
| agpl-3.0 |
liorvh/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/main_unittest.py | 124 | 3472 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from main import change_directory
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.logtesting import LogTesting
class ChangeDirectoryTest(unittest.TestCase):
_original_directory = "/original"
_checkout_root = "/WebKit"
def setUp(self):
self._log = LogTesting.setUp(self)
self.filesystem = MockFileSystem(dirs=[self._original_directory, self._checkout_root], cwd=self._original_directory)
def tearDown(self):
self._log.tearDown()
def _change_directory(self, paths, checkout_root):
return change_directory(self.filesystem, paths=paths, checkout_root=checkout_root)
def _assert_result(self, actual_return_value, expected_return_value,
expected_log_messages, expected_current_directory):
self.assertEqual(actual_return_value, expected_return_value)
self._log.assertMessages(expected_log_messages)
self.assertEqual(self.filesystem.getcwd(), expected_current_directory)
def test_paths_none(self):
paths = self._change_directory(checkout_root=self._checkout_root, paths=None)
self._assert_result(paths, None, [], self._checkout_root)
def test_paths_convertible(self):
paths = ["/WebKit/foo1.txt", "/WebKit/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], self._checkout_root)
def test_with_scm_paths_unconvertible(self):
paths = ["/WebKit/foo1.txt", "/outside/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
log_messages = [
"""WARNING: Path-dependent style checks may not work correctly:
One of the given paths is outside the WebKit checkout of the current
working directory:
Path: /outside/foo2.txt
Checkout root: /WebKit
Pass only files below the checkout root to ensure correct results.
See the help documentation for more info.
"""]
self._assert_result(paths, paths, log_messages, self._original_directory)
| bsd-3-clause |
joernhees/scikit-learn | sklearn/metrics/cluster/supervised.py | 25 | 31477 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Gregory Stupp <stuppie@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy.misc import comb
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
from ...utils.validation import check_array
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
If True, return a sparse CSR contingency matrix. If ``eps is not None``
and ``sparse is True``, a ValueError is raised.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
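Examples
--------
A small illustrative case (hypothetical labels, following the doctest
style used elsewhere in this module)::
>>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
array([[1, 1],
[0, 2]])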
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1, even under permutation::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``labels_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure : float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
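Examples
--------
An illustrative case (hypothetical labels; identical labelings give
perfect scores for all three metrics)::
>>> homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 1])
(1.0, 1.0, 1.0)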
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
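Examples
--------
An illustrative case (hypothetical labels): the score is reported in
natural-log units, so two identical two-cluster labelings share
log(2) nats of information::
>>> mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])  # doctest: +ELLIPSIS
0.69...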
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami : float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average, and so the score can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the pairwise precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
of points that belong to the same cluster in both ``labels_true`` and
``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
number of pairs of points that belong to the same cluster in
``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
**False Negatives** (i.e. the number of pairs of points that belong to the
same cluster in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between the two clusterings.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
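# A minimal sanity sketch (illustrative, not part of the library API):
# a balanced two-class labeling carries log(2) nats of entropy.
#
#   >>> round(entropy([0, 0, 1, 1]), 4)
#   0.6931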
| bsd-3-clause |
andreparrish/python-for-android | python-modules/twisted/twisted/scripts/test/test_tap2rpm.py | 56 | 12489 | # Copyright (c) 2009-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.scripts.tap2rpm}.
"""
import os
from twisted.trial.unittest import TestCase, SkipTest
from twisted.python import procutils
from twisted.python.failure import Failure
from twisted.internet import utils
from twisted.scripts import tap2rpm
# When we query the RPM metadata, we get back a string we'll have to parse, so
# we'll use suitably rare delimiter characters to split on. Luckily, ASCII
# defines some for us!
RECORD_SEPARATOR = "\x1E"
UNIT_SEPARATOR = "\x1F"
def _makeRPMs(tapfile=None, maintainer=None, protocol=None, description=None,
longDescription=None, setVersion=None, rpmfile=None, type_=None):
"""
Helper function to invoke tap2rpm with the given parameters.
"""
args = []
if not tapfile:
tapfile = "dummy-tap-file"
handle = open(tapfile, "w")
handle.write("# Dummy TAP file\n")
handle.close()
args.extend(["--quiet", "--tapfile", tapfile])
if maintainer:
args.extend(["--maintainer", maintainer])
if protocol:
args.extend(["--protocol", protocol])
if description:
args.extend(["--description", description])
if longDescription:
args.extend(["--long_description", longDescription])
if setVersion:
args.extend(["--set-version", setVersion])
if rpmfile:
args.extend(["--rpmfile", rpmfile])
if type_:
args.extend(["--type", type_])
return tap2rpm.run(args)
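# A hedged usage sketch (hypothetical file name, mirroring the tests below):
#
#   rpm, srpm = _makeRPMs(tapfile="example.tap", setVersion="2.0")
#
# builds both packages from "example.tap" with the RPM VERSION tag set
# to "2.0".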
def _queryRPMTags(rpmfile, taglist):
"""
Helper function to read the given header tags from the given RPM file.
Returns a Deferred that fires with dictionary mapping a tag name to a list
of the associated values in the RPM header. If a tag has only a single
value in the header (like NAME or VERSION), it will be returned as a 1-item
list.
Run "rpm --querytags" to see what tags can be queried.
"""
# Build a query format string that will return appropriately delimited
# results. Every field is treated as an array field, so single-value tags
# like VERSION will be returned as 1-item lists.
queryFormat = RECORD_SEPARATOR.join([
"[%%{%s}%s]" % (tag, UNIT_SEPARATOR) for tag in taglist
])
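# For example (illustrative), taglist=("NAME", "VERSION") produces
# "[%{NAME}\x1f]\x1e[%{VERSION}\x1f]": values for one tag come back
# joined by UNIT_SEPARATOR, and the per-tag groups by RECORD_SEPARATOR.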
def parseTagValues(output):
res = {}
for tag, values in zip(taglist, output.split(RECORD_SEPARATOR)):
values = values.strip(UNIT_SEPARATOR).split(UNIT_SEPARATOR)
res[tag] = values
return res
def checkErrorResult(failure):
# The current rpm packages on Debian and Ubuntu don't properly set up
# the RPM database, which causes rpm to print a harmless warning to
# stderr. Unfortunately, .getProcessOutput() assumes all warnings are
# catastrophic and panics whenever it sees one.
#
# See also:
# http://twistedmatrix.com/trac/ticket/3292#comment:42
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=551669
# http://rpm.org/ticket/106
failure.trap(IOError)
# Depending on kernel scheduling, we might read the whole error
# message, or only the first few bytes.
if str(failure.value).startswith("got stderr: 'error: "):
newFailure = Failure(SkipTest("rpm is missing its package "
"database. Run 'sudo rpm -qa > /dev/null' to create one."))
else:
# Not the exception we were looking for; we should report the
# original failure.
newFailure = failure
# We don't want to raise the exception right away; we want to wait for
# the process to exit, otherwise we'll get extra useless errors
# reported.
d = failure.value.processEnded
d.addBoth(lambda _: newFailure)
return d
d = utils.getProcessOutput("rpm",
("-q", "--queryformat", queryFormat, "-p", rpmfile))
d.addCallbacks(parseTagValues, checkErrorResult)
return d
class TestTap2RPM(TestCase):
def setUp(self):
return self._checkForRpmbuild()
def _checkForRpmbuild(self):
"""
tap2rpm requires rpmbuild; skip tests if rpmbuild is not present.
"""
if not procutils.which("rpmbuild"):
raise SkipTest("rpmbuild must be present to test tap2rpm")
def _makeTapFile(self, basename="dummy"):
"""
Make a temporary .tap file and return its absolute path.
"""
path = basename + ".tap"
handle = open(path, "w")
handle.write("# Dummy .tap file")
handle.close()
return path
def _verifyRPMTags(self, rpmfile, **tags):
"""
Check the given file has the given tags set to the given values.
"""
d = _queryRPMTags(rpmfile, tags.keys())
d.addCallback(self.assertEquals, tags)
return d
def test_optionDefaults(self):
"""
Commandline options should default to sensible values.
"sensible" here is defined as "the same values that previous versions
defaulted to".
"""
config = tap2rpm.MyOptions()
config.parseOptions([])
self.assertEquals(config['tapfile'], 'twistd.tap')
self.assertEquals(config['maintainer'], 'tap2rpm')
self.assertEquals(config['protocol'], 'twistd')
self.assertEquals(config['description'], 'A TCP server for twistd')
self.assertEquals(config['long_description'],
'Automatically created by tap2rpm')
self.assertEquals(config['set-version'], '1.0')
self.assertEquals(config['rpmfile'], 'twisted-twistd')
self.assertEquals(config['type'], 'tap')
self.assertEquals(config['quiet'], False)
self.assertEquals(config['twistd_option'], 'file')
self.assertEquals(config['release-name'], 'twisted-twistd-1.0')
def test_protocolCalculatedFromTapFile(self):
"""
The protocol name defaults to a value based on the tapfile value.
"""
config = tap2rpm.MyOptions()
config.parseOptions(['--tapfile', 'pancakes.tap'])
self.assertEquals(config['tapfile'], 'pancakes.tap')
self.assertEquals(config['protocol'], 'pancakes')
def test_optionsDefaultToProtocolValue(self):
"""
Many options default to a value calculated from the protocol name.
"""
config = tap2rpm.MyOptions()
config.parseOptions([
'--tapfile', 'sausages.tap',
'--protocol', 'eggs',
])
self.assertEquals(config['tapfile'], 'sausages.tap')
self.assertEquals(config['maintainer'], 'tap2rpm')
self.assertEquals(config['protocol'], 'eggs')
self.assertEquals(config['description'], 'A TCP server for eggs')
self.assertEquals(config['long_description'],
'Automatically created by tap2rpm')
self.assertEquals(config['set-version'], '1.0')
self.assertEquals(config['rpmfile'], 'twisted-eggs')
self.assertEquals(config['type'], 'tap')
self.assertEquals(config['quiet'], False)
self.assertEquals(config['twistd_option'], 'file')
self.assertEquals(config['release-name'], 'twisted-eggs-1.0')
def test_releaseNameDefaultsToRpmfileValue(self):
"""
The release-name option is calculated from rpmfile and set-version.
"""
config = tap2rpm.MyOptions()
config.parseOptions([
"--rpmfile", "beans",
"--set-version", "1.2.3",
])
self.assertEquals(config['release-name'], 'beans-1.2.3')
def test_basicOperation(self):
"""
Calling tap2rpm should produce an RPM and SRPM with default metadata.
"""
basename = "frenchtoast"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename))
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
NAME=["twisted-%s" % (basename,)],
VERSION=["1.0"],
RELEASE=["1"],
SUMMARY=["A TCP server for %s" % (basename,)],
DESCRIPTION=["Automatically created by tap2rpm"],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
NAME=["twisted-%s" % (basename,)],
VERSION=["1.0"],
RELEASE=["1"],
SUMMARY=["A TCP server for %s" % (basename,)],
DESCRIPTION=["Automatically created by tap2rpm"],
))
return d
def test_protocolOverride(self):
"""
Setting 'protocol' should change the name of the resulting package.
"""
basename = "acorn"
protocol = "banana"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename),
protocol=protocol)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
NAME=["twisted-%s" % (protocol,)],
SUMMARY=["A TCP server for %s" % (protocol,)],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
NAME=["twisted-%s" % (protocol,)],
SUMMARY=["A TCP server for %s" % (protocol,)],
))
return d
def test_rpmfileOverride(self):
"""
Setting 'rpmfile' should change the name of the resulting package.
"""
basename = "cherry"
rpmfile = "donut"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename),
rpmfile=rpmfile)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
NAME=[rpmfile],
SUMMARY=["A TCP server for %s" % (basename,)],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
NAME=[rpmfile],
SUMMARY=["A TCP server for %s" % (basename,)],
))
return d
def test_descriptionOverride(self):
"""
Setting 'description' should change the SUMMARY tag.
"""
description = "eggplant"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
description=description)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
SUMMARY=[description],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
SUMMARY=[description],
))
return d
def test_longDescriptionOverride(self):
"""
Setting 'longDescription' should change the DESCRIPTION tag.
"""
longDescription = "fig"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
longDescription=longDescription)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
DESCRIPTION=[longDescription],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
DESCRIPTION=[longDescription],
))
return d
def test_setVersionOverride(self):
"""
Setting 'setVersion' should change the RPM's version info.
"""
version = "123.456"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
setVersion=version)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
VERSION=["123.456"],
RELEASE=["1"],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
VERSION=["123.456"],
RELEASE=["1"],
))
return d
def test_tapInOtherDirectory(self):
"""
tap2rpm handles tapfiles outside the current directory.
"""
# Make a tapfile outside the current directory.
tempdir = self.mktemp()
os.mkdir(tempdir)
tapfile = self._makeTapFile(os.path.join(tempdir, "bacon"))
# Try and make an RPM from that tapfile.
_makeRPMs(tapfile=tapfile)
| apache-2.0 |
andreparrish/python-for-android | python3-alpha/python3-src/Lib/xml/etree/ElementTree.py | 46 | 57023 | #
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML",
"XMLParser", "XMLTreeBuilder",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import sys
import re
import warnings
class _SimpleElementPath:
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag, namespaces=None):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None, namespaces=None):
elem = self.find(element, tag)
if elem is None:
return default
return elem.text or ""
def iterfind(self, element, tag, namespaces=None):
if tag[:3] == ".//":
for elem in element.iter(tag[3:]):
yield elem
for elem in element:
if elem.tag == tag:
yield elem
def findall(self, element, tag, namespaces=None):
return list(self.iterfind(element, tag, namespaces))
try:
from . import ElementPath
except ImportError:
ElementPath = _SimpleElementPath()
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param element An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
# FIXME: not sure about this; might be a better idea to look
# for tag/attrib/text attributes
return isinstance(element, Element) or hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element:
# <tag attrib>text<child/>...</tag>tail
##
# (Attribute) Element tag.
tag = None
##
# (Attribute) Element attribute dictionary. Where possible, use
# {@link #Element.get},
# {@link #Element.set},
# {@link #Element.keys}, and
# {@link #Element.items} to access
# element attributes.
attrib = None
##
# (Attribute) Text before first subelement. This is either a
# string or the value None. Note that if there was no text, this
# attribute may be either None or an empty string, depending on
# the parser.
text = None
##
# (Attribute) Text after this element's end tag, but before the
# next sibling element's start tag. This is either a string or
# the value None. Note that if there was no text, this attribute
# may be either None or an empty string, depending on the parser.
tail = None # text after end tag, if any
# constructor
def __init__(self, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
##
# Creates a new element object of the same type as this element.
#
# @param tag Element tag.
# @param attrib Element attributes, given as a dictionary.
# @return A new element instance.
def makeelement(self, tag, attrib):
return self.__class__(tag, attrib)
##
# (Experimental) Copies the current element. This creates a
# shallow copy; subelements will be shared with the original tree.
#
# @return A new element instance.
def copy(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
##
# Returns the number of subelements. Note that this only counts
# full elements; to check if there's any content in an element, you
# have to check both the length and the <b>text</b> attribute.
#
# @return The number of subelements.
def __len__(self):
return len(self._children)
def __bool__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
##
# Returns the given subelement, by index.
#
# @param index What subelement to return.
# @return The given subelement.
# @exception IndexError If the given element does not exist.
def __getitem__(self, index):
return self._children[index]
##
# Replaces the given subelement, by index.
#
# @param index What subelement to replace.
# @param element The new element value.
# @exception IndexError If the given element does not exist.
def __setitem__(self, index, element):
# if isinstance(index, slice):
# for elt in element:
# assert iselement(elt)
# else:
# assert iselement(element)
self._children[index] = element
##
# Deletes the given subelement, by index.
#
# @param index What subelement to delete.
# @exception IndexError If the given element does not exist.
def __delitem__(self, index):
del self._children[index]
##
# Adds a subelement to the end of this element. In document order,
# the new element will appear after the last existing subelement (or
# directly after the text, if it's the first subelement), but before
# the end tag for this element.
#
# @param element The element to add.
def append(self, element):
# assert iselement(element)
self._children.append(element)
##
# Appends subelements from a sequence.
#
# @param elements A sequence object with zero or more elements.
# @since 1.3
def extend(self, elements):
# for element in elements:
# assert iselement(element)
self._children.extend(elements)
##
# Inserts a subelement at the given position in this element.
#
# @param index Where to insert the new subelement.
def insert(self, index, element):
# assert iselement(element)
self._children.insert(index, element)
##
# Removes a matching subelement. Unlike the <b>find</b> methods,
# this method compares elements based on identity, not on tag
# value or contents. To remove subelements by other means, the
# easiest way is often to use a list comprehension to select what
# elements to keep, and use slice assignment to update the parent
# element.
#
# @param element What element to remove.
# @exception ValueError If a matching element could not be found.
def remove(self, element):
# assert iselement(element)
self._children.remove(element)
##
# (Deprecated) Returns all subelements. The elements are returned
# in document order.
#
# @return A list of subelements.
# @defreturn list of Element instances
def getchildren(self):
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning, stacklevel=2
)
return self._children
##
# Finds the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
return ElementPath.find(self, path, namespaces)
##
# Finds text for the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value if no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
return ElementPath.findtext(self, path, default, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or other sequence containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
return ElementPath.findall(self, path, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
return ElementPath.iterfind(self, path, namespaces)
##
# Resets an element. This function removes all subelements, clears
# all attributes, and sets the <b>text</b> and <b>tail</b> attributes
# to None.
def clear(self):
self.attrib.clear()
self._children = []
self.text = self.tail = None
##
# Gets an element attribute. Equivalent to <b>attrib.get</b>, but
# some implementations may handle this a bit more efficiently.
#
# @param key What attribute to look for.
# @param default What to return if the attribute was not found.
# @return The attribute value, or the default value, if the
# attribute was not found.
# @defreturn string or None
def get(self, key, default=None):
return self.attrib.get(key, default)
##
# Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
# but some implementations may handle this a bit more efficiently.
#
# @param key What attribute to set.
# @param value The attribute value.
def set(self, key, value):
self.attrib[key] = value
##
# Gets a list of attribute names. The names are returned in an
# arbitrary order (just like for an ordinary Python dictionary).
# Equivalent to <b>attrib.keys()</b>.
#
# @return A list of element attribute names.
# @defreturn list of strings
def keys(self):
return self.attrib.keys()
##
# Gets element attributes, as a sequence. The attributes are
# returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
#
# @return A list of (name, value) tuples for all attributes.
# @defreturn list of (string, string) tuples
def items(self):
return self.attrib.items()
##
# Creates a tree iterator. The iterator loops over this element
# and all subelements, in document order, and returns all elements
# with a matching tag.
# <p>
# If the tree structure is modified during iteration, new or removed
# elements may or may not be included. To get a stable set, use the
# list() function on the iterator, and loop over the resulting list.
#
# @param tag What tags to look for (default is to return all elements).
# @return An iterator containing all the matching elements.
# @defreturn iterator
def iter(self, tag=None):
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
for e in e.iter(tag):
yield e
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'elem.iter()' or 'list(elem.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Creates a text iterator. The iterator loops over this element
# and all subelements, in document order, and returns all inner
# text.
#
# @return An iterator containing all inner text.
# @defreturn iterator
def itertext(self):
tag = self.tag
if not isinstance(tag, str) and tag is not None:
return
if self.text:
yield self.text
for e in self:
for s in e.itertext():
yield s
if e.tail:
yield e.tail
# compatibility
_Element = _ElementInterface = Element
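# Illustrative usage sketch, not part of the original module: build a tiny
# tree by hand and walk it with iter() and itertext(). Tag names here are
# made up for demonstration.
def _element_iter_example():
    root = Element("root")
    child = SubElement(root, "child")
    child.text = "hello"
    tags = [e.tag for e in root.iter()]    # ['root', 'child']
    text = "".join(root.itertext())        # 'hello'
    return tags, text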
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
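# Illustrative sketch, not part of the original module: SubElement merges
# the attrib dictionary with any extra keyword arguments, so both spellings
# below produce an equivalent child element.
def _subelement_example():
    root = Element("root")
    a = SubElement(root, "item", {"id": "1"})
    b = SubElement(root, "item", id="2")
    return a.get("id"), b.get("id")        # ('1', '2')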
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
element = Element(Comment)
element.text = text
return element
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# a URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __repr__(self):
return '<QName %r>' % (self.text,)
def __hash__(self):
return hash(self.text)
def __le__(self, other):
if isinstance(other, QName):
return self.text <= other.text
return self.text <= other
def __lt__(self, other):
if isinstance(other, QName):
return self.text < other.text
return self.text < other
def __ge__(self, other):
if isinstance(other, QName):
return self.text >= other.text
return self.text >= other
def __gt__(self, other):
if isinstance(other, QName):
return self.text > other.text
return self.text > other
def __eq__(self, other):
if isinstance(other, QName):
return self.text == other.text
return self.text == other
def __ne__(self, other):
if isinstance(other, QName):
return self.text != other.text
return self.text != other
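# Illustrative sketch, not part of the original module: wrapping a tag or
# attribute value in QName makes the serializer emit a proper namespace
# prefix instead of the raw "{uri}local" form. The URI is made up.
def _qname_example():
    uri = "http://example.com/ns"
    elem = Element(QName(uri, "item"))
    elem.set("ref", QName(uri, "target"))
    return tostring(elem)                  # serialized with an xmlns declaration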
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
def getroot(self):
return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
def _setroot(self, element):
# assert iselement(element)
self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object. If a file object is
# given, it only has to implement a <b>read(n)</b> method.
# @keyparam parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return The document root element.
# @defreturn Element
# @exception ParseError If the parser fails to parse the document.
def parse(self, source, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
try:
if not parser:
parser = XMLParser(target=TreeBuilder())
while 1:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if close_source:
source.close()
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def iter(self, tag=None):
# assert self._root is not None
return self._root.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'tree.iter()' or 'list(tree.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
##
# Finds the element text for the first toplevel element with given
# tag. Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value if no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
##
# Finds all matching subelements, by tag name or path.
# Same as getroot().iterfind(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
##
# Writes the element tree to a file, as XML.
#
# @def write(file, **options)
# @param file A file name, or a file object opened for writing.
# @param **options Options, given as keyword arguments.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @keyparam xml_declaration Controls if an XML declaration should
# be added to the file. Use False for never, True for always,
# None for only if not US-ASCII or UTF-8 or Unicode. None is default.
def write(self, file_or_filename,
# keyword arguments
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None):
# assert self._root is not None
if not method:
method = "xml"
elif method not in _serialize:
# FIXME: raise an ImportError for c14n if ElementC14N is missing?
raise ValueError("unknown method %r" % method)
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
elif encoding == str: # lxml.etree compatibility.
encoding = "unicode"
else:
encoding = encoding.lower()
if hasattr(file_or_filename, "write"):
file = file_or_filename
else:
if encoding != "unicode":
file = open(file_or_filename, "wb")
else:
file = open(file_or_filename, "w")
if encoding != "unicode":
def write(text):
try:
return file.write(text.encode(encoding,
"xmlcharrefreplace"))
except (TypeError, AttributeError):
_raise_serialization_error(text)
else:
write = file.write
if method == "xml" and (xml_declaration or
(xml_declaration is None and
encoding not in ("utf-8", "us-ascii", "unicode"))):
declared_encoding = encoding
if encoding == "unicode":
# Retrieve the default encoding for the xml declaration
import locale
declared_encoding = locale.getpreferredencoding()
write("<?xml version='1.0' encoding='%s'?>\n" % declared_encoding)
if method == "text":
_serialize_text(write, self._root)
else:
qnames, namespaces = _namespaces(self._root, default_namespace)
serialize = _serialize[method]
serialize(write, self._root, qnames, namespaces)
if file_or_filename is not file:
file.close()
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
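# Illustrative sketch, not part of the original module: serializing a tree
# to an in-memory text buffer; encoding="unicode" makes write() emit str
# fragments instead of encoded bytes.
def _elementtree_write_example():
    import io
    root = Element("root")
    root.text = "data"
    buf = io.StringIO()
    ElementTree(root).write(buf, encoding="unicode")
    return buf.getvalue()                  # '<root>data</root>'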
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, str):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem):
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta" "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
    if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(_namespace_map.items()):
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
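# Illustrative sketch, not part of the original module: registering a
# prefix so serialization uses "ex:" instead of a generated "ns0:". Note
# that the registry is global, so this affects later serializations too.
def _register_namespace_example():
    register_namespace("ex", "http://example.com/ns")
    elem = Element("{http://example.com/ns}item")
    return tostring(elem)                  # element serialized with an ex: prefix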
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements. If encoding is "unicode", the return type is a string;
# otherwise it is a bytes array.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An (optionally) encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding, method=method)
if encoding in (str, "unicode"):
return "".join(data)
else:
return b"".join(data)
##
# Generates a string representation of an XML element, including all
# subelements. If encoding is "unicode", the result is returned as a
# sequence of string fragments; otherwise it is a sequence of
# bytestrings.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
def tostringlist(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding, method=method)
# FIXME: merge small fragments into larger parts
return data
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout, encoding="unicode")
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
tree = ElementTree()
tree.parse(source, parser)
return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
if not parser:
parser = XMLParser(target=TreeBuilder())
return _IterParseIterator(source, events, parser, close_source)
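# Illustrative sketch, not part of the original module: consuming
# iterparse() events from an in-memory byte stream and clearing each
# element after its "end" event, the usual pattern for bounding memory.
def _iterparse_example():
    import io
    source = io.BytesIO(b"<root><item/><item/></root>")
    count = 0
    for event, elem in iterparse(source, events=("end",)):
        if elem.tag == "item":
            count += 1
            elem.clear()
    return count                           # 2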
class _IterParseIterator:
def __init__(self, source, events, parser, close_source=False):
self._file = source
self._close_file = close_source
self._events = []
self._index = 0
self.root = self._root = None
self._parser = parser
# wire up the parser for event reporting
parser = self._parser._parser
append = self._events.append
if events is None:
events = ["end"]
for event in events:
if event == "start":
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start_list):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
except AttributeError:
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event == "end":
def handler(tag, event=event, append=append,
end=self._parser._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event == "start-ns":
def handler(prefix, uri, event=event, append=append):
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event == "end-ns":
def handler(prefix, event=event, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event)
def __next__(self):
while 1:
try:
item = self._events[self._index]
except IndexError:
if self._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
self._parser.feed(data)
else:
self._root = self._parser.close()
self._parser = None
else:
self._index = self._index + 1
return item
def __iter__(self):
return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
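# Illustrative sketch, not part of the original module: XML() (also exposed
# as fromstring(), below) parses an XML literal straight to its root element.
def _xml_literal_example():
    elem = XML("<root><child>text</child></root>")
    return elem.find("child").text         # 'text'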
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder:
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
##
# Flushes the builder buffers, and returns the toplevel document
# element.
#
# @return An Element instance.
# @defreturn Element
def close(self):
assert len(self._elem) == 0, "missing end tags"
assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
##
# Adds text to the current element.
#
# @param data A string. This should be either an 8-bit string
# containing ASCII text, or a Unicode string.
def data(self, data):
self._data.append(data)
##
# Opens a new element.
#
# @param tag The element name.
# @param attrib A dictionary containing element attributes.
# @return The opened element.
# @defreturn Element
def start(self, tag, attrs):
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
##
# Closes the current element.
#
# @param tag The element name.
# @return The closed element.
# @defreturn Element
def end(self, tag):
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
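# Illustrative sketch, not part of the original module: driving a
# TreeBuilder directly with start()/data()/end() calls, as a custom parser
# for some non-XML format might do.
def _treebuilder_example():
    builder = TreeBuilder()
    builder.start("root", {})
    builder.data("payload")
    builder.end("root")
    root = builder.close()
    return root.tag, root.text             # ('root', 'payload')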
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser:
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# callbacks
parser.DefaultHandlerExpand = self._default
parser.StartElementHandler = self._start
parser.EndElementHandler = self._end
parser.CharacterDataHandler = self._data
# optional callbacks
parser.CommentHandler = self._comment
parser.ProcessingInstructionHandler = self._pi
# let expat do the buffering, if supported
try:
self._parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
self._parser.ordered_attributes = 1
self._parser.specified_attributes = 1
parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
def _start(self, tag, attrib_in):
fixname = self._fixname
tag = fixname(tag)
attrib = {}
for key, value in attrib_in.items():
attrib[fixname(key)] = value
return self.target.start(tag, attrib)
def _start_list(self, tag, attrib_in):
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attrib_in:
for i in range(0, len(attrib_in), 2):
attrib[fixname(attrib_in[i])] = attrib_in[i+1]
return self.target.start(tag, attrib)
def _data(self, text):
return self.target.data(text)
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _comment(self, data):
try:
comment = self.target.comment
except AttributeError:
pass
else:
return comment(data)
def _pi(self, target, data):
try:
pi = self.target.pi
except AttributeError:
pass
else:
return pi(target, data)
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
self.target.data(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self._parser.ErrorLineNumber,
self._parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self._parser.ErrorLineNumber
err.offset = self._parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if pubid:
pubid = pubid[1:-1]
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype is not self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
##
# (Deprecated) Handles a doctype declaration.
#
# @param name Doctype name.
# @param pubid Public identifier.
# @param system System identifier.
def doctype(self, name, pubid, system):
"""This method of XMLParser is deprecated."""
warnings.warn(
"This method of XMLParser is deprecated. Define doctype() "
"method on the TreeBuilder target.",
DeprecationWarning,
)
# sentinel, if doctype is redefined in a subclass
__doctype = doctype
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
try:
self._parser.Parse(data, 0)
except self._error as v:
self._raiseerror(v)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
try:
self._parser.Parse("", 1) # end of data
except self._error as v:
self._raiseerror(v)
tree = self.target.close()
del self.target, self._parser # get rid of circular references
return tree
# compatibility
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
from ElementC14N import _serialize_c14n
_serialize["c14n"] = _serialize_c14n
except ImportError:
pass
| apache-2.0 |
ybellavance/python-for-android | python3-alpha/python3-src/Lib/contextlib.py | 50 | 4121 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from functools import wraps
from warnings import warn
__all__ = ["contextmanager", "closing", "ContextDecorator"]
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows otherwise one-shot context managers like
_GeneratorContextManager to support use as
decorators via implicit recreation.
Note: this is a private interface just for _GCM in 3.2 but will be
renamed and documented for third party use in 3.3
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
class _GeneratorContextManager(ContextDecorator):
"""Helper for @contextmanager decorator."""
def __init__(self, func, *args, **kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, *self.args, **self.kwds)
def __enter__(self):
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, *args, **kwds)
return helper
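# Illustrative sketch, not part of the original module: a minimal
# @contextmanager-based timer. Code before the yield runs on entry and the
# finally block on exit, mirroring the pattern shown in the docstring above.
@contextmanager
def _timed_example(label="block"):
    import time
    start = time.time()
    try:
        yield start
    finally:
        print("%s took %.3fs" % (label, time.time() - start))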
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
| apache-2.0 |
teeple/pns_server | work/install/Python-2.7.4/Lib/test/test_sundry.py | 64 | 3093 | """Do a minimal test of all the modules that aren't otherwise tested."""
from test import test_support
import sys
import unittest
class TestUntestedModules(unittest.TestCase):
def test_at_least_import_untested_modules(self):
with test_support.check_warnings(quiet=True):
import CGIHTTPServer
import audiodev
import bdb
import cgitb
import code
import compileall
import distutils.bcppcompiler
import distutils.ccompiler
import distutils.cygwinccompiler
import distutils.emxccompiler
import distutils.filelist
if sys.platform.startswith('win'):
import distutils.msvccompiler
import distutils.text_file
import distutils.unixccompiler
import distutils.command.bdist_dumb
if sys.platform.startswith('win'):
import distutils.command.bdist_msi
import distutils.command.bdist
import distutils.command.bdist_rpm
import distutils.command.bdist_wininst
import distutils.command.build_clib
import distutils.command.build_ext
import distutils.command.build
import distutils.command.clean
import distutils.command.config
import distutils.command.install_data
import distutils.command.install_egg_info
import distutils.command.install_headers
import distutils.command.install_lib
import distutils.command.register
import distutils.command.sdist
import distutils.command.upload
import encodings
import formatter
import getpass
import htmlentitydefs
import ihooks
import imghdr
import imputil
import keyword
import linecache
import macurl2path
import mailcap
import mimify
import nntplib
import nturl2path
import opcode
import os2emxpath
import pdb
import posixfile
import pstats
import py_compile
import rexec
import sched
import sndhdr
import statvfs
import stringold
import sunau
import sunaudio
import symbol
import tabnanny
import timeit
import toaiff
import token
try:
import tty # not available on Windows
except ImportError:
if test_support.verbose:
print "skipping tty"
# Can't test the "user" module -- if the user has a ~/.pythonrc.py, it
# can screw up all sorts of things (esp. if it prints!).
#import user
import webbrowser
import xml
def test_main():
test_support.run_unittest(TestUntestedModules)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
henryiii/plumbum | tests/test_local.py | 2 | 21996 | import os
import unittest
import sys
import signal
import time
from plumbum import (local, LocalPath, FG, BG, TF, RETCODE, ERROUT,
CommandNotFound, ProcessExecutionError, ProcessTimedOut)
from plumbum.lib import six, IS_WIN32
from plumbum.fs.atomic import AtomicFile, AtomicCounterFile, PidFile
from plumbum.path import RelativePath
import plumbum
from plumbum._testtools import (skipIf, skip_on_windows,
skip_without_tty, skip_without_chown)
try:
import pathlib
except ImportError:
pathlib = None
class LocalPathTest(unittest.TestCase):
def setUp(self):
self.longpath = LocalPath("/some/long/path/to/file.txt")
def test_name(self):
name = self.longpath.name
self.assertTrue(isinstance(name, six.string_types))
self.assertEqual("file.txt", str(name))
def test_dirname(self):
name = self.longpath.dirname
self.assertTrue(isinstance(name, LocalPath))
self.assertEqual("/some/long/path/to", str(name).replace("\\", "/"))
def test_uri(self):
self.assertEqual("file:///some/long/path/to/file.txt", self.longpath.as_uri())
@skip_without_chown
def test_chown(self):
with local.tempdir() as dir:
p = dir / "foo.txt"
p.write(six.b("hello"))
self.assertEqual(p.uid, os.getuid())
self.assertEqual(p.gid, os.getgid())
p.chown(p.uid.name)
self.assertEqual(p.uid, os.getuid())
def test_split(self):
p = local.path("/var/log/messages")
self.assertEqual(p.split(), ["var", "log", "messages"])
def test_suffix(self):
p1 = self.longpath
p2 = local.path("file.tar.gz")
self.assertEqual(p1.suffix, ".txt")
self.assertEqual(p1.suffixes, [".txt"])
self.assertEqual(p2.suffix, ".gz")
self.assertEqual(p2.suffixes, [".tar",".gz"])
self.assertEqual(p1.with_suffix(".tar.gz"), local.path("/some/long/path/to/file.tar.gz"))
self.assertEqual(p2.with_suffix(".other"), local.path("file.tar.other"))
self.assertEqual(p2.with_suffix(".other", 2), local.path("file.other"))
self.assertEqual(p2.with_suffix(".other", 0), local.path("file.tar.gz.other"))
self.assertEqual(p2.with_suffix(".other", None), local.path("file.other"))
def test_newname(self):
p1 = self.longpath
p2 = local.path("file.tar.gz")
self.assertEqual(p1.with_name("something.tar"), local.path("/some/long/path/to/something.tar"))
self.assertEqual(p2.with_name("something.tar"), local.path("something.tar"))
def test_relative_to(self):
p = local.path("/var/log/messages")
self.assertEqual(p.relative_to("/var/log/messages"), RelativePath([]))
self.assertEqual(p.relative_to("/var/"), RelativePath(["log", "messages"]))
self.assertEqual(p.relative_to("/"), RelativePath(["var", "log", "messages"]))
self.assertEqual(p.relative_to("/var/tmp"), RelativePath(["..", "log", "messages"]))
self.assertEqual(p.relative_to("/opt"), RelativePath(["..", "var", "log", "messages"]))
self.assertEqual(p.relative_to("/opt/lib"), RelativePath(["..", "..", "var", "log", "messages"]))
for src in [local.path("/var/log/messages"), local.path("/var"), local.path("/opt/lib")]:
delta = p.relative_to(src)
self.assertEqual(src + delta, p)
def test_read_write(self):
with local.tempdir() as dir:
f = dir / "test.txt"
text = six.b('hello world\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d').decode("utf8")
f.write(text, "utf8")
text2 = f.read("utf8")
self.assertEqual(text, text2)
def test_parts(self):
parts = self.longpath.parts
self.assertEqual(parts, ('/', 'some', 'long', 'path', 'to', 'file.txt'))
def test_stem(self):
self.assertEqual(self.longpath.stem, "file")
p = local.path("/some/directory")
self.assertEqual(p.stem, "directory")
@skipIf(pathlib is None, "This test requires pathlib")
def test_root_drive(self):
pl_path = pathlib.Path("/some/long/path/to/file.txt").absolute()
self.assertEqual(self.longpath.root, pl_path.root)
self.assertEqual(self.longpath.drive, pl_path.drive)
p_path = local.cwd / "somefile.txt"
pl_path = pathlib.Path("somefile.txt").absolute()
self.assertEqual(p_path.root, pl_path.root)
self.assertEqual(p_path.drive, pl_path.drive)
@skipIf(pathlib is None, "This test requires pathlib")
def test_compare_pathlib(self):
def filename_compare(name):
p = local.path(str(name))
pl = pathlib.Path(str(name)).absolute()
self.assertEqual(str(p), str(pl))
self.assertEqual(p.parts, pl.parts)
self.assertEqual(p.exists(), pl.exists())
self.assertEqual(p.as_uri(), pl.as_uri())
self.assertEqual(str(p.with_suffix('.this')), str(pl.with_suffix('.this')))
self.assertEqual(p.name, pl.name)
filename_compare("/some/long/path/to/file.txt")
filename_compare(local.cwd / "somefile.txt")
filename_compare("/some/long/path/")
filename_compare("/some/long/path")
filename_compare(__file__)
def test_suffix_expected(self):
self.assertEqual(self.longpath.preferred_suffix('.tar'), self.longpath)
self.assertEqual((local.cwd / 'this').preferred_suffix('.txt'), local.cwd / 'this.txt')
class LocalMachineTest(unittest.TestCase):
def test_getattr(self):
pb = plumbum
self.assertEqual(getattr(pb.cmd, 'does_not_exist', 1), 1)
ls_cmd1 = pb.cmd.non_exist1N9 if hasattr(pb.cmd, 'non_exist1N9') else pb.cmd.ls
ls_cmd2 = getattr(pb.cmd, 'non_exist1N9', pb.cmd.ls)
self.assertEqual(str(ls_cmd1), str(local['ls']))
self.assertEqual(str(ls_cmd2), str(local['ls']))
# TODO: This probably fails because of odd ls behavior
@skip_on_windows
def test_imports(self):
from plumbum.cmd import ls
self.assertTrue("test_local.py" in local["ls"]().splitlines())
self.assertTrue("test_local.py" in ls().splitlines())
self.assertRaises(CommandNotFound, lambda: local["non_exist1N9"])
try:
from plumbum.cmd import non_exist1N9 #@UnresolvedImport @UnusedImport
except (ImportError, CommandNotFound):
pass
else:
self.fail("from plumbum.cmd import non_exist1N9")
def test_get(self):
self.assertEqual(str(local['ls']),str(local.get('ls')))
self.assertEqual(str(local['ls']),str(local.get('non_exist1N9', 'ls')))
self.assertRaises(CommandNotFound, lambda: local.get("non_exist1N9"))
self.assertRaises(CommandNotFound, lambda: local.get("non_exist1N9", "non_exist1N8"))
self.assertRaises(CommandNotFound, lambda: local.get("non_exist1N9", "/tmp/non_exist1N8"))
def test_shadowed_by_dir(self):
real_ls = local['ls']
with local.tempdir() as tdir:
with local.cwd(tdir):
ls_dir = tdir / 'ls'
ls_dir.mkdir()
fake_ls = local['ls']
assert fake_ls.executable == real_ls.executable
local.env.path.insert(0, tdir)
fake_ls = local['ls']
del local.env.path[0]
assert fake_ls.executable == real_ls.executable
@skip_on_windows
def test_cwd(self):
from plumbum.cmd import ls
self.assertEqual(local.cwd, os.getcwd())
self.assertTrue("__init__.py" not in ls().splitlines())
with local.cwd("../plumbum"):
self.assertTrue("__init__.py" in ls().splitlines())
self.assertTrue("__init__.py" not in ls().splitlines())
self.assertRaises(OSError, local.cwd.chdir, "../non_exist1N9")
@skip_on_windows
def test_mixing_chdir(self):
self.assertEqual(local.cwd, os.getcwd())
os.chdir('../plumbum')
self.assertEqual(local.cwd, os.getcwd())
os.chdir('../tests')
self.assertEqual(local.cwd, os.getcwd())
@skip_on_windows
def test_path(self):
self.assertFalse((local.cwd / "../non_exist1N9").exists())
self.assertTrue((local.cwd / ".." / "plumbum").is_dir())
# traversal
found = False
for fn in local.cwd / ".." / "plumbum":
if fn.name == "__init__.py":
self.assertTrue(fn.is_file())
found = True
self.assertTrue(found)
# glob'ing
found = False
for fn in local.cwd / ".." // "*/*.rst":
if fn.name == "index.rst":
found = True
self.assertTrue(found)
for fn in local.cwd / ".." // ("*/*.rst", "*./*.html"):
if fn.name == "index.rst":
found = True
self.assertTrue(found)
@skip_on_windows
def test_env(self):
self.assertTrue("PATH" in local.env)
self.assertFalse("FOOBAR72" in local.env)
self.assertRaises(ProcessExecutionError, local.python, "-c", "import os;os.environ['FOOBAR72']")
local.env["FOOBAR72"] = "spAm"
self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR72'])").splitlines(), ["spAm"])
with local.env(FOOBAR73 = 1889):
self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR73'])").splitlines(), ["1889"])
with local.env(FOOBAR73 = 1778):
self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR73'])").splitlines(), ["1778"])
self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR73'])").splitlines(), ["1889"])
self.assertRaises(ProcessExecutionError, local.python, "-c", "import os;os.environ['FOOBAR73']")
# path manipulation
self.assertRaises(CommandNotFound, local.which, "dummy-executable")
with local.env():
local.env.path.insert(0, local.cwd / "not-in-path")
p = local.which("dummy-executable")
self.assertEqual(p, local.cwd / "not-in-path" / "dummy-executable")
def test_local(self):
self.assertTrue("plumbum" in str(local.cwd))
self.assertTrue("PATH" in local.env.getdict())
self.assertEqual(local.path("foo"), os.path.join(os.getcwd(), "foo"))
local.which("ls")
local["ls"]
self.assertEqual(local.python("-c", "print ('hi there')").splitlines(), ["hi there"])
@skip_on_windows
def test_piping(self):
from plumbum.cmd import ls, grep
chain = ls | grep["\\.py"]
self.assertTrue("test_local.py" in chain().splitlines())
chain = (ls["-a"] | grep["test"] | grep["local"])
self.assertTrue("test_local.py" in chain().splitlines())
@skip_on_windows
def test_redirection(self):
from plumbum.cmd import cat, ls, grep, rm
chain = (ls | grep["\\.py"]) > "tmp.txt"
chain()
chain2 = (cat < "tmp.txt") | grep["local"]
self.assertTrue("test_local.py" in chain2().splitlines())
rm("tmp.txt")
chain3 = (cat << "this is the\nworld of helloness and\nspam bar and eggs") | grep["hello"]
self.assertTrue("world of helloness and" in chain3().splitlines())
rc, _, err = (grep["-Zq5"] >= "tmp2.txt").run(["-Zq5"], retcode = None)
self.assertEqual(rc, 2)
self.assertFalse(err)
self.assertTrue("usage" in (cat < "tmp2.txt")().lower())
rm("tmp2.txt")
rc, out, _ = (grep["-Zq5"] >= ERROUT).run(["-Zq5"], retcode = None)
self.assertEqual(rc, 2)
self.assertTrue("usage" in out.lower())
@skip_on_windows
def test_popen(self):
from plumbum.cmd import ls
p = ls.popen(["-a"])
out, _ = p.communicate()
self.assertEqual(p.returncode, 0)
self.assertTrue("test_local.py" in out.decode(local.encoding).splitlines())
def test_run(self):
from plumbum.cmd import ls, grep
rc, out, err = (ls | grep["non_exist1N9"]).run(retcode = 1)
self.assertEqual(rc, 1)
def test_timeout(self):
from plumbum.cmd import sleep
self.assertRaises(ProcessTimedOut, sleep, 10, timeout = 5)
@skip_on_windows
def test_iter_lines_timeout(self):
from plumbum.cmd import ping
try:
# Order is important on mac
for i, (out, err) in enumerate(ping["-i", 0.5, "127.0.0.1"].popen().iter_lines(timeout=2)):
print("out:", out)
print("err:", err)
except ProcessTimedOut:
self.assertTrue(i > 3)
else:
self.fail("Expected a timeout")
@skip_on_windows
def test_iter_lines_error(self):
from plumbum.cmd import ls
try:
for i, lines in enumerate(ls["--bla"].popen()):
pass
self.assertEqual(i, 1)
except ProcessExecutionError:
ex = sys.exc_info()[1]
self.assertTrue(ex.stderr.startswith("/bin/ls: unrecognized option '--bla'")
or ex.stderr.startswith("/bin/ls: illegal option -- -"))
else:
self.fail("Expected an execution error")
@skip_on_windows
def test_modifiers(self):
from plumbum.cmd import ls, grep
f = (ls["-a"] | grep["\\.py"]) & BG
f.wait()
self.assertTrue("test_local.py" in f.stdout.splitlines())
command = (ls["-a"] | grep["local"])
command_false = (ls["-a"] | grep["not_a_file_here"])
command & FG
self.assertTrue(command & TF)
self.assertFalse(command_false & TF)
self.assertEqual(command & RETCODE, 0)
self.assertEqual(command_false & RETCODE, 1)
def test_arg_expansion(self):
from plumbum.cmd import ls
args = [ '-l', '-F' ]
ls(*args)
ls[args]
@skip_on_windows
def test_session(self):
sh = local.session()
for _ in range(4):
_, out, _ = sh.run("ls -a")
self.assertTrue("test_local.py" in out.splitlines())
sh.run("cd ..")
sh.run("export FOO=17")
out = sh.run("echo $FOO")[1]
self.assertEqual(out.splitlines(), ["17"])
def test_quoting(self):
ssh = local["ssh"]
pwd = local["pwd"]
cmd = ssh["localhost", "cd", "/usr", "&&", ssh["localhost", "cd", "/", "&&",
ssh["localhost", "cd", "/bin", "&&", pwd]]]
self.assertTrue("\"'&&'\"" in " ".join(cmd.formulate(0)))
ls = local['ls']
try:
ls('-a', '') # check that empty strings are rendered correctly
except ProcessExecutionError:
ex = sys.exc_info()[1]
self.assertEqual(ex.argv[-2:], ['-a', ''])
else:
self.fail("Expected `ls` to fail")
def test_tempdir(self):
from plumbum.cmd import cat
with local.tempdir() as dir:
self.assertTrue(dir.is_dir())
data = six.b("hello world")
with open(str(dir / "test.txt"), "wb") as f:
f.write(data)
with open(str(dir / "test.txt"), "rb") as f:
self.assertEqual(f.read(), data)
self.assertFalse(dir.exists())
def test_direct_open_tmpdir(self):
from plumbum.cmd import cat
with local.tempdir() as dir:
self.assertTrue(dir.is_dir())
data = six.b("hello world")
with open(dir / "test.txt", "wb") as f:
f.write(data)
with open(dir / "test.txt", "rb") as f:
self.assertEqual(f.read(), data)
self.assertFalse(dir.exists())
def test_read_write(self):
with local.tempdir() as tmp:
data = six.b("hello world")
(tmp / "foo.txt").write(data)
self.assertEqual((tmp / "foo.txt").read(), data)
def test_links(self):
with local.tempdir() as tmp:
src = tmp / "foo.txt"
dst1 = tmp / "bar.txt"
dst2 = tmp / "spam.txt"
data = six.b("hello world")
src.write(data)
src.link(dst1)
self.assertEqual(data, dst1.read())
src.symlink(dst2)
self.assertEqual(data, dst2.read())
@skip_on_windows
def test_as_user(self):
with local.as_root():
local["date"]()
def test_list_processes(self):
self.assertTrue(list(local.list_processes()))
def test_pgrep(self):
self.assertTrue(list(local.pgrep("python")))
def _generate_sigint(self):
try:
if sys.platform == "win32":
from win32api import GenerateConsoleCtrlEvent
GenerateConsoleCtrlEvent(0, 0) # send Ctrl+C to current TTY
else:
os.kill(0, signal.SIGINT)
time.sleep(1)
except KeyboardInterrupt:
pass
else:
self.fail("Expected KeyboardInterrupt")
@skip_without_tty
@skip_on_windows
def test_same_sesion(self):
from plumbum.cmd import sleep
p = sleep.popen([1000])
self.assertTrue(p.poll() is None)
self._generate_sigint()
time.sleep(1)
self.assertTrue(p.poll() is not None)
@skip_without_tty
def test_new_session(self):
from plumbum.cmd import sleep
p = sleep.popen([1000], new_session = True)
self.assertTrue(p.poll() is None)
self._generate_sigint()
time.sleep(1)
self.assertTrue(p.poll() is None)
p.terminate()
def test_local_daemon(self):
from plumbum.cmd import sleep
proc = local.daemonic_popen(sleep[5])
try:
os.waitpid(proc.pid, 0)
except OSError:
pass
else:
self.fail("I shouldn't have any children by now -- they are daemons!")
proc.wait()
@skip_on_windows
def test_atomic_file(self):
af1 = AtomicFile("tmp.txt")
af2 = AtomicFile("tmp.txt")
af1.write_atomic(six.b("foo"))
af2.write_atomic(six.b("bar"))
self.assertEqual(af1.read_atomic(), six.b("bar"))
self.assertEqual(af2.read_atomic(), six.b("bar"))
local.path("tmp.txt").delete()
@skip_on_windows
def test_atomic_file2(self):
af = AtomicFile("tmp.txt")
code = """from __future__ import with_statement
from plumbum.fs.atomic import AtomicFile
af = AtomicFile("tmp.txt")
try:
with af.locked(blocking = False):
raise ValueError("this should have failed")
except (OSError, IOError):
print("already locked")
"""
with af.locked():
output = local.python("-c", code)
self.assertEqual(output.strip(), "already locked")
local.path("tmp.txt").delete()
@skip_on_windows
def test_pid_file(self):
code = """from __future__ import with_statement
from plumbum.fs.atomic import PidFile, PidFileTaken
try:
with PidFile("mypid"):
raise ValueError("this should have failed")
except PidFileTaken:
print("already locked")
"""
with PidFile("mypid"):
output = local.python("-c", code)
self.assertEqual(output.strip(), "already locked")
local.path("mypid").delete()
@skip_on_windows
def test_atomic_counter(self):
local.path("counter").delete()
num_of_procs = 20
num_of_increments = 20
code = """from plumbum.fs.atomic import AtomicCounterFile
import time
time.sleep(0.2)
afc = AtomicCounterFile.open("counter")
for _ in range(%s):
print(afc.next())
time.sleep(0.1)
""" % (num_of_increments,)
procs = []
for _ in range(num_of_procs):
procs.append(local.python["-c", code].popen())
results = []
for p in procs:
out, _ = p.communicate()
self.assertEqual(p.returncode, 0)
results.extend(int(num) for num in out.splitlines())
self.assertEqual(len(results), num_of_procs * num_of_increments)
self.assertEqual(len(set(results)), len(results))
self.assertEqual(min(results), 0)
self.assertEqual(max(results), num_of_procs * num_of_increments - 1)
local.path("counter").delete()
@skip_on_windows
def test_atomic_counter2(self):
local.path("counter").delete()
afc = AtomicCounterFile.open("counter")
self.assertEqual(afc.next(), 0)
self.assertEqual(afc.next(), 1)
self.assertEqual(afc.next(), 2)
self.assertRaises(TypeError, afc.reset, "hello")
afc.reset(70)
self.assertEqual(afc.next(), 70)
self.assertEqual(afc.next(), 71)
self.assertEqual(afc.next(), 72)
local.path("counter").delete()
@skip_on_windows
def test_bound_env(self):
try:
from plumbum.cmd import printenv
except CommandNotFound:
self.skipTest("printenv is missing")
with local.env(FOO = "hello"):
self.assertEqual(printenv.with_env(BAR = "world")("FOO"), "hello\n")
self.assertEqual(printenv.with_env(BAR = "world")("BAR"), "world\n")
self.assertEqual(printenv.with_env(FOO = "sea", BAR = "world")("FOO"), "sea\n")
self.assertEqual(printenv("FOO"), "hello\n")
def test_nesting_lists_as_argv(self):
from plumbum.cmd import ls
c = ls["-l", ["-a", "*.py"]]
self.assertEqual(c.formulate()[1:], ['-l', '-a', '*.py'])
def test_contains(self):
self.assertTrue("ls" in local, "Expected to find `ls`")
def test_issue_139(self):
LocalPath(local.cwd)
def test_pipeline_failure(self):
from plumbum.cmd import ls, head
self.assertRaises(ProcessExecutionError, (ls["--no-such-option"] | head))
if __name__ == "__main__":
unittest.main()
| mit |
cyc805/VM | src/create-module.py | 21 | 9326 | #! /usr/bin/env python
import sys
from optparse import OptionParser
import os
WSCRIPT_TEMPLATE = '''# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# def options(opt):
# pass
# def configure(conf):
# conf.check_nonfatal(header_name='stdint.h', define_name='HAVE_STDINT_H')
def build(bld):
module = bld.create_ns3_module(%(MODULE)r, ['core'])
module.source = [
'model/%(MODULE)s.cc',
'helper/%(MODULE)s-helper.cc',
]
module_test = bld.create_ns3_module_test_library('%(MODULE)s')
module_test.source = [
'test/%(MODULE)s-test-suite.cc',
]
headers = bld.new_task_gen(features=['ns3header'])
headers.module = %(MODULE)r
headers.source = [
'model/%(MODULE)s.h',
'helper/%(MODULE)s-helper.h',
]
if bld.env.ENABLE_EXAMPLES:
bld.add_subdirs('examples')
# bld.ns3_python_bindings()
'''
MODEL_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#include "%(MODULE)s.h"
namespace ns3 {
/* ... */
}
'''
MODEL_H_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#ifndef %(INCLUDE_GUARD)s
#define %(INCLUDE_GUARD)s
namespace ns3 {
/* ... */
}
#endif /* %(INCLUDE_GUARD)s */
'''
HELPER_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#include "%(MODULE)s-helper.h"
namespace ns3 {
/* ... */
}
'''
HELPER_H_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#ifndef %(INCLUDE_GUARD)s
#define %(INCLUDE_GUARD)s
#include "ns3/%(MODULE)s.h"
namespace ns3 {
/* ... */
}
#endif /* %(INCLUDE_GUARD)s */
'''
EXAMPLES_WSCRIPT_TEMPLATE = '''# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
obj = bld.create_ns3_program('%(MODULE)s-example', [%(MODULE)r])
obj.source = '%(MODULE)s-example.cc'
'''
EXAMPLE_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
#include "ns3/core-module.h"
#include "ns3/%(MODULE)s-helper.h"
using namespace ns3;
int
main (int argc, char *argv[])
{
bool verbose = true;
CommandLine cmd;
cmd.AddValue ("verbose", "Tell application to log if true", verbose);
cmd.Parse (argc,argv);
/* ... */
Simulator::Run ();
Simulator::Destroy ();
return 0;
}
'''
TEST_CC_TEMPLATE = '''/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
// Include a header file from your module to test.
#include "ns3/%(MODULE)s.h"
// An essential include is test.h
#include "ns3/test.h"
// Do not put your test classes in namespace ns3. You may find it useful
// to use the using directive to access the ns3 namespace directly
using namespace ns3;
// This is an example TestCase.
class %(CAPITALIZED)sTestCase1 : public TestCase
{
public:
%(CAPITALIZED)sTestCase1 ();
virtual ~%(CAPITALIZED)sTestCase1 ();
private:
virtual void DoRun (void);
};
// Add some help text to this case to describe what it is intended to test
%(CAPITALIZED)sTestCase1::%(CAPITALIZED)sTestCase1 ()
: TestCase ("%(CAPITALIZED)s test case (does nothing)")
{
}
// This destructor does nothing but we include it as a reminder that
// the test case should clean up after itself
%(CAPITALIZED)sTestCase1::~%(CAPITALIZED)sTestCase1 ()
{
}
//
// This method is the pure virtual method from class TestCase that every
// TestCase must implement
//
void
%(CAPITALIZED)sTestCase1::DoRun (void)
{
// A wide variety of test macros are available in src/core/test.h
NS_TEST_ASSERT_MSG_EQ (true, true, "true doesn't equal true for some reason");
// Use this one for floating point comparisons
NS_TEST_ASSERT_MSG_EQ_TOL (0.01, 0.01, 0.001, "Numbers are not equal within tolerance");
}
// The TestSuite class names the TestSuite, identifies what type of TestSuite,
// and enables the TestCases to be run. Typically, only the constructor for
// this class must be defined
//
class %(CAPITALIZED)sTestSuite : public TestSuite
{
public:
%(CAPITALIZED)sTestSuite ();
};
%(CAPITALIZED)sTestSuite::%(CAPITALIZED)sTestSuite ()
: TestSuite ("%(MODULE)s", UNIT)
{
AddTestCase (new %(CAPITALIZED)sTestCase1);
}
// Do not forget to allocate an instance of this TestSuite
static %(CAPITALIZED)sTestSuite %(MODULE)sTestSuite;
'''
DOC_RST_TEMPLATE = '''Example Module Documentation
----------------------------
.. heading hierarchy:
------------- Chapter
************* Section (#.#)
============= Subsection (#.#.#)
############# Paragraph (no number)
This is a suggested outline for adding new module documentation to ns-3.
See ``src/click/doc/click.rst`` for an example.
The introductory paragraph is for describing what this code is trying to
model.
Model Description
*****************
The source code for the new module lives in the directory ``src/%(MODULE)s``.
Add here a basic description of what is being modeled.
Design
======
Briefly describe the software design of the model and how it fits into
the existing ns-3 architecture.
Scope and Limitations
=====================
What can the model do? What can it not do? Please use this section to
describe the scope and limitations of the model.
References
==========
Add academic citations here, such as if you published a paper on this
model, or if readers should read a particular specification or other work.
Usage
*****
This section is principally concerned with the usage of your model, using
the public API. Focus first on most common usage patterns, then go
into more advanced topics.
Building New Module
===================
Include this subsection only if there are special build instructions or
platform limitations.
Helpers
=======
What helper API will users typically use? Describe it here.
Attributes
==========
What classes hold attributes, and what are the key ones worth mentioning?
Output
======
What kind of data does the model generate? What are the key trace
sources? What kind of logging output can be enabled?
Advanced Usage
==============
Go into further details (such as using the API outside of the helpers)
in additional sections, as needed.
Examples
========
What examples using this new code are available? Describe them here.
Troubleshooting
===============
Add any tips for avoiding pitfalls, etc.
Validation
**********
Describe how the model has been tested/validated. What tests run in the
test suite? How much API and code is covered by the tests? Again,
references to outside published work may help here.
'''
def main(argv):
parser = OptionParser(usage=("Usage: %prog [options] modulename\n"
"Utility script to create a basic template for a new ns-3 module"))
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
return 1
modname = args[0]
assert os.path.sep not in modname
moduledir = os.path.join(os.path.dirname(__file__), modname)
if os.path.exists(moduledir):
print >> sys.stderr, "Module %r already exists" % (modname,)
return 2
os.mkdir(moduledir)
wscript = file(os.path.join(moduledir, "wscript"), "wt")
wscript.write(WSCRIPT_TEMPLATE % dict(MODULE=modname))
wscript.close()
#
# model
#
modeldir = os.path.join(moduledir, "model")
os.mkdir(modeldir)
model_cc = file(os.path.join(moduledir, "model", "%s.cc" % modname), "wt")
model_cc.write(MODEL_CC_TEMPLATE % dict(MODULE=modname))
model_cc.close()
model_h = file(os.path.join(moduledir, "model", "%s.h" % modname), "wt")
model_h.write(MODEL_H_TEMPLATE % dict(MODULE=modname, INCLUDE_GUARD="__%s_H__" % (modname.upper()),))
model_h.close()
#
# test
#
testdir = os.path.join(moduledir, "test")
os.mkdir(testdir)
test_cc = file(os.path.join(moduledir, "test", "%s-test-suite.cc" % modname), "wt")
test_cc.write(TEST_CC_TEMPLATE % dict(MODULE=modname,CAPITALIZED=modname.capitalize()))
test_cc.close()
#
# helper
#
helperdir = os.path.join(moduledir, "helper")
os.mkdir(helperdir)
helper_cc = file(os.path.join(moduledir, "helper", "%s-helper.cc" % modname), "wt")
helper_cc.write(HELPER_CC_TEMPLATE % dict(MODULE=modname))
helper_cc.close()
helper_h = file(os.path.join(moduledir, "helper", "%s-helper.h" % modname), "wt")
helper_h.write(HELPER_H_TEMPLATE % dict(MODULE=modname, INCLUDE_GUARD="__%s_HELPER_H__" % (modname.upper()),))
helper_h.close()
#
# examples
#
examplesdir = os.path.join(moduledir, "examples")
os.mkdir(examplesdir)
examples_wscript = file(os.path.join(examplesdir, "wscript"), "wt")
examples_wscript.write(EXAMPLES_WSCRIPT_TEMPLATE % dict(MODULE=modname))
examples_wscript.close()
example_cc = file(os.path.join(moduledir, "examples", "%s-example.cc" % modname), "wt")
example_cc.write(EXAMPLE_CC_TEMPLATE % dict(MODULE=modname))
example_cc.close()
#
# doc
#
docdir = os.path.join(moduledir, "doc")
os.mkdir(docdir)
doc_rst = file(os.path.join(moduledir, "doc", "%s.rst" % modname), "wt")
doc_rst.write(DOC_RST_TEMPLATE % dict(MODULE=modname))
doc_rst.close()
return 0
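# Typical invocation from the ns-3 'src' directory (the module name below is
# illustrative):
#     ./create-module.py my-module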
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
benkonrath/django-guardian | guardian/__init__.py | 1 | 1073 | """
Implementation of per object permissions for Django.
"""
from __future__ import unicode_literals
from . import checks
try:
from .version import version as __version__
__version__split__ = __version__.split(".")
VERSION = tuple(__version__split__)
def get_version():
"""
Returns shorter version (digit parts only) as string.
"""
return '.'.join((str(each) for each in VERSION[:3]))
except ImportError:
pass
default_app_config = 'guardian.apps.GuardianConfig'
def monkey_patch_user():
from .compat import get_user_model
from .utils import get_anonymous_user
from .models import UserObjectPermission
User = get_user_model()
# Prototype User methods
setattr(User, 'get_anonymous', staticmethod(lambda: get_anonymous_user()))
setattr(User, 'add_obj_perm',
lambda self, perm, obj: UserObjectPermission.objects.assign_perm(perm, self, obj))
setattr(User, 'del_obj_perm',
lambda self, perm, obj: UserObjectPermission.objects.remove_perm(perm, self, obj))
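# Hypothetical usage once monkey_patch_user() has been called (the model
# instance and permission codename below are illustrative):
#
#     monkey_patch_user()
#     user = get_user_model().objects.get(pk=1)
#     user.add_obj_perm('change_project', project)
#     user.del_obj_perm('change_project', project)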
| bsd-2-clause |
CentreForResearchInAppliedLinguistics/clic | clic/web/models.py | 2 | 9496 | # -*- coding: utf-8 -*-
'''
models.py defines the SQL tables that CLiC uses. These classes
provide a Python interface to the SQL database so that you can write Python
code that automatically queries the database.
This is heavily dependent on Flask-SQLAlchemy and SQLAlchemy.
'''
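# An illustrative query against these models (the field values below are
# hypothetical):
#
#     quotes = Subset.query.filter_by(abbr='BH', kind='quotes').all()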
from sqlalchemy.dialects.postgresql import JSON
from flask.ext.security import UserMixin, RoleMixin
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship, backref
db = SQLAlchemy()
subset_tags = db.Table('subset_tags',
db.Column('subset_id', db.Integer, db.ForeignKey('subsets.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tags.id')),
extend_existing=True,
)
class Subset(db.Model):
__tablename__ = 'subsets'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
book = db.Column(db.String(100)) # Bleak House
abbr = db.Column(db.String(10)) # BH
kind = db.Column(db.String(100)) # quotes, non-quotes, suspensions, short-suspensions, etc.
corpus = db.Column(db.String(10)) # dickens or ntc
text = db.Column(db.String)
# tags = relationship('Tag', secondary=subset_tags, backref=db.backref('subsets'))
__mapper_args__ = {
"order_by": [abbr, kind,] # text]
}
def __init__(self, book='', abbr='', kind='', text=''):
self.book = book
self.abbr = abbr
self.kind = kind
self.text = text
def __repr__(self):
return "<Subset(book='%s', abbr='%s', kind='%s', text='%s')>" % (
self.book, self.abbr, self.kind, self.text)
class Tag(db.Model):
__tablename__ = 'tags'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
tag_name = db.Column(db.String(80), nullable=False) # negotiating information, politeness
subset = db.relationship('Subset', secondary=subset_tags, backref=db.backref('tags'))
# one to one relationship:
# FIXME should be FK AND not nullable!
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
owner = db.relationship("User", backref=db.backref("tags"))
def __init__(self, tag_name='', owner=None):
self.tag_name = tag_name
self.owner = owner
def __repr__(self):
# return 'Tag: ' + str(self.name) + '>'
if self.owner:
output = unicode(self.owner.name) + u'-' + unicode(self.tag_name)
return output
return unicode(self.tag_name)
class Note(db.Model):
__tablename__ = 'notes'
id = db.Column(db.Integer, primary_key=True)
note = db.Column(db.String(5000), nullable=False)
# note that the backref should be the name of this class (and not the class this relationship extends to)
subset = db.relationship('Subset', backref=db.backref('notes', order_by=id))
subset_id = db.Column(db.Integer, db.ForeignKey('subsets.id'))
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
owner = db.relationship("User", backref=db.backref('notes'))
def __init__(self, note='', owner=None):
self.note = note
self.owner = owner
def __repr__(self):
if self.owner:
output = unicode(self.owner.name) + u'-' + unicode(self.note)
return output
return self.note
class Annotation(db.Model):
__tablename__ = 'annotations'
id = db.Column(db.Integer, primary_key=True)
category_id = db.Column(
db.Integer,
db.ForeignKey('annotation_categories.id', ondelete='CASCADE'),
nullable=True
)
notes = db.Column(db.Text())
public = db.Column(db.Boolean, default=True)
# name = db.Column(db.String())
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime,
server_default=db.func.now(),
onupdate=db.func.now())
location = db.Column(db.String(50))
proxinfo = db.Column(db.String(50))
def __init__(self, url=""):
self.url = url
def __repr__(self):
return '<id {}>'.format(self.id)
class Category(db.Model):
__tablename__ = "annotation_categories"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
approved = db.Column(db.Boolean, default=False)
explanation = db.Column(db.Text())
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime,
server_default=db.func.now(),
onupdate=db.func.now())
annotations = db.relationship('Annotation',
backref='category',
lazy='dynamic')
def __repr__(self):
return '{}'.format(self.name)
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
def __repr__(self):
return '{}'.format(self.name)
class User(db.Model, UserMixin):
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(35), unique=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
# TODO created_on = db.Column(db.DateTime, server_default=db.func.now())
# TODO updated_on = db.Column(db.DateTime,
# server_default=db.func.now(),
# onupdate=db.func.now())
# Trackable fields for Flask-Security
# last_login_at = db.Column(db.DateTime)
# current_login_at = db.Column(db.DateTime)
# last_login_ip = db.Column(db.String(50))
# current_login_ip = db.Column(db.String(50))
# login_count = db.Column(db.Integer)
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
# Flask-Login integration
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return self.id
def __unicode__(self):
return self.name
class List(db.Model):
__tablename__ = 'lists'
id = db.Column(db.Integer, primary_key=True)
# version =
# public = db.Column(db.Boolean, default=False)
# time_created = db.Column(db.DateTime, timezone=True)
# time_modified = db.Column(db.DateTime, timezone=True)
raw_json = db.Column(JSON)
# text_id = Column(Integer, ForeignKey('text.id'))
# text = relationship("Text")
def __init__(self, url, json):
self.url = url
self.raw_json = json
def __repr__(self):
return '<id {}>'.format(self.id)
#class AnnotationB(db.Model):
# """
# 1: one text/hook, many annotations; fk on text/hook -> annotations
# 2: one text/hook, many annotations: fk on annotations -> text/hook
# 3: many texts/hooks, many annotations: m2m
# 4: many annotations, many texts/hook, but each annotation only on one text
#
# Text : parent
#
# annotation : child
#
# or do not denormalize at all, simply add columns for the location of the
# text, the text' json, and the user
# not a good idea
#
# you don't go from user->annotation, but from annotation->user
# so too you go from annotation->text even if text is the more fundamental
# entity and annotation the more concrete.
#
# The idea is that you are most often going to be doing CRUD operations on
# annotations.
#
# But at the same time you want to be able to get all the annotations for
# a specific text.
# """
#
# __tablename__ = 'annotations'
#
# id = db.Column(db.Integer, primary_key=True)
# # or id = string= BH.c4.p2.s1
# url = db.Column(db.String())
# # public = db.Column(db.Boolean, default=False)
# # hook = #TODO
# # time_created = db.Column(db.DateTime, timezone=True)
# # time_modified = db.Column(db.DateTime, timezone=True)
# # raw_json = db.Column(JSON)
# # text_id = Column(Integer, ForeignKey('text.id'))
# # text = relationship("Text")
#
# # result_all = db.Column(JSON)
# # result_no_stop_words = db.Column(JSON)
#
# # def __init__(self, url, result_all, result_no_stop_words):
# def __init__(self, url):
# self.url = url
# # self.result_all = result_all
# # self.result_no_stop_words = result_no_stop_words
#
# def __repr__(self):
# return '<id {}>'.format(self.id)
#
# def check_spelling(self):
# pass
#
# def check_near_neighbours(self):
# pass
#
# def strip_get_parameters(self, url):
# pass
#
# def last_modified(self, url):
# pass
#
#
#class Hook(db.Model):
#
# pass
# # def __init__(self, #TODO):
# # self.a
#
#
#class Text(db.Model):
#
# __tablename__ = 'text'
# id = db.Column(Integer, primary_key=True)
# annotation_id = db.Column(Integer, ForeignKey('annotation.id'))
# annotation = db.relationship("Annotation", backref="annotations")
#
# def check_uniqueness(self):
# pass
| mit |
strint/tensorflow | tensorflow/python/kernel_tests/division_past_test.py | 55 | 2219 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
for the __future__ division line.
"""
from __future__ import absolute_import
# from __future__ import division # Intentionally skip this import
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
functions = (lambda x: x), constant_op.constant
# TODO(irving): Test int8, int16 once we support casts for those.
dtypes = np.int32, np.int64, np.float32, np.float64
def check(x, y):
if isinstance(x, ops.Tensor):
x = x.eval()
if isinstance(y, ops.Tensor):
y = y.eval()
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
with self.test_session():
for dtype in dtypes:
for x in map(dtype, values):
for y in map(dtype, values):
for fx in functions:
for fy in functions:
tf_x = fx(x)
tf_y = fy(y)
div = x / y
tf_div = tf_x / tf_y
check(div, tf_div)
floordiv = x // y
tf_floordiv = tf_x // tf_y
check(floordiv, tf_floordiv)
if __name__ == "__main__":
test.main()
| apache-2.0 |
tmpkus/photivo | scons-local-2.2.0/SCons/Tool/ilink.py | 14 | 2168 | """SCons.Tool.ilink
Tool-specific initialization for the OS/2 ilink linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ilink.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for ilink to an Environment."""
SCons.Tool.createProgBuilder(env)
env['LINK'] = 'ilink'
env['LINKFLAGS'] = SCons.Util.CLVar('')
env['LINKCOM'] = '$LINK $LINKFLAGS /O:$TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['LIBDIRPREFIX']='/LIBPATH:'
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX'
def exists(env):
return env.Detect('ilink')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
henaras/horizon | tools/install_venv.py | 99 | 2401 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv # noqa
def print_help(venv, root):
help = """
OpenStack development environment setup is complete.
OpenStack development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the OpenStack virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print(help % (venv, root))
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if os.environ.get('tools_path'):
root = os.environ['tools_path']
venv = os.path.join(root, '.venv')
if os.environ.get('venv'):
venv = os.environ['venv']
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'OpenStack'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
wasn-lab/visual-positioning | cpp/scons/scons-local-2.0.0.final.0/SCons/PathList.py | 34 | 8414 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/PathList.py 5023 2010/06/14 22:05:46 scons"
__doc__ = """SCons.PathList
A module for handling lists of directory paths (the sort of things
that get set as CPPPATH, LIBPATH, etc.) with as much caching of data and
efficiency as we can while still keeping the evaluation delayed so that we
Do the Right Thing (almost) regardless of how the variable is specified.
"""
import os
import SCons.Memoize
import SCons.Node
import SCons.Util
#
# Variables to specify the different types of entries in a PathList object:
#
TYPE_STRING_NO_SUBST = 0 # string with no '$'
TYPE_STRING_SUBST = 1 # string containing '$'
TYPE_OBJECT = 2 # other object
def node_conv(obj):
"""
This is the "string conversion" routine that we have our substitutions
use to return Nodes, not strings. This relies on the fact that an
EntryProxy object has a get() method that returns the underlying
Node that it wraps, which is a bit of architectural dependence
that we might need to break or modify in the future in response to
additional requirements.
"""
try:
get = obj.get
except AttributeError:
if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence( obj ):
result = obj
else:
result = str(obj)
else:
result = get()
return result
class _PathList(object):
"""
An actual PathList object.
"""
def __init__(self, pathlist):
"""
Initializes a PathList object, canonicalizing the input and
pre-processing it for quicker substitution later.
The stored representation of the PathList is a list of tuples
containing (type, value), where the "type" is one of the TYPE_*
variables defined above. We distinguish between:
strings that contain no '$' and therefore need no
delayed-evaluation string substitution (we expect that there
will be many of these and that we therefore get a pretty
big win from avoiding string substitution)
strings that contain '$' and therefore need substitution
(the hard case is things like '${TARGET.dir}/include',
which require re-evaluation for every target + source)
other objects (which may be something like an EntryProxy
that needs a method called to return a Node)
Pre-identifying the type of each element in the PathList up-front
and storing the type in the list of tuples is intended to reduce
the amount of calculation when we actually do the substitution
over and over for each target.
"""
if SCons.Util.is_String(pathlist):
pathlist = pathlist.split(os.pathsep)
elif not SCons.Util.is_Sequence(pathlist):
pathlist = [pathlist]
pl = []
for p in pathlist:
try:
index = p.find('$')
except (AttributeError, TypeError):
type = TYPE_OBJECT
else:
if index == -1:
type = TYPE_STRING_NO_SUBST
else:
type = TYPE_STRING_SUBST
pl.append((type, p))
self.pathlist = tuple(pl)
def __len__(self): return len(self.pathlist)
def __getitem__(self, i): return self.pathlist[i]
def subst_path(self, env, target, source):
"""
Performs construction variable substitution on a pre-digested
PathList for a specific target and source.
"""
result = []
for type, value in self.pathlist:
if type == TYPE_STRING_SUBST:
value = env.subst(value, target=target, source=source,
conv=node_conv)
if SCons.Util.is_Sequence(value):
result.extend(value)
continue
elif type == TYPE_OBJECT:
value = node_conv(value)
if value:
result.append(value)
return tuple(result)
class PathListCache(object):
"""
A class to handle caching of PathList lookups.
This class gets instantiated once and then deleted from the namespace,
so it's used as a Singleton (although we don't enforce that in the
usual Pythonic ways). We could have just made the cache a dictionary
in the module namespace, but putting it in this class allows us to
use the same Memoizer pattern that we use elsewhere to count cache
hits and misses, which is very valuable.
Lookup keys in the cache are computed by the _PathList_key() method.
Cache lookup should be quick, so we don't spend cycles canonicalizing
all forms of the same lookup key. For example, 'x:y' and ['x',
'y'] logically represent the same list, but we don't bother to
split string representations and treat those two equivalently.
(Note, however, that we do treat lists and tuples the same.)
The main type of duplication we're trying to catch will come from
looking up the same path list from two different clones of the
same construction environment. That is, given
env2 = env1.Clone()
both env1 and env2 will have the same CPPPATH value, and we can
cheaply avoid re-parsing both values of CPPPATH by using the
common value from this cache.
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self):
self._memo = {}
def _PathList_key(self, pathlist):
"""
Returns the key for memoization of PathLists.
Note that we want this to be pretty quick, so we don't completely
canonicalize all forms of the same list. For example,
'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir2'] may logically
represent the same list if you're executing from $ROOT, but
we're not going to bother splitting strings into path elements,
or massaging strings into Nodes, to identify that equivalence.
We just want to eliminate obvious redundancy from the normal
case of re-using exactly the same cloned value for a path.
"""
if SCons.Util.is_Sequence(pathlist):
pathlist = tuple(SCons.Util.flatten(pathlist))
return pathlist
memoizer_counters.append(SCons.Memoize.CountDict('PathList', _PathList_key))
def PathList(self, pathlist):
"""
Returns the cached _PathList object for the specified pathlist,
creating and caching a new object as necessary.
"""
pathlist = self._PathList_key(pathlist)
try:
memo_dict = self._memo['PathList']
except KeyError:
memo_dict = {}
self._memo['PathList'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
result = _PathList(pathlist)
memo_dict[pathlist] = result
return result
PathList = PathListCache().PathList
del PathListCache
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
metaplinius/still-lambda | pyglet/graphics/vertexdomain.py | 2 | 30048 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Manage related vertex attributes within a single vertex domain.
A vertex "domain" consists of a set of attribute descriptions that together
describe the layout of one or more vertex buffers which are used together to
specify the vertices in a primitive. Additionally, the domain manages the
buffers used to store the data and will resize them as necessary to accommodate
new vertices.
Domains can optionally be indexed, in which case they also manage a buffer
containing vertex indices. This buffer is grown separately and has no size
relation to the attribute buffers.
Applications can create vertices (and optionally, indices) within a domain
with the `VertexDomain.create` method. This returns a `VertexList`
representing the list of vertices created. The vertex attribute data within
the group can be modified, and the changes will be made to the underlying
buffers automatically.
The entire domain can be efficiently drawn in one step with the
`VertexDomain.draw` method, assuming all the vertices comprise primitives of
the same OpenGL primitive mode.
'''
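# A minimal usage sketch, not part of the original module (assumes an OpenGL
# context is current; the formats and data below are illustrative):
#
#     domain = create_domain('v2f/static', 'c3B/dynamic')
#     vlist = domain.create(3)
#     vlist.vertices = [0.0, 0.0, 1.0, 0.0, 0.5, 1.0]
#     vlist.colors = [255, 0, 0, 0, 255, 0, 0, 0, 255]
#     domain.draw(GL_TRIANGLES)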
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import re
from pyglet.gl import *
from pyglet.graphics import allocation, vertexattribute, vertexbuffer
_usage_format_re = re.compile(r'''
(?P<attribute>[^/]*)
(/ (?P<usage> static|dynamic|stream|none))?
''', re.VERBOSE)
_gl_usages = {
'static': GL_STATIC_DRAW,
'dynamic': GL_DYNAMIC_DRAW,
'stream': GL_STREAM_DRAW,
'none': GL_STREAM_DRAW_ARB, # Force no VBO
}
def _nearest_pow2(v):
# From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
# Credit: Sean Anderson
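# e.g. _nearest_pow2(3) == 4, _nearest_pow2(16) == 16, _nearest_pow2(17) == 32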
v -= 1
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
return v + 1
def create_attribute_usage(format):
'''Create an attribute and usage pair from a format string. The
format string is as documented in `pyglet.graphics.vertexattribute`, with
the addition of an optional usage component::
usage ::= attribute ( '/' ('static' | 'dynamic' | 'stream' | 'none') )?
If the usage is not given it defaults to 'dynamic'. The usage corresponds
to the OpenGL VBO usage hint, and for ``static`` also indicates a
preference for interleaved arrays. If ``none`` is specified a buffer
object is not created, and vertex data is stored in system memory.
Some examples:
``v3f/stream``
3D vertex position using floats, for stream usage
``c4b/static``
4-byte color attribute, for static usage
:return: attribute, usage
'''
match = _usage_format_re.match(format)
attribute_format = match.group('attribute')
attribute = vertexattribute.create_attribute(attribute_format)
usage = match.group('usage')
if usage:
vbo = not usage == 'none'
usage = _gl_usages[usage]
else:
usage = GL_DYNAMIC_DRAW
vbo = True
return (attribute, usage, vbo)
def create_domain(*attribute_usage_formats):
'''Create a vertex domain covering the given attribute usage formats.
See documentation for `create_attribute_usage` and
`pyglet.graphics.vertexattribute.create_attribute` for the grammar of
these format strings.
:rtype: `VertexDomain`
'''
attribute_usages = [create_attribute_usage(f) \
for f in attribute_usage_formats]
return VertexDomain(attribute_usages)
def create_indexed_domain(*attribute_usage_formats):
'''Create an indexed vertex domain covering the given attribute usage
formats. See documentation for `create_attribute_usage` and
`pyglet.graphics.vertexattribute.create_attribute` for the grammar of
these format strings.
:rtype: `VertexDomain`
'''
attribute_usages = [create_attribute_usage(f) \
for f in attribute_usage_formats]
return IndexedVertexDomain(attribute_usages)
class VertexDomain(object):
'''Management of a set of vertex lists.
Construction of a vertex domain is usually done with the `create_domain`
function.
'''
_version = 0
_initial_count = 16
def __init__(self, attribute_usages):
self.allocator = allocation.Allocator(self._initial_count)
# If there are any MultiTexCoord attributes, then a TexCoord attribute
# must be converted.
have_multi_texcoord = False
for attribute, _, _ in attribute_usages:
if isinstance(attribute, vertexattribute.MultiTexCoordAttribute):
have_multi_texcoord = True
break
static_attributes = []
attributes = []
self.buffer_attributes = [] # list of (buffer, attributes)
for attribute, usage, vbo in attribute_usages:
if (have_multi_texcoord and
isinstance(attribute, vertexattribute.TexCoordAttribute)):
attribute.convert_to_multi_tex_coord_attribute()
if usage == GL_STATIC_DRAW:
# Group attributes for interleaved buffer
static_attributes.append(attribute)
attributes.append(attribute)
else:
# Create non-interleaved buffer
attributes.append(attribute)
attribute.buffer = vertexbuffer.create_mappable_buffer(
attribute.stride * self.allocator.capacity,
usage=usage, vbo=vbo)
attribute.buffer.element_size = attribute.stride
attribute.buffer.attributes = (attribute,)
self.buffer_attributes.append(
(attribute.buffer, (attribute,)))
# Create buffer for interleaved data
if static_attributes:
vertexattribute.interleave_attributes(static_attributes)
stride = static_attributes[0].stride
buffer = vertexbuffer.create_mappable_buffer(
stride * self.allocator.capacity, usage=GL_STATIC_DRAW)
buffer.element_size = stride
self.buffer_attributes.append(
(buffer, static_attributes))
attributes.extend(static_attributes)
for attribute in static_attributes:
attribute.buffer = buffer
# Create named attributes for each attribute
self.attributes = attributes
self.attribute_names = {}
for attribute in attributes:
if isinstance(attribute, vertexattribute.GenericAttribute):
index = attribute.index
# TODO create a name and use it (e.g. 'generic3')
# XXX this won't migrate; not documented.
if 'generic' not in self.attribute_names:
self.attribute_names['generic'] = {}
assert index not in self.attribute_names['generic'], \
'More than one generic attribute with index %d' % index
self.attribute_names['generic'][index] = attribute
elif isinstance(attribute, vertexattribute.MultiTexCoordAttribute):
# XXX this won't migrate; not documented.
texture = attribute.texture
if 'multi_tex_coords' not in self.attribute_names:
self.attribute_names['multi_tex_coords'] = {}
assert texture not in self.attribute_names['multi_tex_coords'],\
'More than one multi_tex_coord attribute for texture %d' % \
texture
self.attribute_names['multi_tex_coords'][texture] = attribute
else:
name = attribute.plural
assert name not in self.attribute_names, \
'More than one "%s" attribute given' % name
self.attribute_names[name] = attribute
def __del__(self):
# Break circular refs that Python GC seems to miss even when forced
# collection.
for attribute in self.attributes:
try:
del attribute.buffer
except AttributeError:
pass
def _safe_alloc(self, count):
'''Allocate vertices, resizing the buffers if necessary.'''
try:
return self.allocator.alloc(count)
except allocation.AllocatorMemoryException as e:
capacity = _nearest_pow2(e.requested_capacity)
self._version += 1
for buffer, _ in self.buffer_attributes:
buffer.resize(capacity * buffer.element_size)
self.allocator.set_capacity(capacity)
return self.allocator.alloc(count)
def _safe_realloc(self, start, count, new_count):
'''Reallocate vertices, resizing the buffers if necessary.'''
try:
return self.allocator.realloc(start, count, new_count)
except allocation.AllocatorMemoryException as e:
capacity = _nearest_pow2(e.requested_capacity)
self._version += 1
for buffer, _ in self.buffer_attributes:
buffer.resize(capacity * buffer.element_size)
self.allocator.set_capacity(capacity)
return self.allocator.realloc(start, count, new_count)
def create(self, count):
'''Create a `VertexList` in this domain.
:Parameters:
`count` : int
Number of vertices to create.
:rtype: `VertexList`
'''
start = self._safe_alloc(count)
return VertexList(self, start, count)
def draw(self, mode, vertex_list=None):
'''Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `VertexList`
Vertex list to draw, or ``None`` for all lists in this domain.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
for buffer, attributes in self.buffer_attributes:
buffer.bind()
for attribute in attributes:
attribute.enable()
attribute.set_pointer(attribute.buffer.ptr)
if vertexbuffer._workaround_vbo_finish:
glFinish()
if vertex_list is not None:
glDrawArrays(mode, vertex_list.start, vertex_list.count)
else:
starts, sizes = self.allocator.get_allocated_regions()
primcount = len(starts)
if primcount == 0:
pass
elif primcount == 1:
# Common case
glDrawArrays(mode, starts[0], sizes[0])
elif gl_info.have_version(1, 4):
starts = (GLint * primcount)(*starts)
sizes = (GLsizei * primcount)(*sizes)
glMultiDrawArrays(mode, starts, sizes, primcount)
else:
for start, size in zip(starts, sizes):
glDrawArrays(mode, start, size)
for buffer, _ in self.buffer_attributes:
buffer.unbind()
glPopClientAttrib()
def _is_empty(self):
return not self.allocator.starts
def __repr__(self):
return '<%s@%x %s>' % (self.__class__.__name__, id(self),
self.allocator)
class VertexList(object):
'''A list of vertices within a `VertexDomain`. Use
`VertexDomain.create` to construct this list.
'''
def __init__(self, domain, start, count):
# TODO make private
self.domain = domain
self.start = start
self.count = count
def get_size(self):
'''Get the number of vertices in the list.
:rtype: int
'''
return self.count
def get_domain(self):
'''Get the domain this vertex list belongs to.
:rtype: `VertexDomain`
'''
return self.domain
def draw(self, mode):
'''Draw this vertex list in the given OpenGL mode.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
'''
self.domain.draw(mode, self)
def resize(self, count):
'''Resize this group.
:Parameters:
`count` : int
New number of vertices in the list.
'''
new_start = self.domain._safe_realloc(self.start, self.count, count)
if new_start != self.start:
# Copy contents to new location
for attribute in self.domain.attributes:
old = attribute.get_region(attribute.buffer,
self.start, self.count)
new = attribute.get_region(attribute.buffer,
new_start, self.count)
new.array[:] = old.array[:]
new.invalidate()
self.start = new_start
self.count = count
self._colors_cache_version = None
self._fog_coords_cache_version = None
self._edge_flags_cache_version = None
self._normals_cache_version = None
self._secondary_colors_cache_version = None
self._tex_coords_cache_version = None
self._vertices_cache_version = None
def delete(self):
'''Delete this group.'''
self.domain.allocator.dealloc(self.start, self.count)
def migrate(self, domain):
'''Move this group from its current domain and add to the specified
one. Attributes on domains must match. (In practice, used to change
parent state of some vertices).
:Parameters:
`domain` : `VertexDomain`
Domain to migrate this vertex list to.
'''
assert list(domain.attribute_names.keys()) == \
list(self.domain.attribute_names.keys()), 'Domain attributes must match.'
new_start = domain._safe_alloc(self.count)
for key, old_attribute in list(self.domain.attribute_names.items()):
old = old_attribute.get_region(old_attribute.buffer,
self.start, self.count)
new_attribute = domain.attribute_names[key]
new = new_attribute.get_region(new_attribute.buffer,
new_start, self.count)
new.array[:] = old.array[:]
new.invalidate()
self.domain.allocator.dealloc(self.start, self.count)
self.domain = domain
self.start = new_start
self._colors_cache_version = None
self._fog_coords_cache_version = None
self._edge_flags_cache_version = None
self._normals_cache_version = None
self._secondary_colors_cache_version = None
self._tex_coords_cache_version = None
self._vertices_cache_version = None
def _set_attribute_data(self, i, data):
attribute = self.domain.attributes[i]
# TODO without region
region = attribute.get_region(attribute.buffer, self.start, self.count)
region.array[:] = data
region.invalidate()
# ---
def _get_colors(self):
if (self._colors_cache_version != self.domain._version):
domain = self.domain
attribute = domain.attribute_names['colors']
self._colors_cache = attribute.get_region(
attribute.buffer, self.start, self.count)
self._colors_cache_version = domain._version
region = self._colors_cache
region.invalidate()
return region.array
def _set_colors(self, data):
self._get_colors()[:] = data
_colors_cache = None
_colors_cache_version = None
colors = property(_get_colors, _set_colors,
doc='''Array of color data.''')
# ---
def _get_fog_coords(self):
if (self._fog_coords_cache_version != self.domain._version):
domain = self.domain
attribute = domain.attribute_names['fog_coords']
self._fog_coords_cache = attribute.get_region(
attribute.buffer, self.start, self.count)
self._fog_coords_cache_version = domain._version
region = self._fog_coords_cache
region.invalidate()
return region.array
def _set_fog_coords(self, data):
self._get_fog_coords()[:] = data
_fog_coords_cache = None
_fog_coords_cache_version = None
fog_coords = property(_get_fog_coords, _set_fog_coords,
doc='''Array of fog coordinate data.''')
# ---
def _get_edge_flags(self):
if (self._edge_flags_cache_version != self.domain._version):
domain = self.domain
attribute = domain.attribute_names['edge_flags']
self._edge_flags_cache = attribute.get_region(
attribute.buffer, self.start, self.count)
self._edge_flags_cache_version = domain._version
region = self._edge_flags_cache
region.invalidate()
return region.array
def _set_edge_flags(self, data):
self._get_edge_flags()[:] = data
_edge_flags_cache = None
_edge_flags_cache_version = None
edge_flags = property(_get_edge_flags, _set_edge_flags,
doc='''Array of edge flag data.''')
# ---
def _get_normals(self):
if (self._normals_cache_version != self.domain._version):
domain = self.domain
attribute = domain.attribute_names['normals']
self._normals_cache = attribute.get_region(
attribute.buffer, self.start, self.count)
self._normals_cache_version = domain._version
region = self._normals_cache
region.invalidate()
return region.array
def _set_normals(self, data):
self._get_normals()[:] = data
_normals_cache = None
_normals_cache_version = None
normals = property(_get_normals, _set_normals,
doc='''Array of normal vector data.''')
# ---
def _get_secondary_colors(self):
if (self._secondary_colors_cache_version != self.domain._version):
domain = self.domain
attribute = domain.attribute_names['secondary_colors']
self._secondary_colors_cache = attribute.get_region(
attribute.buffer, self.start, self.count)
self._secondary_colors_cache_version = domain._version
region = self._secondary_colors_cache
region.invalidate()
return region.array
def _set_secondary_colors(self, data):
self._get_secondary_colors()[:] = data
_secondary_colors_cache = None
_secondary_colors_cache_version = None
secondary_colors = property(_get_secondary_colors, _set_secondary_colors,
doc='''Array of secondary color data.''')
# ---
_tex_coords_cache = None
_tex_coords_cache_version = None
def _get_tex_coords(self):
if (self._tex_coords_cache_version != self.domain._version):
domain = self.domain
attribute = domain.attribute_names['tex_coords']
self._tex_coords_cache = attribute.get_region(
attribute.buffer, self.start, self.count)
self._tex_coords_cache_version = domain._version
region = self._tex_coords_cache
region.invalidate()
return region.array
def _set_tex_coords(self, data):
self._get_tex_coords()[:] = data
tex_coords = property(_get_tex_coords, _set_tex_coords,
doc='''Array of texture coordinate data.''')
# ---
_vertices_cache = None
_vertices_cache_version = None
def _get_vertices(self):
if (self._vertices_cache_version != self.domain._version):
domain = self.domain
attribute = domain.attribute_names['vertices']
self._vertices_cache = attribute.get_region(
attribute.buffer, self.start, self.count)
self._vertices_cache_version = domain._version
region = self._vertices_cache
region.invalidate()
return region.array
def _set_vertices(self, data):
self._get_vertices()[:] = data
vertices = property(_get_vertices, _set_vertices,
doc='''Array of vertex coordinate data.''')
class IndexedVertexDomain(VertexDomain):
'''Management of a set of indexed vertex lists.
Construction of an indexed vertex domain is usually done with the
`create_indexed_domain` function.
'''
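# An illustrative indexed sketch, not part of the original module (assumes an
# OpenGL context; the data below is hypothetical):
#
#     domain = create_indexed_domain('v2f/static')
#     vlist = domain.create(4, 6)
#     vlist.vertices = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]
#     vlist.indices = [0, 1, 2, 0, 2, 3]
#     domain.draw(GL_TRIANGLES)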
_initial_index_count = 16
def __init__(self, attribute_usages, index_gl_type=GL_UNSIGNED_INT):
super(IndexedVertexDomain, self).__init__(attribute_usages)
self.index_allocator = allocation.Allocator(self._initial_index_count)
self.index_gl_type = index_gl_type
self.index_c_type = vertexattribute._c_types[index_gl_type]
self.index_element_size = ctypes.sizeof(self.index_c_type)
self.index_buffer = vertexbuffer.create_mappable_buffer(
self.index_allocator.capacity * self.index_element_size,
target=GL_ELEMENT_ARRAY_BUFFER)
def _safe_index_alloc(self, count):
'''Allocate indices, resizing the buffers if necessary.'''
try:
return self.index_allocator.alloc(count)
except allocation.AllocatorMemoryException as e:
capacity = _nearest_pow2(e.requested_capacity)
self._version += 1
self.index_buffer.resize(capacity * self.index_element_size)
self.index_allocator.set_capacity(capacity)
return self.index_allocator.alloc(count)
def _safe_index_realloc(self, start, count, new_count):
'''Reallocate indices, resizing the buffers if necessary.'''
try:
return self.index_allocator.realloc(start, count, new_count)
except allocation.AllocatorMemoryException as e:
capacity = _nearest_pow2(e.requested_capacity)
self._version += 1
self.index_buffer.resize(capacity * self.index_element_size)
self.index_allocator.set_capacity(capacity)
return self.index_allocator.realloc(start, count, new_count)
def create(self, count, index_count):
'''Create an `IndexedVertexList` in this domain.
:Parameters:
`count` : int
Number of vertices to create
`index_count`
Number of indices to create
'''
start = self._safe_alloc(count)
index_start = self._safe_index_alloc(index_count)
return IndexedVertexList(self, start, count, index_start, index_count)
def get_index_region(self, start, count):
'''Get a region of the index buffer.
:Parameters:
`start` : int
Start of the region to map.
`count` : int
Number of indices to map.
:rtype: Array of int
'''
byte_start = self.index_element_size * start
byte_count = self.index_element_size * count
ptr_type = ctypes.POINTER(self.index_c_type * count)
return self.index_buffer.get_region(byte_start, byte_count, ptr_type)
def draw(self, mode, vertex_list=None):
'''Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `IndexedVertexList`
Vertex list to draw, or ``None`` for all lists in this domain.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
for buffer, attributes in self.buffer_attributes:
buffer.bind()
for attribute in attributes:
attribute.enable()
attribute.set_pointer(attribute.buffer.ptr)
self.index_buffer.bind()
if vertexbuffer._workaround_vbo_finish:
glFinish()
if vertex_list is not None:
glDrawElements(mode, vertex_list.index_count, self.index_gl_type,
self.index_buffer.ptr +
vertex_list.index_start * self.index_element_size)
else:
starts, sizes = self.index_allocator.get_allocated_regions()
primcount = len(starts)
if primcount == 0:
pass
elif primcount == 1:
# Common case
glDrawElements(mode, sizes[0], self.index_gl_type,
self.index_buffer.ptr + starts[0])
elif gl_info.have_version(1, 4):
starts = [s * self.index_element_size + self.index_buffer.ptr for s in starts]
starts = ctypes.cast((GLuint * primcount)(*starts), ctypes.POINTER(ctypes.c_void_p))
sizes = (GLsizei * primcount)(*sizes)
glMultiDrawElements(mode, sizes, self.index_gl_type, starts,
primcount)
else:
for start, size in zip(starts, sizes):
glDrawElements(mode, size, self.index_gl_type,
self.index_buffer.ptr +
start * self.index_element_size)
self.index_buffer.unbind()
for buffer, _ in self.buffer_attributes:
buffer.unbind()
glPopClientAttrib()
class IndexedVertexList(VertexList):
'''A list of vertices within an `IndexedVertexDomain` that are indexed.
Use `IndexedVertexDomain.create` to construct this list.
'''
def __init__(self, domain, start, count, index_start, index_count):
super(IndexedVertexList, self).__init__(domain, start, count)
self.index_start = index_start
self.index_count = index_count
def draw(self, mode):
self.domain.draw(mode, self)
def resize(self, count, index_count):
'''Resize this group.
:Parameters:
`count` : int
New number of vertices in the list.
`index_count` : int
New number of indices in the list.
'''
old_start = self.start
super(IndexedVertexList, self).resize(count)
# Change indices (because vertices moved)
if old_start != self.start:
diff = self.start - old_start
self.indices[:] = [i + diff for i in self.indices]
# Resize indices
new_start = self.domain._safe_index_realloc(
self.index_start, self.index_count, index_count)
if new_start != self.index_start:
old = self.domain.get_index_region(
self.index_start, self.index_count)
new = self.domain.get_index_region(
new_start, self.index_count)
new.array[:] = old.array[:]
new.invalidate()
self.index_start = new_start
self.index_count = index_count
self._indices_cache_version = None
def delete(self):
'''Delete this group.'''
super(IndexedVertexList, self).delete()
self.domain.index_allocator.dealloc(self.index_start, self.index_count)
def _set_index_data(self, data):
# TODO without region
region = self.domain.get_index_region(
self.index_start, self.index_count)
region.array[:] = data
region.invalidate()
# ---
def _get_indices(self):
if self._indices_cache_version != self.domain._version:
domain = self.domain
self._indices_cache = domain.get_index_region(
self.index_start, self.index_count)
self._indices_cache_version = domain._version
region = self._indices_cache
region.invalidate()
return region.array
def _set_indices(self, data):
self._get_indices()[:] = data
_indices_cache = None
_indices_cache_version = None
indices = property(_get_indices, _set_indices,
doc='''Array of index data.''')
| bsd-3-clause |
kitianFresh/awesome-python3-webapp | www/app.py | 1 | 5548 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
async web application.
'''
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
from config import configs
import orm
from coroweb import add_routes, add_static
from handlers import cookie2user, COOKIE_NAME
def init_jinja2(app, **kw):
logging.info('init jinja2...')
options = dict(
autoescape = kw.get('autoescape', True),
block_start_string = kw.get('block_start_string', '{%'),
block_end_string = kw.get('block_end_string', '%}'),
variable_start_string = kw.get('variable_start_string', '{{'),
variable_end_string = kw.get('variable_end_string', '}}'),
auto_reload = kw.get('auto_reload', True)
)
path = kw.get('path', None)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
logging.info('set jinja2 template path: %s' % path)
env = Environment(loader=FileSystemLoader(path), **options)
filters = kw.get('filters', None)
if filters is not None:
for name, f in filters.items():
env.filters[name] = f
app['__templating__'] = env
@asyncio.coroutine
def logger_factory(app, handler):
@asyncio.coroutine
def logger(request):
logging.info('Request: %s %s' % (request.method, request.path))
# yield from asyncio.sleep(0.3)
return (yield from handler(request))
return logger
@asyncio.coroutine
def auth_factory(app, handler):
@asyncio.coroutine
def auth(request):
logging.info('check user: %s %s' % (request.method, request.path))
request.__user__ = None
cookie_str = request.cookies.get(COOKIE_NAME)
if cookie_str:
user = yield from cookie2user(cookie_str)
if user:
logging.info('set current user: %s' % user.email)
request.__user__ = user
if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
return web.HTTPFound('/signin')
return (yield from handler(request))
return auth
@asyncio.coroutine
def data_factory(app, handler):
@asyncio.coroutine
def parse_data(request):
if request.method == 'POST':
if request.content_type.startswith('application/json'):
request.__data__ = yield from request.json()
logging.info('request json: %s' % str(request.__data__))
elif request.content_type.startswith('application/x-www-form-urlencoded'):
request.__data__ = yield from request.post()
logging.info('request form: %s' % str(request.__data__))
return (yield from handler(request))
return parse_data
@asyncio.coroutine
def response_factory(app, handler):
@asyncio.coroutine
def response(request):
logging.info('Response handler...')
r = yield from handler(request)
if isinstance(r, web.StreamResponse):
return r
if isinstance(r, bytes):
resp = web.Response(body=r)
resp.content_type = 'application/octet-stream'
return resp
if isinstance(r, str):
if r.startswith('redirect:'):
return web.HTTPFound(r[9:])
resp = web.Response(body=r.encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, dict):
template = r.get('__template__')
if template is None:
resp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
resp.content_type = 'application/json;charset=utf-8'
return resp
else:
r['__user__'] = request.__user__
resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, int) and r >= 100 and r < 600:
return web.Response(status=r)
if isinstance(r, tuple) and len(r) == 2:
t, m = r
if isinstance(t, int) and t >= 100 and t < 600:
return web.Response(status=t, body=str(m).encode('utf-8'))
# default:
resp = web.Response(body=str(r).encode('utf-8'))
resp.content_type = 'text/plain;charset=utf-8'
return resp
return response
def datetime_filter(t):
delta = int(time.time() - t)
if delta < 60:
return u'1分钟前'
if delta < 3600:
return u'%s分钟前' % (delta // 60)
if delta < 86400:
return u'%s小时前' % (delta // 3600)
if delta < 604800:
return u'%s天前' % (delta // 86400)
dt = datetime.fromtimestamp(t)
return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)
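# datetime_filter is registered in init() below as the jinja2 'datetime'
# filter, so templates can render timestamps as {{ some_timestamp|datetime }}.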
@asyncio.coroutine
def init(loop):
yield from orm.create_pool(loop=loop, **configs.db)
app = web.Application(loop=loop, middlewares=[
logger_factory, auth_factory, response_factory
])
init_jinja2(app, filters=dict(datetime=datetime_filter))
add_routes(app, 'handlers')
add_static(app)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 8888)
logging.info('server started at http://127.0.0.1:8888...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
| apache-2.0 |
fernandog/Medusa | ext/sqlalchemy/ext/mutable.py | 1 | 32415 | # ext/mutable.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""Provide support for tracking of in-place changes to scalar values,
which are propagated into ORM change events on owning parent objects.
.. versionadded:: 0.7 :mod:`sqlalchemy.ext.mutable` replaces SQLAlchemy's
legacy approach to in-place mutations of scalar values; see
:ref:`07_migration_mutation_extension`.
.. _mutable_scalars:
Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
class JSONEncodedDict(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The
:mod:`sqlalchemy.ext.mutable` extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it. Below, we illustrate a simple
version of the :class:`.MutableDict` dictionary object, which applies
the :class:`.Mutable` mixin to a plain Python dictionary::
from sqlalchemy.ext.mutable import Mutable
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
variants on this approach, such as subclassing ``UserDict.UserDict`` or
``collections.MutableMapping``; the part that's important to this example is
that the :meth:`.Mutable.changed` method is called whenever an in-place
change to the datastructure takes place.
We also redefine the :meth:`.Mutable.coerce` method which will be used to
convert any values that are not instances of ``MutableDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well
created our ``JSONEncodedDict`` such that it always returns an instance
of ``MutableDict``, and additionally ensured that all calling code
uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not
overridden, any values applied to a parent object which are not instances
of the mutable type will raise a ``ValueError``.
Our new ``MutableDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types. This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute. For example, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutableDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
from sqlalchemy import mapper
class MyDataClass(object):
pass
# associates mutation listeners with MyDataClass.data
mapper(MyDataClass, my_data)
The ``MyDataClass.data`` member will now be notified of in place changes
to its value.
There's no difference in usage when using declarative::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
Any in-place changes to the ``MyDataClass.data`` member
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> m1 = MyDataClass(data={'value1':'foo'})
>>> sess.add(m1)
>>> sess.commit()
>>> m1.data['value1'] = 'bar'
>>> m1 in sess.dirty
True
The ``MutableDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using
:meth:`~.Mutable.associate_with`. This is similar to
:meth:`~.Mutable.as_mutable` except it will intercept all occurrences
of ``MutableDict`` in all mappings unconditionally, without
the need to declare it individually::
MutableDict.associate_with(JSONEncodedDict)
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the
placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
stores a mapping of parent mapped objects keyed to the attribute name under
which they are associated with this value. ``WeakKeyDictionary`` objects are
not picklable, due to the fact that they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~MutableBase._parents` collection from the pickle
stream::
class MyMutableType(Mutable):
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_parents', None)
return d
With our dictionary example, we need to return the contents of the dict itself
(and also restore them on __setstate__)::
class MutableDict(Mutable, dict):
# ....
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
In the case that our mutable value object is pickled as it is attached to one
or more parent objects that are also part of the pickle, the :class:`.Mutable`
mixin will re-establish the :attr:`.Mutable._parents` collection on each value
object as the owning parents themselves are unpickled.
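For example, a minimal sketch (assuming the ``MyDataClass`` mapping from
above; untested)::
    import pickle
    m1 = MyDataClass(data={'value1': 'foo'})
    m2 = pickle.loads(pickle.dumps(m1))
    # the restored MutableDict is re-linked to its new parent, so
    # in-place changes still mark the attribute as dirty
    m2.data['value1'] = 'bar'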
Receiving Events
----------------
The :meth:`.AttributeEvents.modified` event handler may be used to receive
an event when a mutable scalar emits a change event. This event handler
is called when the :func:`.attributes.flag_modified` function is called
from within the mutable extension::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import event
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
@event.listens_for(MyDataClass.data, "modified")
def modified_json(instance):
print("json value modified:", instance.data)
.. _mutable_composites:
Establishing Mutability on Composites
=====================================
Composites are a special ORM feature which allow a single scalar attribute to
be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
.. versionchanged:: 0.7
The internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
change events to its parents via the :meth:`.MutableComposite.changed` method.
In the case of a composite class, the detection is usually via the usage of
Python descriptors (i.e. ``@property``), or alternatively via the special
Python method ``__setattr__()``. Below we expand upon the ``Point`` class
introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
and to also route attribute set events via ``__setattr__`` to the
:meth:`.MutableComposite.changed` method::
from sqlalchemy.ext.mutable import MutableComposite
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
"Intercept set events"
# set the attribute
object.__setattr__(self, key, value)
# alert all parents to the change
self.changed()
def __composite_values__(self):
return self.x, self.y
def __eq__(self, other):
return isinstance(other, Point) and \
other.x == self.x and \
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
The :class:`.MutableComposite` class uses a Python metaclass to automatically
establish listeners for any usage of :func:`.orm.composite` that specifies our
``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
listeners are established which will route change events from ``Point``
objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
from sqlalchemy.orm import composite, mapper
from sqlalchemy import Table, Column
vertices = Table('vertices', metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Vertex(object):
pass
mapper(Vertex, vertices, properties={
'start': composite(Point, vertices.c.x1, vertices.c.y1),
'end': composite(Point, vertices.c.x2, vertices.c.y2)
})
Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
>>> sess.add(v1)
>>> sess.commit()
>>> v1.end.x = 8
>>> v1 in sess.dirty
True
Coercing Mutable Composites
---------------------------
The :meth:`.MutableBase.coerce` method is also supported on composite types.
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
method is only called for attribute set operations, not load operations.
Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
to using a :func:`.validates` validation routine for all attributes which
make use of the custom composite type::
class Point(MutableComposite):
# other Point methods
# ...
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
elif not isinstance(value, Point):
raise ValueError("tuple or Point expected")
return value
.. versionadded:: 0.7.10,0.8.0b2
Support for the :meth:`.MutableBase.coerce` method in conjunction with
objects of type :class:`.MutableComposite`.
Supporting Pickling
--------------------
As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
class uses a ``weakref.WeakKeyDictionary`` available via the
:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
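For example (a minimal, untested sketch reusing the ``Vertex`` mapping
from above)::
    import pickle
    v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
    v2 = pickle.loads(pickle.dumps(v1))
    # _parents is restored on both Point objects, so this mutation
    # still propagates to the owning Vertex's columns
    v2.end.x = 8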
"""
from ..orm.attributes import flag_modified
from .. import event, types
from ..orm import mapper, object_mapper, Mapper
from ..util import memoized_property
from ..sql.base import SchemaEventTarget
import weakref
class MutableBase(object):
"""Common base class to :class:`.Mutable`
and :class:`.MutableComposite`.
"""
@memoized_property
def _parents(self):
"""Dictionary of parent object->attribute name on the parent.
This attribute is a so-called "memoized" property. It initializes
itself with a new ``weakref.WeakKeyDictionary`` the first time
it is accessed, returning the same object upon subsequent access.
"""
return weakref.WeakKeyDictionary()
@classmethod
def coerce(cls, key, value):
"""Given a value, coerce it into the target type.
Can be overridden by custom subclasses to coerce incoming
data into a particular type.
By default, raises ``ValueError``.
This method is called in different scenarios depending on if
the parent class is of type :class:`.Mutable` or of type
:class:`.MutableComposite`. In the case of the former, it is called
for both attribute-set operations as well as during ORM loading
operations. For the latter, it is only called during attribute-set
operations; the mechanics of the :func:`.composite` construct
handle coercion during load operations.
:param key: string name of the ORM-mapped attribute being set.
:param value: the incoming value.
:return: the method should return the coerced value, or raise
``ValueError`` if the coercion cannot be completed.
"""
if value is None:
return None
msg = "Attribute '%s' does not accept objects of type %s"
raise ValueError(msg % (key, type(value)))
@classmethod
def _get_listen_keys(cls, attribute):
"""Given a descriptor attribute, return a ``set()`` of the attribute
keys which indicate a change in the state of this attribute.
This is normally just ``set([attribute.key])``, but can be overridden
to provide for additional keys. E.g. a :class:`.MutableComposite`
augments this set with the attribute keys associated with the columns
that comprise the composite value.
This collection is consulted in the case of intercepting the
:meth:`.InstanceEvents.refresh` and
:meth:`.InstanceEvents.refresh_flush` events, which pass along a list
of attribute names that have been refreshed; the list is compared
against this set to determine if action needs to be taken.
.. versionadded:: 1.0.5
"""
return {attribute.key}
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
key = attribute.key
if parent_cls is not attribute.class_:
return
# rely on "propagate" here
parent_cls = attribute.class_
listen_keys = cls._get_listen_keys(attribute)
def load(state, *args):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
``Mutable``.
"""
val = state.dict.get(key, None)
if val is not None:
if coerce:
val = cls.coerce(key, val)
state.dict[key] = val
val._parents[state.obj()] = key
def load_attrs(state, ctx, attrs):
if not attrs or listen_keys.intersection(attrs):
load(state)
def set(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
Establish a weak reference to the parent object
on the incoming value, remove it for the one
outgoing.
"""
if value is oldvalue:
return value
if not isinstance(value, cls):
value = cls.coerce(key, value)
if value is not None:
value._parents[target.obj()] = key
if isinstance(oldvalue, cls):
oldvalue._parents.pop(target.obj(), None)
return value
def pickle(state, state_dict):
val = state.dict.get(key, None)
if val is not None:
if 'ext.mutable.values' not in state_dict:
state_dict['ext.mutable.values'] = []
state_dict['ext.mutable.values'].append(val)
def unpickle(state, state_dict):
if 'ext.mutable.values' in state_dict:
for val in state_dict['ext.mutable.values']:
val._parents[state.obj()] = key
event.listen(parent_cls, 'load', load,
raw=True, propagate=True)
event.listen(parent_cls, 'refresh', load_attrs,
raw=True, propagate=True)
event.listen(parent_cls, 'refresh_flush', load_attrs,
raw=True, propagate=True)
event.listen(attribute, 'set', set,
raw=True, retval=True, propagate=True)
event.listen(parent_cls, 'pickle', pickle,
raw=True, propagate=True)
event.listen(parent_cls, 'unpickle', unpickle,
raw=True, propagate=True)
class Mutable(MutableBase):
"""Mixin that defines transparent propagation of change
events to a parent object.
See the example in :ref:`mutable_scalars` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
flag_modified(parent, key)
@classmethod
def associate_with_attribute(cls, attribute):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
cls._listen_on_attribute(attribute, True, attribute.class_)
@classmethod
def associate_with(cls, sqltype):
"""Associate this wrapper with all future mapped columns
of the given type.
This is a convenience method that calls
``associate_with_attribute`` automatically.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.associate_with` for types that are permanent to an
application, not with ad-hoc types else this will cause unbounded
growth in memory usage.
"""
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if isinstance(prop.columns[0].type, sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
@classmethod
def as_mutable(cls, sqltype):
"""Associate a SQL type with this mutable Python type.
This establishes listeners that will detect ORM mappings against
the given type, adding mutation event trackers to those mappings.
The type is returned, unconditionally as an instance, so that
:meth:`.as_mutable` can be used inline::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('data', MyMutableType.as_mutable(PickleType))
)
Note that the returned type is always an instance, even if a class
is given, and that only columns which are declared specifically with
that type instance receive additional instrumentation.
To associate a particular mutable type with all occurrences of a
particular type, use the :meth:`.Mutable.associate_with` classmethod
of the particular :class:`.Mutable` subclass to establish a global
association.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.as_mutable` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
"""
sqltype = types.to_instance(sqltype)
# a SchemaType will be copied when the Column is copied,
# and we'll lose our ability to link that type back to the original.
# so track our original type w/ columns
if isinstance(sqltype, SchemaEventTarget):
@event.listens_for(sqltype, "before_parent_attach")
def _add_column_memo(sqltyp, parent):
parent.info['_ext_mutable_orig_type'] = sqltyp
schema_event_check = True
else:
schema_event_check = False
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if (
schema_event_check and
hasattr(prop.expression, 'info') and
prop.expression.info.get('_ext_mutable_orig_type')
is sqltype
) or (
prop.columns[0].type is sqltype
):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
return sqltype
class MutableComposite(MutableBase):
"""Mixin that defines transparent propagation of change
events on a SQLAlchemy "composite" object to its
owning parent or parents.
See the example in :ref:`mutable_composites` for usage information.
"""
@classmethod
def _get_listen_keys(cls, attribute):
return {attribute.key}.union(attribute.property._attribute_keys)
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
prop = object_mapper(parent).get_property(key)
for value, attr_name in zip(
self.__composite_values__(),
prop._attribute_keys):
setattr(parent, attr_name, value)
def _setup_composite_listener():
def _listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if (hasattr(prop, 'composite_class') and
isinstance(prop.composite_class, type) and
issubclass(prop.composite_class, MutableComposite)):
prop.composite_class._listen_on_attribute(
getattr(class_, prop.key), False, class_)
if not event.contains(Mapper, "mapper_configured", _listen_for_type):
event.listen(Mapper, 'mapper_configured', _listen_for_type)
_setup_composite_listener()
class MutableDict(Mutable, dict):
"""A dictionary type that implements :class:`.Mutable`.
The :class:`.MutableDict` object implements a dictionary that will
emit change events to the underlying mapping when the contents of
the dictionary are altered, including when values are added or removed.
Note that :class:`.MutableDict` does **not** apply mutable tracking to the
*values themselves* inside the dictionary. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
dictionary structure, such as a JSON structure. To support this use case,
build a subclass of :class:`.MutableDict` that provides appropriate
coercion to the values placed in the dictionary so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 0.8
.. seealso::
:class:`.MutableList`
:class:`.MutableSet`
"""
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def setdefault(self, key, value):
result = dict.setdefault(self, key, value)
self.changed()
return result
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
def update(self, *a, **kw):
dict.update(self, *a, **kw)
self.changed()
def pop(self, *arg):
result = dict.pop(self, *arg)
self.changed()
return result
def popitem(self):
result = dict.popitem(self)
self.changed()
return result
def clear(self):
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionary to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, dict):
return cls(value)
return Mutable.coerce(key, value)
else:
return value
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
class MutableList(Mutable, list):
"""A list type that implements :class:`.Mutable`.
The :class:`.MutableList` object implements a list that will
emit change events to the underlying mapping when the contents of
the list are altered, including when values are added or removed.
Note that :class:`.MutableList` does **not** apply mutable tracking to the
*values themselves* inside the list. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
mutable structure, such as a JSON structure. To support this use case,
build a subclass of :class:`.MutableList` that provides appropriate
coercion to the values placed in the list so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 1.1
.. seealso::
:class:`.MutableDict`
:class:`.MutableSet`
"""
def __setitem__(self, index, value):
"""Detect list set events and emit change events."""
list.__setitem__(self, index, value)
self.changed()
def __setslice__(self, start, end, value):
"""Detect list set events and emit change events."""
list.__setslice__(self, start, end, value)
self.changed()
def __delitem__(self, index):
"""Detect list del events and emit change events."""
list.__delitem__(self, index)
self.changed()
def __delslice__(self, start, end):
"""Detect list del events and emit change events."""
list.__delslice__(self, start, end)
self.changed()
def pop(self, *arg):
result = list.pop(self, *arg)
self.changed()
return result
def append(self, x):
list.append(self, x)
self.changed()
def extend(self, x):
list.extend(self, x)
self.changed()
def __iadd__(self, x):
self.extend(x)
return self
def insert(self, i, x):
list.insert(self, i, x)
self.changed()
def remove(self, i):
list.remove(self, i)
self.changed()
def clear(self):
list.clear(self)
self.changed()
def sort(self):
list.sort(self)
self.changed()
def reverse(self):
list.reverse(self)
self.changed()
@classmethod
def coerce(cls, index, value):
"""Convert plain list to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, list):
return cls(value)
return Mutable.coerce(index, value)
else:
return value
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class MutableSet(Mutable, set):
"""A set type that implements :class:`.Mutable`.
The :class:`.MutableSet` object implements a set that will
emit change events to the underlying mapping when the contents of
the set are altered, including when values are added or removed.
Note that :class:`.MutableSet` does **not** apply mutable tracking to the
*values themselves* inside the set. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
mutable structure. To support this use case,
build a subclass of :class:`.MutableSet` that provides appropriate
coercion to the values placed in the set so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 1.1
.. seealso::
:class:`.MutableDict`
:class:`.MutableList`
"""
def update(self, *arg):
set.update(self, *arg)
self.changed()
def intersection_update(self, *arg):
set.intersection_update(self, *arg)
self.changed()
def difference_update(self, *arg):
set.difference_update(self, *arg)
self.changed()
def symmetric_difference_update(self, *arg):
set.symmetric_difference_update(self, *arg)
self.changed()
def __ior__(self, other):
self.update(other)
return self
def __iand__(self, other):
self.intersection_update(other)
return self
def __ixor__(self, other):
self.symmetric_difference_update(other)
return self
def __isub__(self, other):
self.difference_update(other)
return self
def add(self, elem):
set.add(self, elem)
self.changed()
def remove(self, elem):
set.remove(self, elem)
self.changed()
def discard(self, elem):
set.discard(self, elem)
self.changed()
def pop(self, *arg):
result = set.pop(self, *arg)
self.changed()
return result
def clear(self):
set.clear(self)
self.changed()
@classmethod
def coerce(cls, index, value):
"""Convert plain set to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, set):
return cls(value)
return Mutable.coerce(index, value)
else:
return value
def __getstate__(self):
return set(self)
def __setstate__(self, state):
self.update(state)
def __reduce_ex__(self, proto):
return (self.__class__, (list(self), ))
| gpl-3.0 |
brakhane/python-mode | pymode/libs/pylama/hook.py | 17 | 2874 | """ SCM hooks. Integration with git and mercurial. """
from __future__ import absolute_import
import sys
from os import path as op, chmod
from subprocess import Popen, PIPE
from .main import LOGGER
from .config import parse_options, setup_logger
try:
from configparser import ConfigParser # noqa
except ImportError: # Python 2
from ConfigParser import ConfigParser
def run(command):
""" Run a shell command.
:return: tuple of (returncode, stdout lines, stderr lines)
"""
p = Popen(command.split(), stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
return (p.returncode, [line.strip() for line in stdout.splitlines()],
[line.strip() for line in stderr.splitlines()])
def git_hook():
""" Run pylama after git commit. """
from .main import check_files
_, files_modified, _ = run("git diff-index --cached --name-only HEAD")
options = parse_options()
setup_logger(options)
check_files([f for f in map(str, files_modified)], options)
def hg_hook(ui, repo, node=None, **kwargs):
""" Run pylama after mercurial commit. """
from .main import check_files
seen = set()
paths = []
if len(repo):
for rev in range(repo[node], len(repo)):
for file_ in repo[rev].files():
file_ = op.join(repo.root, file_)
if file_ in seen or not op.exists(file_):
continue
seen.add(file_)
paths.append(file_)
options = parse_options()
setup_logger(options)
check_files(paths, options)
def install_git(path):
""" Install hook in Git repository. """
hook = op.join(path, 'pre-commit')
with open(hook, 'w') as fd:
fd.write("""#!/usr/bin/env python
import sys
from pylama.hook import git_hook
if __name__ == '__main__':
sys.exit(git_hook())
""")
chmod(hook, 484)
def install_hg(path):
""" Install hook in Mercurial repository. """
hook = op.join(path, 'hgrc')
if not op.isfile(hook):
open(hook, 'w+').close()
c = ConfigParser()
c.readfp(open(hook, 'r'))
if not c.has_section('hooks'):
c.add_section('hooks')
if not c.has_option('hooks', 'commit'):
c.set('hooks', 'commit', 'python:pylama.hook.hg_hook')
if not c.has_option('hooks', 'qrefresh'):
c.set('hooks', 'qrefresh', 'python:pylama.hook.hg_hook')
c.write(open(hook, 'w+'))
def install_hook(path):
""" Auto definition of SCM and hook installation. """
git = op.join(path, '.git', 'hooks')
hg = op.join(path, '.hg')
if op.exists(git):
install_git(git)
LOGGER.warn('Git hook has been installed.')
elif op.exists(hg):
install_hg(hg)
LOGGER.warn('Mercurial hook has been installed.')
else:
LOGGER.error('No VCS has been found. Check your path.')
sys.exit(1)
# lint_ignore=F0401,E1103
| lgpl-3.0 |
ingenieroariel/geonode | geonode/upload/templatetags/upload_tags.py | 32 | 4015 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django import template
register = template.Library()
@register.simple_tag
def upload_js():
return """
<!-- The template to display files available for upload -->
<script id="template-upload" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-upload fade">
<td class="preview"><span class="fade"></span></td>
<td class="name"><span>{%=file.name%}</span></td>
<td class="size"><span>{%=o.formatFileSize(file.size)%}</span></td>
{% if (file.error) { %}
<td class="error" colspan="2"><span class="label label-important">{%=locale.fileupload.error%} \
</span> {%=locale.fileupload.errors[file.error] || file.error%}</td>
{% } else if (o.files.valid && !i) { %}
<td>
<div class="progress progress-success progress-striped active"><div class="bar" style="width:0%;">\
</div></div>
</td>
<td class="start">{% if (!o.options.autoUpload) { %}
<button class="btn btn-success">
<i class="icon-upload icon-white"></i>
<span>{%=locale.fileupload.start%}</span>
</button>
{% } %}</td>
{% } else { %}
<td colspan="2"></td>
{% } %}
<td class="cancel">{% if (!i) { %}
<button class="btn btn-warning">
<i class="icon-ban-circle icon-white"></i>
<span>{%=locale.fileupload.cancel%}</span>
</button>
{% } %}</td>
</tr>
{% } %}
</script>
<!-- The template to display files available for download -->
<script id="template-download" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-download fade">
{% if (file.error) { %}
<td></td>
<td class="name"><span>{%=file.name%}</span></td>
<td class="size"><span>{%=o.formatFileSize(file.size)%}</span></td>
<td class="error" colspan="2"><span class="label label-important">{%=locale.fileupload.error%}\
</span> {%=locale.fileupload.errors[file.error] || file.error%}</td>
{% } else { %}
<td class="preview">{% if (file.thumbnail_url) { %}
<a href="{%=file.url%}" title="{%=file.name%}" rel="gallery" download="{%=file.name%}">\
<img src="{%=file.thumbnail_url%}"></a>
{% } %}</td>
<td class="name">
<a href="{%=file.url%}" title="{%=file.name%}" rel="{%=file.thumbnail_url&&'gallery'%}" \
download="{%=file.name%}">{%=file.name%}</a>
</td>
<td class="size"><span>{%=o.formatFileSize(file.size)%}</span></td>
<td colspan="2"></td>
{% } %}
<td class="delete">
<button class="btn btn-danger" data-type="{%=file.delete_type%}" data-url="{%=file.delete_url%}">
<i class="icon-trash icon-white"></i>
<span>{%=locale.fileupload.destroy%}</span>
</button>
<input type="checkbox" name="delete" value="1">
</td>
</tr>
{% } %}
</script>
"""
| gpl-3.0 |
Dhivyap/ansible | lib/ansible/modules/cloud/amazon/aws_region_info.py | 12 | 3757 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
module: aws_region_info
short_description: Gather information about AWS regions.
description:
- Gather information about AWS regions.
- This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change.
version_added: '2.5'
author: 'Henrique Rodrigues (@Sodki)'
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for
possible filters. Filter names and values are case sensitive. You can also use underscores
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
default: {}
extends_documentation_fragment:
- aws
- ec2
requirements: [botocore, boto3]
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all regions
- aws_region_info:
# Gather information about a single region
- aws_region_info:
filters:
region-name: eu-west-1
'''
RETURN = '''
regions:
returned: on success
description: >
Regions that match the provided filters. Each element consists of a dict with all the information related
to that region.
type: list
sample: "[{
'endpoint': 'ec2.us-west-1.amazonaws.com',
'region_name': 'us-west-1'
}]"
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, HAS_BOTO3
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # will be detected by imported HAS_BOTO3
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(default={}, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if module._name == 'aws_region_facts':
module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(
module,
conn_type='client',
resource='ec2',
region=region,
endpoint=ec2_url,
**aws_connect_params
)
# Replace filter key underscores with dashes, for compatibility
sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
try:
regions = connection.describe_regions(
Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
)
except ClientError as e:
module.fail_json(msg="Unable to describe regions: {0}".format(to_native(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg="Unable to describe regions: {0}".format(to_native(e)),
exception=traceback.format_exc())
module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']])
if __name__ == '__main__':
main()
| gpl-3.0 |
GladeRom/android_external_chromium_org | build/android/asan_symbolize.py | 96 | 3102 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import optparse
import os
import re
import sys
from pylib import constants
# Uses symbol.py from third_party/android_platform, not python's.
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT,
'third_party/android_platform/development/scripts'))
import symbol
_RE_ASAN = re.compile(r'(.*?)(#\S*?) (\S*?) \((.*?)\+(.*?)\)')
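# Groups: (1) log prefix, (2) frame number like '#0', (3) code address,
# (4) library path, (5) offset within the library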
def _ParseAsanLogLine(line):
m = re.match(_RE_ASAN, line)
if not m:
return None
return {
'prefix': m.group(1),
'library': m.group(4),
'pos': m.group(2),
'rel_address': '%08x' % int(m.group(5), 16),
}
def _FindASanLibraries():
asan_lib_dir = os.path.join(constants.DIR_SOURCE_ROOT,
'third_party', 'llvm-build',
'Release+Asserts', 'lib')
asan_libs = []
for src_dir, _, files in os.walk(asan_lib_dir):
asan_libs += [os.path.relpath(os.path.join(src_dir, f))
for f in files
if f.endswith('.so')]
return asan_libs
def _TranslateLibPath(library, asan_libs):
for asan_lib in asan_libs:
if os.path.basename(library) == os.path.basename(asan_lib):
return '/' + asan_lib
return symbol.TranslateLibPath(library)
def _Symbolize(asan_input):
asan_libs = _FindASanLibraries()
libraries = collections.defaultdict(list)
asan_lines = []
for asan_log_line in [a.rstrip() for a in asan_input]:
m = _ParseAsanLogLine(asan_log_line)
if m:
libraries[m['library']].append(m)
asan_lines.append({'raw_log': asan_log_line, 'parsed': m})
all_symbols = collections.defaultdict(dict)
for library, items in libraries.iteritems():
libname = _TranslateLibPath(library, asan_libs)
lib_relative_addrs = set([i['rel_address'] for i in items])
info_dict = symbol.SymbolInformationForSet(libname,
lib_relative_addrs,
True)
if info_dict:
all_symbols[library]['symbols'] = info_dict
for asan_log_line in asan_lines:
m = asan_log_line['parsed']
if not m:
print asan_log_line['raw_log']
continue
if (m['library'] in all_symbols and
m['rel_address'] in all_symbols[m['library']]['symbols']):
s = all_symbols[m['library']]['symbols'][m['rel_address']][0]
print '%s%s %s %s' % (m['prefix'], m['pos'], s[0], s[1])
else:
print asan_log_line['raw_log']
def main():
parser = optparse.OptionParser()
parser.add_option('-l', '--logcat',
help='File containing adb logcat output with ASan stacks. '
'Use stdin if not specified.')
options, _ = parser.parse_args()
if options.logcat:
asan_input = file(options.logcat, 'r')
else:
asan_input = sys.stdin
_Symbolize(asan_input.readlines())
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
tndatacommons/tndata_backend | tndata_backend/goals/tests/test_managers.py | 2 | 12741 | import pytz
from datetime import date, time
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from model_mommy import mommy
from utils.user_utils import tzdt
from .. models import (
Action,
Category,
DailyProgress,
Goal,
Trigger,
UserAction,
UserCompletedAction,
)
from .. settings import (
DEFAULT_MORNING_GOAL_TRIGGER_NAME,
DEFAULT_MORNING_GOAL_TRIGGER_TIME,
DEFAULT_MORNING_GOAL_TRIGGER_RRULE,
DEFAULT_EVENING_GOAL_TRIGGER_NAME,
DEFAULT_EVENING_GOAL_TRIGGER_TIME,
DEFAULT_EVENING_GOAL_TRIGGER_RRULE,
)
User = get_user_model()
class TestUserActionManager(TestCase):
def test_smoke(self):
"""A smoke test to ensure that chaining methods doesn't blow up."""
self.assertEqual(list(UserAction.objects.published()), [])
self.assertEqual(list(UserAction.objects.upcoming()), [])
self.assertEqual(list(UserAction.objects.stale()), [])
# And that chaining is possible
qs = UserAction.objects.published().stale()
self.assertEqual(list(qs), [])
qs = UserAction.objects.published().upcoming()
self.assertEqual(list(qs), [])
class TestCategoryManager(TestCase):
"""Tests for the `CategoryManager` manager."""
@classmethod
def setUpTestData(cls):
cls.draft_category = Category.objects.create(
order=1,
title="Draft Category",
)
cls.published_category = Category.objects.create(
order=2,
title="Published",
state="published",
)
cls.packaged_category = Category.objects.create(
order=3,
title="Packaged",
state="published",
packaged_content=True
)
cls.default_category = Category.objects.create(
order=4,
title="Default",
state="published",
selected_by_default=True
)
cls.draft_default_category = Category.objects.create(
order=5,
title="Draft Default",
selected_by_default=True
)
def test_selected_by_default(self):
# Will return all categories selected by default without any args...
results = sorted([c.title for c in Category.objects.selected_by_default()])
expected = sorted(['Draft Default', 'Default'])
self.assertEqual(results, expected)
# Will exclude draft content if given the correct kwargs.
cats = Category.objects.selected_by_default(state='published')
results = [c.title for c in cats]
expected = ['Default']
self.assertEqual(results, expected)
def test_published(self):
results = Category.objects.published()
results = list(results.values_list('title', flat=True))
self.assertEqual(results, ['Published', 'Default'])
class TestDailyProgressManager(TestCase):
"""Tests for the `DailyProgressManager` manager."""
def test_exists_today(self):
# When a user has no data
u = User.objects.create_user('dp-exists', 'dp-exists@example.com', 'x')
self.assertIsNone(DailyProgress.objects.exists_today(u))
# When a user does have a DailyProgress instance
dp = DailyProgress.objects.create(user=u)
self.assertEqual(dp.id, DailyProgress.objects.exists_today(u))
# clean up
u.delete()
dp.delete()
def test_for_today(self):
# When a user has no data
u = User.objects.create_user('dp-exists', 'dp-exists@example.com', 'x')
dp = DailyProgress.objects.for_today(u)
self.assertEqual(dp.user, u)
# When we fetch it again, it should return the same instance.
other_dp = DailyProgress.objects.for_today(u)
self.assertEqual(dp.id, other_dp.id)
# clean up
u.delete()
dp.delete()
def test_engagement_rank(self):
action = mommy.make(Action, title="A")
user_a = User.objects.create_user('a', 'a@a.a', 'p')
user_b = User.objects.create_user('b', 'b@b.b', 'p')
ua_a = mommy.make(UserAction, user=user_a, action=action)
ua_b = mommy.make(UserAction, user=user_b, action=action)
uca_a = mommy.make(UserCompletedAction, user=user_a, action=action,
useraction=ua_a, state='completed')
uca_b = mommy.make(UserCompletedAction, user=user_b, action=action,
useraction=ua_b, state='dismissed')
dp_a = DailyProgress.objects.for_today(user_a)
dp_a.calculate_engagement()
dp_a.save()
dp_b = DailyProgress.objects.for_today(user_a)
dp_b.calculate_engagement()
dp_b.save()
ua_rank = DailyProgress.objects.engagement_rank(user_a)
ub_rank = DailyProgress.objects.engagement_rank(user_b)
self.assertEqual(ua_rank, 50.0)
self.assertEqual(ub_rank, 0.0)
# clean up
for obj in [uca_a, uca_b, ua_a, ua_b, user_a, user_b, action]:
obj.delete()
class TestGoalManager(TestCase):
"""Tests for the `GoalManager` manager."""
@classmethod
def setUpTestData(cls):
cls.draft_category = Category.objects.create(
order=1,
title="Draft Category",
)
cls.published_category = Category.objects.create(
order=2,
title="Published Category",
state="published"
)
cls.packaged_category = Category.objects.create(
order=3,
title="Packaged Category",
state="published",
packaged_content=True
)
cls.g1 = Goal.objects.create(title='One', state='published')
cls.g1.categories.add(cls.draft_category)
cls.g2 = Goal.objects.create(title='Two', state='published')
cls.g2.categories.add(cls.published_category)
cls.g3 = Goal.objects.create(title='Three', state='published')
cls.g3.categories.add(cls.packaged_category)
def test_published(self):
"""Published goals should exclude both unpublished categories and
packaged content."""
results = Goal.objects.published()
self.assertEqual(list(results), [self.g2])
def test_published_with_multiple_categories(self):
"""Test the case when a goal is part of a published Category AND
is also in a published Package (Category). The .pubished() method should
still return the goal.
"""
# Create a goal that's in both a published package and category
goal = Goal.objects.create(title='Dual Goal', state='published')
goal.categories.add(self.published_category)
goal.categories.add(self.packaged_category)
# The goal should be in the set of published goals.
self.assertIn(goal, Goal.objects.published())
def test_packages(self):
"""The packages method should only return packaged content.
It should also accept queryest parameters."""
results = Goal.objects.packages()
self.assertEqual(list(results), [self.g3])
results = Goal.objects.packages(categories=self.packaged_category)
self.assertEqual(list(results), [self.g3])
results = Goal.objects.packages(categories=self.draft_category)
self.assertEqual(list(results), [])
class TestTriggerManager(TestCase):
"""Tests for the `TriggerManager` manager."""
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user("u", "u@a.com", "pass")
cls.default_trigger = Trigger.objects.create(
name="Default Trigger",
time=time(12, 34),
recurrences="RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR",
)
cls.custom_trigger = Trigger.objects.create(
user=cls.user,
name="A Custom Trigger",
trigger_date=date(2243, 7, 4),
time=time(12, 34),
recurrences="RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR",
)
def test_get_default_morning_goal_trigger(self):
# There's no default trigger at the moment, so calling this creates one
t = Trigger.objects.get_default_morning_goal_trigger()
self.assertEqual(t.name, DEFAULT_MORNING_GOAL_TRIGGER_NAME)
self.assertEqual(t.serialized_recurrences(), DEFAULT_MORNING_GOAL_TRIGGER_RRULE)
self.assertEqual(t.time.strftime("%H:%M"), DEFAULT_MORNING_GOAL_TRIGGER_TIME)
# Calling this again should return the original.
obj = Trigger.objects.get_default_morning_goal_trigger()
self.assertEqual(obj.id, t.id)
def test_get_default_evening_goal_trigger(self):
# There's no default trigger at the moment, so calling this creates one
t = Trigger.objects.get_default_evening_goal_trigger()
self.assertEqual(t.name, DEFAULT_EVENING_GOAL_TRIGGER_NAME)
self.assertEqual(t.serialized_recurrences(), DEFAULT_EVENING_GOAL_TRIGGER_RRULE)
self.assertEqual(t.time.strftime("%H:%M"), DEFAULT_EVENING_GOAL_TRIGGER_TIME)
# Calling this again should return the original.
obj = Trigger.objects.get_default_evening_goal_trigger()
self.assertEqual(obj.id, t.id)
def test_custom(self):
"""Ensure the custom method only returns custom triggers."""
self.assertIn(self.custom_trigger, Trigger.objects.custom())
self.assertNotIn(self.default_trigger, Trigger.objects.custom())
def test_default(self):
"""Ensure the default method only returns default triggers."""
self.assertIn(self.default_trigger, Trigger.objects.default())
self.assertNotIn(self.custom_trigger, Trigger.objects.default())
def test_for_user(self):
"""Ensure a user's triggers are returned."""
self.assertIn(
self.custom_trigger,
Trigger.objects.for_user(self.user)
)
self.assertNotIn(
self.default_trigger,
Trigger.objects.for_user(self.user)
)
def test_create_for_user(self):
with patch('goals.models.triggers.timezone') as mock_tz:
mock_tz.is_naive = timezone.is_naive
mock_tz.is_aware = timezone.is_aware
mock_tz.make_aware = timezone.make_aware
mock_tz.make_naive = timezone.make_naive
mock_tz.utc = timezone.utc
mock_tz.now.return_value = tzdt(2015, 3, 14, 8, 30)
# When there's a time & recurrence
trigger = Trigger.objects.create_for_user(
self.user,
"New Trigger",
time(8, 30),
None,
"RRULE:FREQ=WEEKLY;BYDAY=MO",
)
self.assertEqual(
trigger.recurrences_as_text(),
"weekly, each Monday"
)
# when there's a time & a date
trigger = Trigger.objects.create_for_user(
self.user,
"Other New Trigger",
time(9, 30),
date(2015, 3, 15),
None
)
tz = pytz.timezone(self.user.userprofile.timezone)
expected = tzdt(2015, 3, 15, 9, 30, tz=tz)
self.assertEqual(
trigger.next().strftime("%c %z"),
expected.strftime("%c %z")
)
def test_create_for_useraction(self):
a = Action.objects.create(title='Test Action')
ua = UserAction.objects.create(user=self.user, action=a)
trigger = Trigger.objects.create_for_user(
self.user,
ua.get_custom_trigger_name(),
time(8, 30),
None,
"RRULE:FREQ=WEEKLY;BYDAY=MO",
ua
)
ua = UserAction.objects.get(pk=ua.id)
self.assertEqual(trigger.useraction_set.count(), 1)
self.assertEqual(ua.custom_trigger, trigger)
# Clean up
ua.delete()
a.delete()
class TestUserCompletedActionManager(TestCase):
"""Tests for the `UserCompletedActionManager` manager."""
@classmethod
def setUpTestData(cls):
cls.action = mommy.make(Action, title="A", state='published')
cls.user = User.objects.create_user('u', 'x@y.z', 'p')
cls.useraction = mommy.make(UserAction, user=cls.user, action=cls.action)
cls.uca = mommy.make(UserCompletedAction, user=cls.user,
action=cls.action, useraction=cls.useraction,
state='completed')
def test_engagement(self):
value = UserCompletedAction.objects.engagement(self.user, days=15)
self.assertEqual(value, 100.0)
| mit |
libracore/erpnext | erpnext/accounts/report/trial_balance_for_party/trial_balance_for_party.py | 8 | 5969 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cint
from erpnext.accounts.report.trial_balance.trial_balance import validate_filters
def execute(filters=None):
validate_filters(filters)
show_party_name = is_party_name_visible(filters)
columns = get_columns(filters, show_party_name)
data = get_data(filters, show_party_name)
return columns, data
def get_data(filters, show_party_name):
party_name_field = "{0}_name".format(frappe.scrub(filters.get('party_type')))
if filters.get('party_type') == 'Student':
party_name_field = 'first_name'
elif filters.get('party_type') == 'Shareholder':
party_name_field = 'title'
party_filters = {"name": filters.get("party")} if filters.get("party") else {}
parties = frappe.get_all(filters.get("party_type"), fields = ["name", party_name_field],
filters = party_filters, order_by="name")
company_currency = frappe.get_cached_value('Company', filters.company, "default_currency")
opening_balances = get_opening_balances(filters)
balances_within_period = get_balances_within_period(filters)
data = []
# total_debit, total_credit = 0, 0
total_row = frappe._dict({
"opening_debit": 0,
"opening_credit": 0,
"debit": 0,
"credit": 0,
"closing_debit": 0,
"closing_credit": 0
})
for party in parties:
row = { "party": party.name }
if show_party_name:
row["party_name"] = party.get(party_name_field)
# opening
opening_debit, opening_credit = opening_balances.get(party.name, [0, 0])
row.update({
"opening_debit": opening_debit,
"opening_credit": opening_credit
})
# within period
debit, credit = balances_within_period.get(party.name, [0, 0])
row.update({
"debit": debit,
"credit": credit
})
# closing
closing_debit, closing_credit = toggle_debit_credit(opening_debit + debit, opening_credit + credit)
row.update({
"closing_debit": closing_debit,
"closing_credit": closing_credit
})
# totals
for col in total_row:
total_row[col] += row.get(col)
row.update({
"currency": company_currency
})
has_value = False
if (opening_debit or opening_credit or debit or credit or closing_debit or closing_credit):
has_value = True
if cint(filters.show_zero_values) or has_value:
data.append(row)
# Add total row
total_row.update({
"party": "'" + _("Totals") + "'",
"currency": company_currency
})
data.append(total_row)
return data
def get_opening_balances(filters):
gle = frappe.db.sql("""
select party, sum(debit) as opening_debit, sum(credit) as opening_credit
from `tabGL Entry`
where company=%(company)s
and ifnull(party_type, '') = %(party_type)s and ifnull(party, '') != ''
and (posting_date < %(from_date)s or ifnull(is_opening, 'No') = 'Yes')
group by party""", {
"company": filters.company,
"from_date": filters.from_date,
"party_type": filters.party_type
}, as_dict=True)
opening = frappe._dict()
for d in gle:
opening_debit, opening_credit = toggle_debit_credit(d.opening_debit, d.opening_credit)
opening.setdefault(d.party, [opening_debit, opening_credit])
return opening
def get_balances_within_period(filters):
gle = frappe.db.sql("""
select party, sum(debit) as debit, sum(credit) as credit
from `tabGL Entry`
where company=%(company)s
and ifnull(party_type, '') = %(party_type)s and ifnull(party, '') != ''
and posting_date >= %(from_date)s and posting_date <= %(to_date)s
and ifnull(is_opening, 'No') = 'No'
group by party""", {
"company": filters.company,
"from_date": filters.from_date,
"to_date": filters.to_date,
"party_type": filters.party_type
}, as_dict=True)
balances_within_period = frappe._dict()
for d in gle:
balances_within_period.setdefault(d.party, [d.debit, d.credit])
return balances_within_period
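# Nets debit against credit so only one side remains, e.g.
# toggle_debit_credit(100, 30) == (70.0, 0.0) and toggle_debit_credit(30, 100) == (0.0, 70.0)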
def toggle_debit_credit(debit, credit):
if flt(debit) > flt(credit):
debit = flt(debit) - flt(credit)
credit = 0.0
else:
credit = flt(credit) - flt(debit)
debit = 0.0
return debit, credit
def get_columns(filters, show_party_name):
columns = [
{
"fieldname": "party",
"label": _(filters.party_type),
"fieldtype": "Link",
"options": filters.party_type,
"width": 200
},
{
"fieldname": "opening_debit",
"label": _("Opening (Dr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "opening_credit",
"label": _("Opening (Cr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "debit",
"label": _("Debit"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "credit",
"label": _("Credit"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "closing_debit",
"label": _("Closing (Dr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "closing_credit",
"label": _("Closing (Cr)"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
}
]
if show_party_name:
columns.insert(1, {
"fieldname": "party_name",
"label": _(filters.party_type) + " Name",
"fieldtype": "Data",
"width": 200
})
return columns
def is_party_name_visible(filters):
show_party_name = False
if filters.get('party_type') in ['Customer', 'Supplier']:
if filters.get("party_type") == "Customer":
party_naming_by = frappe.db.get_single_value("Selling Settings", "cust_master_name")
else:
party_naming_by = frappe.db.get_single_value("Buying Settings", "supp_master_name")
if party_naming_by == "Naming Series":
show_party_name = True
else:
show_party_name = True
return show_party_name | gpl-3.0 |
dreamsxin/kbengine | kbe/src/lib/python/Lib/idlelib/FormatParagraph.py | 76 | 7194 | """Extension to format a paragraph or selection to a max width.
Does basic, standard text formatting, and also understands Python
comment blocks. Thus, for editing Python source code, this
extension is really only suitable for reformatting these comment
blocks or triple-quoted strings.
Known problems with comment reformatting:
* If there is a selection marked, and the first line of the
selection is not complete, the block will probably not be detected
as comments, and will have the normal "text formatting" rules
applied.
* If a comment block has leading whitespace that mixes tabs and
spaces, they will not be considered part of the same block.
* Fancy comments, like this bulleted list, aren't handled :-)
"""
import re
from idlelib.configHandler import idleConf
class FormatParagraph:
menudefs = [
('format', [ # /s/edit/format dscherer@cmu.edu
('Format Paragraph', '<<format-paragraph>>'),
])
]
def __init__(self, editwin):
self.editwin = editwin
def close(self):
self.editwin = None
def format_paragraph_event(self, event, limit=None):
"""Formats paragraph to a max width specified in idleConf.
If text is selected, format_paragraph_event will start breaking lines
at the max width, starting from the beginning of the selection.
If no text is selected, format_paragraph_event uses the current
cursor location to determine the paragraph (lines of text surrounded
by blank lines) and formats it.
The length limit parameter is for testing with a known value.
"""
if limit is None:
limit = idleConf.GetOption(
'main', 'FormatParagraph', 'paragraph', type='int')
text = self.editwin.text
first, last = self.editwin.get_selection_indices()
if first and last:
data = text.get(first, last)
comment_header = get_comment_header(data)
else:
first, last, comment_header, data = \
find_paragraph(text, text.index("insert"))
if comment_header:
newdata = reformat_comment(data, limit, comment_header)
else:
newdata = reformat_paragraph(data, limit)
text.tag_remove("sel", "1.0", "end")
if newdata != data:
text.mark_set("insert", first)
text.undo_block_start()
text.delete(first, last)
text.insert(first, newdata)
text.undo_block_stop()
else:
text.mark_set("insert", last)
text.see("insert")
return "break"
def find_paragraph(text, mark):
"""Returns the start/stop indices enclosing the paragraph that mark is in.
Also returns the comment format string, if any, and paragraph of text
between the start/stop indices.
"""
lineno, col = map(int, mark.split("."))
line = text.get("%d.0" % lineno, "%d.end" % lineno)
# Look for start of next paragraph if the index passed in is a blank line
while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line):
lineno = lineno + 1
line = text.get("%d.0" % lineno, "%d.end" % lineno)
first_lineno = lineno
comment_header = get_comment_header(line)
comment_header_len = len(comment_header)
# Once start line found, search for end of paragraph (a blank line)
while get_comment_header(line)==comment_header and \
not is_all_white(line[comment_header_len:]):
lineno = lineno + 1
line = text.get("%d.0" % lineno, "%d.end" % lineno)
last = "%d.0" % lineno
# Search back to beginning of paragraph (first blank line before)
lineno = first_lineno - 1
line = text.get("%d.0" % lineno, "%d.end" % lineno)
while lineno > 0 and \
get_comment_header(line)==comment_header and \
not is_all_white(line[comment_header_len:]):
lineno = lineno - 1
line = text.get("%d.0" % lineno, "%d.end" % lineno)
first = "%d.0" % (lineno+1)
return first, last, comment_header, text.get(first, last)
# This should perhaps be replaced with textwrap.wrap
def reformat_paragraph(data, limit):
"""Return data reformatted to specified width (limit)."""
lines = data.split("\n")
i = 0
n = len(lines)
while i < n and is_all_white(lines[i]):
i = i+1
if i >= n:
return data
indent1 = get_indent(lines[i])
if i+1 < n and not is_all_white(lines[i+1]):
indent2 = get_indent(lines[i+1])
else:
indent2 = indent1
new = lines[:i]
partial = indent1
while i < n and not is_all_white(lines[i]):
# XXX Should take double space after period (etc.) into account
words = re.split(r"(\s+)", lines[i])
for j in range(0, len(words), 2):
word = words[j]
if not word:
continue # Can happen when line ends in whitespace
if len((partial + word).expandtabs()) > limit and \
partial != indent1:
new.append(partial.rstrip())
partial = indent2
partial = partial + word + " "
if j+1 < len(words) and words[j+1] != " ":
partial = partial + " "
i = i+1
new.append(partial.rstrip())
# XXX Should reformat remaining paragraphs as well
new.extend(lines[i:])
return "\n".join(new)
def reformat_comment(data, limit, comment_header):
"""Return data reformatted to specified width with comment header."""
# Remove header from the comment lines
lc = len(comment_header)
data = "\n".join(line[lc:] for line in data.split("\n"))
# Reformat to maxformatwidth chars or a 20 char width,
# whichever is greater.
format_width = max(limit - len(comment_header), 20)
newdata = reformat_paragraph(data, format_width)
# re-split and re-insert the comment header.
newdata = newdata.split("\n")
# If the block ends in a \n, we don't want the comment prefix
# inserted after it. (I'm not sure it makes sense to reformat a
# comment block that is not made of complete lines, but whatever!)
# Can't think of a clean solution, so we hack away
block_suffix = ""
if not newdata[-1]:
block_suffix = "\n"
newdata = newdata[:-1]
return '\n'.join(comment_header+line for line in newdata) + block_suffix
def is_all_white(line):
"""Return True if line is empty or all whitespace."""
return re.match(r"^\s*$", line) is not None
def get_indent(line):
"""Return the initial space or tab indent of line."""
return re.match(r"^([ \t]*)", line).group()
def get_comment_header(line):
"""Return string with leading whitespace and '#' from line or ''.
A null return indicates that the line is not a comment line. A non-
null return, such as ' #', will be used to find the other lines of
a comment block with the same indent.
"""
m = re.match(r"^([ \t]*#*)", line)
if m is None: return ""
return m.group(1)
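# Illustrative examples (added for clarity):
#   get_comment_header("    # a comment") -> "    #"
#   get_comment_header("plain code")      -> ""   (empty header: not a comment)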
if __name__ == "__main__":
import unittest
unittest.main('idlelib.idle_test.test_formatparagraph',
verbosity=2, exit=False)
| lgpl-3.0 |
amjith/python-prompt-toolkit | prompt_toolkit/output.py | 2 | 2381 | """
Interface for an output.
The actual implementations are in
`prompt_toolkit.terminal.vt100_output/win32_output`.
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from six import with_metaclass
__all__ = (
'Output',
)
class Output(with_metaclass(ABCMeta, object)):
"""
Base class defining the Output interface for a renderer.
"""
@abstractmethod
def write(self, data):
pass
@abstractmethod
def set_title(self, title):
" Set terminal title. "
@abstractmethod
def clear_title(self):
" Clear title again. (or restore previous title.) "
@abstractmethod
def flush(self):
" Write to output stream and flush. "
@abstractmethod
def erase_screen(self):
"""
Erases the screen with the background colour and moves the cursor to
home.
"""
@abstractmethod
def enter_alternate_screen(self):
pass
@abstractmethod
def quit_alternate_screen(self):
pass
@abstractmethod
def enable_mouse_support(self):
pass
@abstractmethod
def disable_mouse_support(self):
pass
@abstractmethod
def erase_end_of_line(self):
"""
Erases from the current cursor position to the end of the current line.
"""
@abstractmethod
def erase_down(self):
"""
Erases the screen from the current line down to the bottom of the
screen.
"""
@abstractmethod
def reset_attributes(self):
pass
@abstractmethod
def set_attributes(self, fgcolor=None, bgcolor=None, bold=False, underline=False):
"""
Create new style and output.
"""
pass
@abstractmethod
def disable_autowrap(self):
pass
@abstractmethod
def enable_autowrap(self):
pass
@abstractmethod
def cursor_goto(self, row=0, column=0):
""" Move cursor position. """
@abstractmethod
def cursor_up(self, amount):
pass
@abstractmethod
def cursor_down(self, amount):
pass
@abstractmethod
def cursor_forward(self, amount):
pass
@abstractmethod
def cursor_backward(self, amount):
pass
def ask_for_cpr(self):
"""
Asks for a cursor position report (CPR).
(VT100 only.)
"""
| bsd-3-clause |
Weil0ng/gem5 | src/arch/x86/isa/insts/simd128/integer/data_transfer/__init__.py | 91 | 2388 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["move",
"move_non_temporal",
"move_mask"]
microcode = '''
# 128 bit multimedia and scientific data transfer instructions
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
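# Equivalent, exec-free formulation (illustrative only; the exec form above
# is what this Python 2 tree actually uses):
#   import importlib
#   for category in categories:
#       cat = importlib.import_module('.' + category, package=__name__)
#       microcode += cat.microcode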
| bsd-3-clause |
gymnasium/edx-platform | lms/djangoapps/commerce/utils.py | 9 | 15246 | """Utilities to assist with commerce tasks."""
import json
import logging
from urllib import urlencode
from urlparse import urljoin
import requests
import waffle
from django.conf import settings
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, is_commerce_service_configured
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from student.models import CourseEnrollment
from .models import CommerceConfiguration
log = logging.getLogger(__name__)
def is_account_activation_requirement_disabled():
"""
Checks to see if the django-waffle switch for disabling the account activation requirement is active
Returns:
Boolean value representing switch status
"""
switch_name = configuration_helpers.get_value(
'DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH',
settings.DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH
)
return waffle.switch_is_active(switch_name)
class EcommerceService(object):
""" Helper class for ecommerce service integration. """
def __init__(self):
self.config = CommerceConfiguration.current()
@property
def ecommerce_url_root(self):
""" Retrieve Ecommerce service public url root. """
return configuration_helpers.get_value('ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT)
def get_absolute_ecommerce_url(self, ecommerce_page_url):
""" Return the absolute URL to the ecommerce page.
Args:
ecommerce_page_url (str): Relative path to the ecommerce page.
Returns:
Absolute path to the ecommerce page.
"""
return urljoin(self.ecommerce_url_root, ecommerce_page_url)
def get_order_dashboard_url(self):
""" Return the URL to the ecommerce dashboard orders page.
Returns:
String: order dashboard url.
"""
return self.get_absolute_ecommerce_url(CommerceConfiguration.DEFAULT_ORDER_DASHBOARD_URL)
def get_receipt_page_url(self, order_number):
"""
Gets the URL for the Order Receipt page hosted by the ecommerce service.
Args:
order_number (str): Order number.
Returns:
Receipt page for the specified Order.
"""
return self.get_absolute_ecommerce_url(CommerceConfiguration.DEFAULT_RECEIPT_PAGE_URL + order_number)
def is_enabled(self, user):
"""
Determines the availability of the EcommerceService based on user activation and service configuration.
Note: If the user is anonymous we bypass the user activation gate and only look at the service config.
Returns:
Boolean
"""
user_is_active = user.is_active or is_account_activation_requirement_disabled()
allow_user = user_is_active or user.is_anonymous
return allow_user and self.config.checkout_on_ecommerce_service
def payment_page_url(self):
""" Return the URL for the checkout page.
Example:
http://localhost:8002/basket/add/
"""
return self.get_absolute_ecommerce_url(self.config.basket_checkout_page)
def get_checkout_page_url(self, *skus, **kwargs):
""" Construct the URL to the ecommerce checkout page and include products.
Args:
skus (list): List of SKUs associated with products to be added to basket
program_uuid (string): The UUID of the program, if applicable
enterprise_customer_catalog_uuid (string): The UUID of the enterprise customer catalog, if applicable
Returns:
Absolute path to the ecommerce checkout page showing basket that contains specified products.
Example:
http://localhost:8002/basket/add/?sku=5H3HG5&sku=57FHHD
http://localhost:8002/basket/add/?sku=5H3HG5&sku=57FHHD&bundle=3bdf1dd1-49be-4a15-9145-38901f578c5a
"""
program_uuid = kwargs.get('program_uuid')
enterprise_catalog_uuid = kwargs.get('enterprise_customer_catalog_uuid')
query_params = {'sku': skus}
if enterprise_catalog_uuid:
query_params.update({'enterprise_customer_catalog_uuid': enterprise_catalog_uuid})
url = '{checkout_page_path}?{query_params}'.format(
checkout_page_path=self.get_absolute_ecommerce_url(self.config.basket_checkout_page),
query_params=urlencode(query_params, doseq=True),
)
if program_uuid:
url = '{url}&bundle={program_uuid}'.format(
url=url,
program_uuid=program_uuid
)
return url
def upgrade_url(self, user, course_key):
"""
Returns the URL for the user to upgrade, or None if not applicable.
"""
enrollment = CourseEnrollment.get_enrollment(user, course_key)
verified_mode = enrollment.verified_mode if enrollment else None
if verified_mode:
if self.is_enabled(user):
return self.get_checkout_page_url(verified_mode.sku)
else:
return reverse('verify_student_upgrade_and_verify', args=(course_key,))
return None
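# Sketch of a typical call flow (illustrative; `request` and the SKU are
# placeholders, not part of this module):
#   ecommerce = EcommerceService()
#   if ecommerce.is_enabled(request.user):
#       checkout_url = ecommerce.get_checkout_page_url('SOME-SKU')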
def refund_entitlement(course_entitlement):
"""
Attempt a refund of a course entitlement. Verify the User before calling this refund method
Returns:
bool: True if the Refund is successfully processed.
"""
user_model = get_user_model()
enrollee = course_entitlement.user
entitlement_uuid = str(course_entitlement.uuid)
if not is_commerce_service_configured():
log.error(
'Ecommerce service is not configured, cannot refund for user [%s], course entitlement [%s].',
enrollee.id,
entitlement_uuid
)
return False
service_user = user_model.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
api_client = ecommerce_api_client(service_user)
log.info(
'Attempting to create a refund for user [%s], course entitlement [%s]...',
enrollee.id,
entitlement_uuid
)
try:
refund_ids = api_client.refunds.post(
{
'order_number': course_entitlement.order_number,
'username': enrollee.username,
'entitlement_uuid': entitlement_uuid,
}
)
except Exception as exc: # pylint: disable=broad-except
# Catch any possible exceptions from the Ecommerce service to ensure we fail gracefully
log.exception(
"Unexpected exception while attempting to initiate refund for user [%s], "
"course entitlement [%s] message: [%s]",
enrollee.id,
course_entitlement.uuid,
str(exc)
)
return False
if refund_ids:
log.info(
'Refund successfully opened for user [%s], course entitlement [%s]: %r',
enrollee.id,
entitlement_uuid,
refund_ids,
)
return _process_refund(
refund_ids=refund_ids,
api_client=api_client,
mode=course_entitlement.mode,
user=enrollee,
always_notify=True,
)
else:
log.warning('No refund opened for user [%s], course entitlement [%s]', enrollee.id, entitlement_uuid)
return False
def refund_seat(course_enrollment):
"""
Attempt to initiate a refund for any orders associated with the seat being unenrolled,
using the commerce service.
Arguments:
course_enrollment (CourseEnrollment): a student enrollment
Returns:
A list of the external service's IDs for any refunds that were initiated
(may be empty).
Raises:
exceptions.SlumberBaseException: for any unhandled HTTP error during communication with the E-Commerce Service.
exceptions.Timeout: if the attempt to reach the commerce service timed out.
"""
User = get_user_model() # pylint:disable=invalid-name
course_key_str = unicode(course_enrollment.course_id)
enrollee = course_enrollment.user
service_user = User.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
api_client = ecommerce_api_client(service_user)
log.info('Attempting to create a refund for user [%s], course [%s]...', enrollee.id, course_key_str)
refund_ids = api_client.refunds.post({'course_id': course_key_str, 'username': enrollee.username})
if refund_ids:
log.info('Refund successfully opened for user [%s], course [%s]: %r', enrollee.id, course_key_str, refund_ids)
_process_refund(
refund_ids=refund_ids,
api_client=api_client,
mode=course_enrollment.mode,
user=enrollee,
)
else:
log.info('No refund opened for user [%s], course [%s]', enrollee.id, course_key_str)
return refund_ids
def _process_refund(refund_ids, api_client, mode, user, always_notify=False):
"""
Helper method to process a refund for a given course_product. This method assumes that the User has already
been unenrolled.
Arguments:
refund_ids: List of refund ids to be processed
api_client: The API Client used in the processing of refunds
mode: The mode that the refund should be processed for
user: The user that the refund is being processed for
always_notify (bool): This will enable always notifying support with Zendesk tickets when
an approval is required
Returns:
bool: True if the refund process was successful, False if there are any Errors that are not handled
"""
config = CommerceConfiguration.current()
if config.enable_automatic_refund_approval:
refunds_requiring_approval = []
for refund_id in refund_ids:
try:
# NOTE: The following assumes that the user has already been unenrolled.
# We are then able to approve payment. Additionally, this ensures we don't tie up an
# additional web worker when the E-Commerce Service tries to unenroll the learner.
api_client.refunds(refund_id).process.put({'action': 'approve_payment_only'})
log.info('Refund [%d] successfully approved.', refund_id)
except: # pylint: disable=bare-except
# Push the refund to Support to process
log.exception('Failed to automatically approve refund [%d]!', refund_id)
refunds_requiring_approval.append(refund_id)
else:
refunds_requiring_approval = refund_ids
if refunds_requiring_approval:
# XCOM-371: this is a temporary measure to suppress refund-related email
# notifications to students and support for free enrollments. This
# condition should be removed when the CourseEnrollment.refundable() logic
# is updated to be more correct, or when we implement better handling (and
# notifications) in Otto for handling reversal of $0 transactions.
if mode != 'verified' and not always_notify:
# 'verified' is the only enrollment mode that should presently
# result in opening a refund request.
log.info(
'Skipping refund support notification for non-verified mode for user [%s], mode: [%s]',
user.id,
mode,
)
else:
try:
return _send_refund_notification(user, refunds_requiring_approval)
except: # pylint: disable=bare-except
# Unable to send notification to Support, do not break as this method is used by Signals
log.warning('Could not send support notification for refund.', exc_info=True)
return False
return True
def _send_refund_notification(user, refund_ids):
"""
Notify the support team of the refund request.
Returns:
bool: True if we are able to send the notification. In this case that means we were able to create
a ZenDesk ticket
"""
tags = ['auto_refund']
if theming_helpers.is_request_in_themed_site():
# this is not presently supported with the external service.
raise NotImplementedError("Unable to send refund processing emails to support teams.")
# Build the information for the ZenDesk ticket
student = user
subject = _("[Refund] User-Requested Refund")
body = _generate_refund_notification_body(student, refund_ids)
requester_name = student.profile.name or student.username
return create_zendesk_ticket(requester_name, student.email, subject, body, tags)
def _generate_refund_notification_body(student, refund_ids): # pylint: disable=invalid-name
""" Returns a refund notification message body. """
msg = _(
'A refund request has been initiated for {username} ({email}). '
'To process this request, please visit the link(s) below.'
).format(username=student.username, email=student.email)
ecommerce_url_root = configuration_helpers.get_value(
'ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT,
)
refund_urls = [urljoin(ecommerce_url_root, '/dashboard/refunds/{}/'.format(refund_id))
for refund_id in refund_ids]
# emails contained in this message could contain unicode characters so encode as such
return u'{msg}\n\n{urls}'.format(msg=msg, urls='\n'.join(refund_urls))
def create_zendesk_ticket(requester_name, requester_email, subject, body, tags=None):
"""
Create a Zendesk ticket via API.
Returns:
bool: False if we are unable to create the ticket for any reason
"""
if not (settings.ZENDESK_URL and settings.ZENDESK_USER and settings.ZENDESK_API_KEY):
log.error('Zendesk is not configured. Cannot create a ticket.')
return False
# Copy the tags to avoid modifying the original list.
tags = set(tags or [])
tags.add('LMS')
tags = list(tags)
data = {
'ticket': {
'requester': {
'name': requester_name,
'email': unicode(requester_email)
},
'subject': subject,
'comment': {'body': body},
'tags': tags
}
}
# Encode the data to create a JSON payload
payload = json.dumps(data)
# Set the request parameters
url = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')
user = '{}/token'.format(settings.ZENDESK_USER)
pwd = settings.ZENDESK_API_KEY
headers = {'content-type': 'application/json'}
try:
response = requests.post(url, data=payload, auth=(user, pwd), headers=headers)
# Check for HTTP codes other than 201 (Created)
if response.status_code != 201:
log.error('Failed to create ticket. Status: [%d], Body: [%s]', response.status_code, response.content)
return False
else:
log.debug('Successfully created ticket.')
except Exception: # pylint: disable=broad-except
log.exception('Failed to create ticket.')
return False
return True
| agpl-3.0 |
gmcic/fiona-html5 | source/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
| apache-2.0 |
himleyb85/django | django/contrib/auth/mixins.py | 305 | 4087 | from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.utils import six
from django.utils.encoding import force_text
class AccessMixin(object):
"""
Abstract CBV mixin that gives access mixins the same customizable
functionality.
"""
login_url = None
permission_denied_message = ''
raise_exception = False
redirect_field_name = REDIRECT_FIELD_NAME
def get_login_url(self):
"""
Override this method to override the login_url attribute.
"""
login_url = self.login_url or settings.LOGIN_URL
if not login_url:
raise ImproperlyConfigured(
'{0} is missing the login_url attribute. Define {0}.login_url, settings.LOGIN_URL, or override '
'{0}.get_login_url().'.format(self.__class__.__name__)
)
return force_text(login_url)
def get_permission_denied_message(self):
"""
Override this method to override the permission_denied_message attribute.
"""
return self.permission_denied_message
def get_redirect_field_name(self):
"""
Override this method to override the redirect_field_name attribute.
"""
return self.redirect_field_name
def handle_no_permission(self):
if self.raise_exception:
raise PermissionDenied(self.get_permission_denied_message())
return redirect_to_login(self.request.get_full_path(), self.get_login_url(), self.get_redirect_field_name())
class LoginRequiredMixin(AccessMixin):
"""
CBV mixin which verifies that the current user is authenticated.
"""
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return self.handle_no_permission()
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class PermissionRequiredMixin(AccessMixin):
"""
CBV mixin which verifies that the current user has all specified
permissions.
"""
permission_required = None
def get_permission_required(self):
"""
Override this method to override the permission_required attribute.
Must return an iterable.
"""
if self.permission_required is None:
raise ImproperlyConfigured(
'{0} is missing the permission_required attribute. Define {0}.permission_required, or override '
'{0}.get_permission_required().'.format(self.__class__.__name__)
)
if isinstance(self.permission_required, six.string_types):
perms = (self.permission_required, )
else:
perms = self.permission_required
return perms
def has_permission(self):
"""
Override this method to customize the way permissions are checked.
"""
perms = self.get_permission_required()
return self.request.user.has_perms(perms)
def dispatch(self, request, *args, **kwargs):
if not self.has_permission():
return self.handle_no_permission()
return super(PermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
class UserPassesTestMixin(AccessMixin):
"""
CBV Mixin that allows you to define a test function which must return True
if the current user can access the view.
"""
def test_func(self):
raise NotImplementedError(
'{0} is missing the implementation of the test_func() method.'.format(self.__class__.__name__)
)
def get_test_func(self):
"""
Override this method to use a different test_func method.
"""
return self.test_func
def dispatch(self, request, *args, **kwargs):
user_test_result = self.get_test_func()()
if not user_test_result:
return self.handle_no_permission()
return super(UserPassesTestMixin, self).dispatch(request, *args, **kwargs)
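# Illustrative usage (not part of this module): combining the mixins with a
# class-based view; the permission string and template name are hypothetical.
#
#   from django.views.generic import TemplateView
#
#   class ReportView(LoginRequiredMixin, PermissionRequiredMixin, TemplateView):
#       permission_required = ('reports.view_report',)
#       template_name = 'reports/index.html'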
| bsd-3-clause |
JordanP/openstack-snippets | ospurge/ospurge/resources/nova.py | 1 | 1062 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from typing import Any
from typing import Dict
from typing import Iterable
from ospurge.resources import base
class Servers(base.ServiceResource):
ORDER = 15
def list(self) -> Iterable:
return self.cloud.list_servers()
def delete(self, resource: Dict[str, Any]) -> None:
self.cloud.delete_server(resource['id'])
@staticmethod
def to_str(resource: Dict[str, Any]) -> str:
return "VM (id='{}', name='{}')".format(
resource['id'], resource['name'])
| apache-2.0 |
molobrakos/home-assistant | homeassistant/components/ihc/light.py | 7 | 4112 | """Support for IHC lights."""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from . import IHC_CONTROLLER, IHC_DATA, IHC_INFO
from .const import CONF_DIMMABLE, CONF_OFF_ID, CONF_ON_ID
from .ihcdevice import IHCDevice
from .util import async_pulse, async_set_bool, async_set_int
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IHC lights platform."""
if discovery_info is None:
return
devices = []
for name, device in discovery_info.items():
ihc_id = device['ihc_id']
product_cfg = device['product_cfg']
product = device['product']
# Find controller that corresponds with device id
ctrl_id = device['ctrl_id']
ihc_key = IHC_DATA.format(ctrl_id)
info = hass.data[ihc_key][IHC_INFO]
ihc_controller = hass.data[ihc_key][IHC_CONTROLLER]
ihc_off_id = product_cfg.get(CONF_OFF_ID)
ihc_on_id = product_cfg.get(CONF_ON_ID)
dimmable = product_cfg[CONF_DIMMABLE]
light = IhcLight(ihc_controller, name, ihc_id, ihc_off_id, ihc_on_id,
info, dimmable, product)
devices.append(light)
add_entities(devices)
class IhcLight(IHCDevice, Light):
"""Representation of a IHC light.
For dimmable lights, the associated IHC resource should be a light
level (integer). For non dimmable light the IHC resource should be
an on/off (boolean) resource
"""
def __init__(self, ihc_controller, name, ihc_id: int, ihc_off_id: int,
ihc_on_id: int, info: bool, dimmable=False,
product=None) -> None:
"""Initialize the light."""
super().__init__(ihc_controller, name, ihc_id, info, product)
self._ihc_off_id = ihc_off_id
self._ihc_on_id = ihc_on_id
self._brightness = 0
self._dimmable = dimmable
self._state = None
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._state
@property
def supported_features(self):
"""Flag supported features."""
if self._dimmable:
return SUPPORT_BRIGHTNESS
return 0
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
else:
brightness = self._brightness
if brightness == 0:
brightness = 255
if self._dimmable:
await async_set_int(self.hass, self.ihc_controller,
self.ihc_id, int(brightness * 100 / 255))
else:
if self._ihc_on_id:
await async_pulse(self.hass, self.ihc_controller,
self._ihc_on_id)
else:
await async_set_bool(self.hass, self.ihc_controller,
self.ihc_id, True)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
if self._dimmable:
await async_set_int(self.hass, self.ihc_controller,
self.ihc_id, 0)
else:
if self._ihc_off_id:
await async_pulse(self.hass, self.ihc_controller,
self._ihc_off_id)
else:
await async_set_bool(self.hass, self.ihc_controller,
self.ihc_id, False)
def on_ihc_change(self, ihc_id, value):
"""Handle IHC notifications."""
if isinstance(value, bool):
self._dimmable = False
self._state = value != 0
else:
self._dimmable = True
self._state = value > 0
if self._state:
self._brightness = int(value * 255 / 100)
self.schedule_update_ha_state()
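# Scaling note (for reference): IHC dimmers report levels as 0-100 while
# Home Assistant brightness is 0-255, so an IHC value of 50 maps to
# int(50 * 255 / 100) == 127.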
| apache-2.0 |
kylazhang/virt-test | virttest/video_maker.py | 3 | 6816 | """
Video Maker transforms screenshots taken during a test into a HTML 5
compatible video, so that one can watch the screen activity of the
whole test from inside your own browser.
This relies on generally available multimedia libraries, frameworks
and tools.
"""
import os
import time
import glob
import logging
__all__ = ['GstPythonVideoMaker', 'video_maker']
#
# Check what kind of video libraries tools we have available
#
# Gstreamer python bindings are our first choice
try:
import gst
GST_PYTHON_INSTALLED = True
except ImportError:
GST_PYTHON_INSTALLED = False
#
# PIL is also required to normalize images
#
try:
import PIL.Image
PIL_INSTALLED = True
except ImportError:
PIL_INSTALLED = False
#
# We only do video
#
CONTAINER_PREFERENCE = ['ogg', 'webm']
ENCODER_PREFERENCE = ['theora', 'vp8']
class GstPythonVideoMaker(object):
'''
Makes a movie out of screendump images using gstreamer-python
'''
CONTAINER_MAPPING = {'ogg': 'oggmux',
'webm': 'webmmux'}
ENCODER_MAPPING = {'theora': 'theoraenc',
'vp8': 'vp8enc'}
CONTAINER_ENCODER_MAPPING = {'ogg': 'theora',
'webm': 'vp8'}
def __init__(self, verbose=False):
if not GST_PYTHON_INSTALLED:
raise ValueError('gstreamer-python library was not found')
if not PIL_INSTALLED:
raise ValueError('python-imaging library was not found')
self.verbose = verbose
def get_most_common_image_size(self, input_dir):
'''
Find the most common image size
'''
image_sizes = {}
image_files = glob.glob(os.path.join(input_dir, '*.jpg'))
for f in image_files:
i = PIL.Image.open(f)
if i.size not in image_sizes:
image_sizes[i.size] = 1
else:
image_sizes[i.size] += 1
most_common_size_counter = 0
most_common_size = None
for image_size, image_counter in image_sizes.items():
if image_counter > most_common_size_counter:
most_common_size_counter = image_counter
most_common_size = image_size
return most_common_size
def normalize_images(self, input_dir):
'''
GStreamer requires all images to be the same size, so we do it here
'''
image_size = self.get_most_common_image_size(input_dir)
if image_size is None:
image_size = (800, 600)
if self.verbose:
logging.debug('Normalizing image files to size: %s', image_size)
image_files = glob.glob(os.path.join(input_dir, '*.jpg'))
for f in image_files:
i = PIL.Image.open(f)
if i.size != image_size:
i.resize(image_size).save(f)
def has_element(self, kind):
'''
Returns True if a gstreamer element is available
'''
return gst.element_factory_find(kind) is not None
def get_container_name(self):
'''
Gets the best available video container format, based on preference
'''
for c in CONTAINER_PREFERENCE:
element_kind = self.CONTAINER_MAPPING.get(c, c)
if self.has_element(element_kind):
return element_kind
raise ValueError('No suitable container format was found')
def get_encoder_name(self):
'''
Gets the best available video encoder, based on preference
'''
for c in ENCODER_PREFERENCE:
element_kind = self.ENCODER_MAPPING.get(c, c)
if self.has_element(element_kind):
return element_kind
raise ValueError('No suitable encoder format was found')
def get_element(self, name):
'''
Makes and returns and element from the gst factory interface
'''
if self.verbose:
logging.debug('GStreamer element requested: %s', name)
return gst.element_factory_make(name, name)
def start(self, input_dir, output_file):
'''
Process the input files and output the video file
'''
self.normalize_images(input_dir)
no_files = len(glob.glob(os.path.join(input_dir, '*.jpg')))
if self.verbose:
logging.debug('Number of files to encode as video: %s', no_files)
pipeline = gst.Pipeline("pipeline")
source = self.get_element("multifilesrc")
source_location = os.path.join(input_dir, "%04d.jpg")
if self.verbose:
logging.debug("Source location: %s", source_location)
source.set_property('location', source_location)
source.set_property('index', 1)
source_caps = gst.Caps()
source_caps.append('image/jpeg,framerate=(fraction)4/1')
source.set_property('caps', source_caps)
decoder = self.get_element("jpegdec")
# Attempt to auto detect the chosen encoder/mux based on output_file
encoder = None
container = None
for container_name in self.CONTAINER_ENCODER_MAPPING:
if output_file.endswith('.%s' % container_name):
enc_name = self.CONTAINER_ENCODER_MAPPING[container_name]
enc_name_gst = self.ENCODER_MAPPING[enc_name]
encoder = self.get_element(enc_name_gst)
cont_name_gst = self.CONTAINER_MAPPING[container_name]
container = self.get_element(cont_name_gst)
# If auto detection fails, choose from the list of preferred codec/mux
if encoder is None:
encoder = self.get_element(self.get_encoder_name())
if container is None:
container = self.get_element(self.get_container_name())
output = self.get_element("filesink")
output.set_property('location', output_file)
pipeline.add_many(source, decoder, encoder, container, output)
gst.element_link_many(source, decoder, encoder, container, output)
pipeline.set_state(gst.STATE_PLAYING)
while True:
if source.get_property('index') <= no_files:
if self.verbose:
logging.debug("Currently processing image number: %s",
source.get_property('index'))
time.sleep(1)
else:
break
time.sleep(3)
pipeline.set_state(gst.STATE_NULL)
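# Pipeline shape assembled above, for reference:
#   multifilesrc ! jpegdec ! <encoder> ! <muxer> ! filesink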
def video_maker(input_dir, output_file):
'''
Instantiates and runs a video maker
'''
v = GstPythonVideoMaker()
v.start(input_dir, output_file)
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print 'Usage: %s <input_dir> <output_file>' % sys.argv[0]
else:
video_maker(sys.argv[1], sys.argv[2])
| gpl-2.0 |
jamesbeebop/CouchPotatoServer | libs/oauth2/clients/imap.py | 885 | 1685 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
"""IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""
def authenticate(self, url, consumer, token):
if consumer is not None and not isinstance(consumer, oauth2.Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, oauth2.Token):
raise ValueError("Invalid token.")
imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH',
lambda x: oauth2.build_xoauth_string(url, consumer, token))
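# Illustrative usage (host, URL and credentials are placeholders):
#   consumer = oauth2.Consumer('key', 'secret')
#   token = oauth2.Token('token', 'token_secret')
#   conn = IMAP4_SSL('imap.example.com')
#   conn.authenticate('https://mail.example.com/imap/', consumer, token)
#   conn.select('INBOX')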
| gpl-3.0 |
goofwear/raspberry_pwn | src/pentest/voiper/sulley/sulley/primitives.py | 8 | 30910 | import random
import struct
import string
import os
########################################################################################################################
class base_primitive (object):
'''
The primitive base class implements common functionality shared across most primitives.
'''
def __init__ (self):
self.fuzz_complete = False # this flag is raised when the mutations are exhausted.
self.fuzz_library = [] # library of static fuzz heuristics to cycle through.
self.fuzzable = True # flag controlling whether or not the given primitive is to be fuzzed.
self.mutant_index = 0 # current mutation index into the fuzz library.
self.original_value = None # original value of primitive.
self.rendered = "" # rendered value of primitive.
self.value = None # current value of primitive.
def exhaust (self):
'''
Exhaust the possible mutations for this primitive.
@rtype: Integer
@return: The number of mutations to reach exhaustion
'''
num = self.num_mutations() - self.mutant_index
self.fuzz_complete = True
self.mutant_index = self.num_mutations()
self.value = self.original_value
return num
def mutate (self):
'''
Mutate the primitive by stepping through the fuzz library, return False on completion.
@rtype: Boolean
@return: True on success, False otherwise.
'''
# if we've ran out of mutations, raise the completion flag.
if self.mutant_index == self.num_mutations():
self.fuzz_complete = True
# if fuzzing was disabled or complete, and mutate() is called, ensure the original value is restored.
if not self.fuzzable or self.fuzz_complete:
self.value = self.original_value
return False
# update the current value from the fuzz library.
self.value = self.fuzz_library[self.mutant_index]
# increment the mutation count.
self.mutant_index += 1
return True
def num_mutations (self):
'''
Calculate and return the total number of mutations for this individual primitive.
@rtype: Integer
@return: Number of mutated forms this primitive can take
'''
return len(self.fuzz_library)
def render (self):
'''
Nothing fancy on render, simply return the value.
'''
self.rendered = self.value
return self.rendered
def reset (self):
'''
Reset this primitive to the starting mutation state.
'''
self.fuzz_complete = False
self.mutant_index = 0
self.value = self.original_value
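# Typical driver loop (illustrative sketch; send() stands in for hypothetical
# transport code and is not part of sulley):
#   p = string("default")
#   while p.mutate():
#       send(p.render())
#   p.reset()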
########################################################################################################################
class delim (base_primitive):
def __init__ (self, value, fuzzable=True, name=None):
'''
Represent a delimiter such as :,\r,\n, ,=,>,< etc... Mutations include repetition, substitution and exclusion.
@type value: Character
@param value: Original value
@type fuzzable: Boolean
@param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
@type name: String
@param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
'''
self.value = self.original_value = value
self.fuzzable = fuzzable
self.name = name
self.s_type = "delim" # for ease of object identification
self.rendered = "" # rendered value
self.fuzz_complete = False # flag if this primitive has been completely fuzzed
self.fuzz_library = [] # library of fuzz heuristics
self.mutant_index = 0 # current mutation number
#
# build the library of fuzz heuristics.
#
# if the default delim is not blank, repeat it a bunch of times.
if self.value:
self.fuzz_library.append(self.value * 2)
self.fuzz_library.append(self.value * 5)
self.fuzz_library.append(self.value * 10)
self.fuzz_library.append(self.value * 25)
self.fuzz_library.append(self.value * 100)
self.fuzz_library.append(self.value * 500)
self.fuzz_library.append(self.value * 1000)
# try omitting the delimiter.
self.fuzz_library.append("")
# if the delimiter is a space, try throwing out some tabs.
if self.value == " ":
self.fuzz_library.append("\t")
self.fuzz_library.append("\t" * 2)
self.fuzz_library.append("\t" * 100)
# toss in some other common delimiters:
self.fuzz_library.append(" ")
self.fuzz_library.append("\t")
self.fuzz_library.append("\t " * 100)
self.fuzz_library.append("\t\r\n" * 100)
self.fuzz_library.append("!")
self.fuzz_library.append("@")
self.fuzz_library.append("#")
self.fuzz_library.append("$")
self.fuzz_library.append("%")
self.fuzz_library.append("^")
self.fuzz_library.append("&")
self.fuzz_library.append("*")
self.fuzz_library.append("(")
self.fuzz_library.append(")")
self.fuzz_library.append("-")
self.fuzz_library.append("_")
self.fuzz_library.append("+")
self.fuzz_library.append("=")
self.fuzz_library.append(":")
self.fuzz_library.append(": " * 100)
self.fuzz_library.append(":7" * 100)
self.fuzz_library.append(";")
self.fuzz_library.append("'")
self.fuzz_library.append("\"")
self.fuzz_library.append("/")
self.fuzz_library.append("\\")
self.fuzz_library.append("?")
self.fuzz_library.append("<")
self.fuzz_library.append(">")
self.fuzz_library.append(".")
self.fuzz_library.append(",")
self.fuzz_library.append("\r")
self.fuzz_library.append("\n")
self.fuzz_library.append("\r\n" * 64)
self.fuzz_library.append("\r\n" * 128)
self.fuzz_library.append("\r\n" * 512)
########################################################################################################################
class group (base_primitive):
def __init__ (self, name, values):
'''
This primitive represents a list of static values, stepping through each one on mutation. You can tie a block
to a group primitive to specify that the block should cycle through all possible mutations for *each* value
within the group. The group primitive is useful for example for representing a list of valid opcodes.
@type name: String
@param name: Name of group
@type values: List or raw data
@param values: List of possible raw values this group can take.
'''
self.name = name
self.values = values
self.fuzzable = True
self.s_type = "group"
self.value = self.values[0]
self.original_value = self.values[0]
self.rendered = ""
self.fuzz_complete = False
self.mutant_index = 1 # XXX - should start mutating at 1, since the first item is the default. right?
# sanity check that values list only contains strings (or raw data)
if self.values != []:
for val in self.values:
assert type(val) is str, "Value list may only contain strings or raw data"
def mutate (self):
'''
Move to the next item in the values list.
@rtype: Boolean
@return: True on success, False otherwise.
'''
if self.mutant_index == self.num_mutations():
self.fuzz_complete = True
# if fuzzing was disabled or complete, and mutate() is called, ensure the original value is restored.
if not self.fuzzable or self.fuzz_complete:
self.value = self.values[0]
return False
# step through the value list.
self.value = self.values[self.mutant_index]
# increment the mutation count.
self.mutant_index += 1
return True
def num_mutations (self):
'''
Number of values in this primitive.
@rtype: Integer
@return: Number of values in this primitive.
'''
return len(self.values)
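# Illustrative: a group cycling through a fixed set of verbs.
#   g = group("http_verb", ["GET", "POST", "HEAD"])
#   g.num_mutations() == 3; mutate() steps g.value through the remaining
#   items (the first entry is the default value).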
########################################################################################################################
class random_data (base_primitive):
def __init__ (self, value, min_length, max_length, max_mutations=25, fuzzable=True, name=None):
'''
Generate a random chunk of data while maintaining a copy of the original. A random length range can be specified.
For a static length, set min/max length to be the same.
@type value: Raw
@param value: Original value
@type min_length: Integer
@param min_length: Minimum length of random block
@type max_length: Integer
@param max_length: Maximum length of random block
@type max_mutations: Integer
@param max_mutations: (Optional, def=25) Number of mutations to make before reverting to default
@type fuzzable: Boolean
@param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
@type name: String
@param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
'''
self.value = self.original_value = str(value)
self.min_length = min_length
self.max_length = max_length
self.max_mutations = max_mutations
self.fuzzable = fuzzable
self.name = name
self.s_type = "random_data" # for ease of object identification
self.rendered = "" # rendered value
self.fuzz_complete = False # flag if this primitive has been completely fuzzed
self.mutant_index = 0 # current mutation number
def mutate (self):
'''
Mutate the primitive value returning False on completion.
@rtype: Boolean
@return: True on success, False otherwise.
'''
# if we've ran out of mutations, raise the completion flag.
if self.mutant_index == self.num_mutations():
self.fuzz_complete = True
# if fuzzing was disabled or complete, and mutate() is called, ensure the original value is restored.
if not self.fuzzable or self.fuzz_complete:
self.value = self.original_value
return False
# select a random length for this string.
length = random.randint(self.min_length, self.max_length)
# reset the value and generate a random string of the determined length.
self.value = ""
for i in xrange(length):
self.value += chr(random.randint(0, 255))
# increment the mutation count.
self.mutant_index += 1
return True
def num_mutations (self):
'''
Calculate and return the total number of mutations for this individual primitive.
@rtype: Integer
@return: Number of mutated forms this primitive can take
'''
return self.max_mutations
########################################################################################################################
class static (base_primitive):
def __init__ (self, value, name=None):
'''
Primitive that contains static content.
@type value: Raw
@param value: Raw static data
@type name: String
@param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
'''
self.value = self.original_value = value
self.name = name
self.fuzzable = False # every primitive needs this attribute.
self.mutant_index = 0
self.s_type = "static" # for ease of object identification
self.rendered = ""
self.fuzz_complete = True
def mutate (self):
'''
Do nothing.
@rtype: False
@return: False
'''
return False
def num_mutations (self):
'''
Return 0.
@rtype: 0
@return: 0
'''
return 0
########################################################################################################################
long_strings = []
#gen_strings()
def add_long_strings(sequence, max_len):
'''
Given a sequence, generate a number of selectively chosen strings lengths of the given sequence and add to the
string heuristic library.
@type sequence: String
@param sequence: Sequence to repeat for creation of fuzz strings.
'''
length = 2**7
power = 7
while length <= max_len:
long_string = sequence * length
long_strings.append(long_string)
long_string = sequence * (length + 1)
long_strings.append(long_string)
long_string = sequence * (length - 1)
long_strings.append(long_string)
power += 1
length = 2**power
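# e.g. add_long_strings("A", 512) appends "A" repeated 128, 129, 127, 256,
# 257, 255, 512, 513 and 511 times -- powers of two plus their off-by-one
# borders, where length-handling bugs tend to live.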
def gen_strings(max_len=8192):
# add some long strings.
add_long_strings("A", max_len)
add_long_strings("B", max_len)
add_long_strings("1", max_len)
add_long_strings("2", max_len)
add_long_strings("<", max_len)
add_long_strings(">", max_len)
add_long_strings("'", max_len)
add_long_strings("\"", max_len)
add_long_strings("/", max_len)
add_long_strings("\\", max_len)
add_long_strings("?", max_len)
add_long_strings("=", max_len)
add_long_strings("a=", max_len)
add_long_strings("&", max_len)
add_long_strings(".", max_len)
add_long_strings(",", max_len)
add_long_strings("(", max_len)
add_long_strings(")", max_len)
add_long_strings("]", max_len)
add_long_strings("[", max_len)
add_long_strings("%", max_len)
add_long_strings("*", max_len)
add_long_strings("-", max_len)
add_long_strings("+", max_len)
add_long_strings("{", max_len)
add_long_strings("}", max_len)
add_long_strings("\x14", max_len)
add_long_strings("\xFE", max_len) # expands to 4 characters under utf16
add_long_strings("\xFF", max_len) # expands to 4 characters under utf16
# add some long strings with null bytes etc thrown in the middle of it.
length = 2**7
power = 7
while length <= max_len:
s = "B" * length
for val in ['\x00', ':', '.', ';', ',', '\r\n', '\r', '\n']:
tmp = s[:len(s)/2] + val + s[len(s)/2:]
long_strings.append(tmp)
power += 1
length = 2**power
# add some long strings with non-repeating elements. An attempt to avoid overflow detection
# chars = string.letters + string.digits
# for length in [128, 255, 256, 257, 511, 512, 513, 1023, 1024, 2048, 2049, 4095, 4096, 4097, 5000, 10000, 20000,
# 32762, 32763, 32764, 32765, 32766, 32767, 32768, 32769, 0xFFFF]:
# long_strings.append(os.urandom(length))
class string (base_primitive):
def __init__ (self, value, size=-1, padding="\x00", encoding="ascii", fuzzable=True, name=None):
'''
Primitive that cycles through a library of "bad" strings.
@type value: String
@param value: Default string value
@type size: Integer
@param size: (Optional, def=-1) Static size of this field, leave -1 for dynamic.
@type padding: Character
@param padding: (Optional, def="\\x00") Value to use as padding to fill static field size.
@type encoding: String
@param encoding: (Optional, def="ascii") String encoding, ex: utf_16_le for Microsoft Unicode.
@type fuzzable: Boolean
@param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
@type name: String
@param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
'''
self.value = self.original_value = value
self.size = size
self.padding = padding
self.encoding = encoding
self.fuzzable = fuzzable
self.name = name
self.s_type = "string" # for ease of object identification
self.rendered = "" # rendered value
self.fuzz_complete = False # flag if this primitive has been completely fuzzed
self.mutant_index = 0 # current mutation number
self.fuzz_library = \
[
# omission and repetition.
"",
self.value * 2,
self.value * 10,
self.value * 100,
# UTF-8
self.value * 2 + "\xfe",
self.value * 10 + "\xfe",
self.value * 100 + "\xfe",
# strings ripped from spike (and some others I added)
"/.:/" + "A"*5000 + "\x00\x00",
"/.../" + "A"*5000 + "\x00\x00",
"/.../.../.../.../.../.../.../.../.../.../",
"/../../../../../../../../../../../../etc/passwd",
"/../../../../../../../../../../../../boot.ini",
"..:..:..:..:..:..:..:..:..:..:..:..:..:",
"\\\\*",
"\\\\?\\",
"/\\" * 5000,
"/." * 5000,
"!@#$%%^#$%#$@#$%$$@#$%^^**(()",
"%01%02%03%04%0a%0d%0aADSF",
"%01%02%03@%04%0a%0d%0aADSF",
"/%00/",
"%00/",
"%00",
"%u0000",
# format strings.
"%n" * 100,
"%n" * 500,
"\"%n\"" * 500,
"%s" * 100,
"%s" * 500,
"\"%s\"" * 500,
# command injection.
"|touch /tmp/SULLEY",
";touch /tmp/SULLEY;",
"|notepad",
";notepad;",
"\nnotepad\n",
# SQL injection.
"1;SELECT%20*",
"'sqlattempt1",
"(sqlattempt2)",
"OR%201=1",
# some binary strings.
"\xde\xad\xbe\xef",
"\xde\xad\xbe\xef" * 10,
"\xde\xad\xbe\xef" * 100,
"\xde\xad\xbe\xef" * 1000,
"\xde\xad\xbe\xef" * 10000,
"\x00" * 1000,
# miscellaneous.
"\r\n" * 100,
"<>" * 500, # sendmail crackaddr (http://lsd-pl.net/other/sendmail.txt)
]
self.fuzz_library.extend(long_strings)
# if the optional file '.fuzz_strings' is found, parse each line as a new entry for the fuzz library.
try:
fh = open(".fuzz_strings", "r")
for fuzz_string in fh.readlines():
fuzz_string = fuzz_string.rstrip("\r\n")
if fuzz_string != "":
self.fuzz_library.append(fuzz_string)
fh.close()
except:
pass
# truncate fuzz library items to user-supplied length and pad, removing duplicates.
unique_mutants = []
if self.size != -1:
for mutant in self.fuzz_library:
# truncate.
if len(mutant) > self.size:
mutant = mutant[:self.size]
# pad.
elif len(mutant) < self.size:
mutant = mutant + self.padding * (self.size - len(mutant))
# add to unique list.
if mutant not in unique_mutants:
unique_mutants.append(mutant)
# assign unique list as fuzz library.
self.fuzz_library = unique_mutants
def render (self):
'''
Render the primitive, encode the string according to the specified encoding.
'''
# try to encode the string properly and fall back to the default value on failure.
try:
self.rendered = str(self.value).encode(self.encoding)
except:
self.rendered = self.value
return self.rendered
########################################################################################################################
class bit_field (base_primitive):
def __init__ (self, value, width, max_num=None, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None, hex_vals=False):
'''
The bit field primitive represents a number of variable length and is used to define all other integer types.
@type value: Integer
@param value: Default integer value
@type width: Integer
@param width: Width of bit fields
@type endian: Character
@param endian: (Optional, def=LITTLE_ENDIAN) Endianness of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >)
@type format: String
@param format: (Optional, def=binary) Output format, "binary" or "ascii"
@type signed: Boolean
@param signed: (Optional, def=False) Make size signed vs. unsigned (applicable only with format="ascii")
@type full_range: Boolean
@param full_range: (Optional, def=False) If enabled the field mutates through *all* possible values.
@type fuzzable: Boolean
@param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
@type name: String
@param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
@type hex_vals: Boolean
@param hex_vals: (Optional, def=False) Only applicable when format="ascii". Return the hex representation of the fuzz values
'''
assert(type(value) is int or type(value) is long)
        assert(type(width) is int or type(width) is long)
self.value = self.original_value = value
# if hex_vals is true then what we want to be in value is the hex equivalent of
# whatever the value is. It has already been converted to an int so we just
# convert it now to its hex string
if hex_vals == True:
self.value = hex(self.value)
self.original_value = hex(self.original_value)
self.width = width
self.max_num = max_num
self.endian = endian
self.format = format
self.signed = signed
self.full_range = full_range
self.fuzzable = fuzzable
self.name = name
self.hex_vals = hex_vals
self.rendered = "" # rendered value
self.fuzz_complete = False # flag if this primitive has been completely fuzzed
self.fuzz_library = [] # library of fuzz heuristics
self.mutant_index = 0 # current mutation number
if self.max_num == None:
self.max_num = self.to_decimal("1" * width)
assert(type(self.max_num) is int or type(self.max_num) is long)
# build the fuzz library.
if self.full_range:
# add all possible values.
for i in xrange(0, self.max_num):
self.fuzz_library.append(i)
else:
# try only "smart" values.
self.add_integer_boundaries(0)
self.add_integer_boundaries(self.max_num / 2)
self.add_integer_boundaries(self.max_num / 3)
self.add_integer_boundaries(self.max_num / 4)
self.add_integer_boundaries(self.max_num / 8)
self.add_integer_boundaries(self.max_num / 16)
self.add_integer_boundaries(self.max_num / 32)
self.add_integer_boundaries(self.max_num)
# if the optional file '.fuzz_ints' is found, parse each line as a new entry for the fuzz library.
try:
fh = open(".fuzz_ints", "r")
for fuzz_int in fh.readlines():
# convert the line into an integer, continue on failure.
try:
fuzz_int = long(fuzz_int, 16)
except:
continue
if fuzz_int <= self.max_num:
self.fuzz_library.append(fuzz_int)
fh.close()
except:
pass
def add_integer_boundaries (self, integer):
'''
Add the supplied integer and border cases to the integer fuzz heuristics library.
@type integer: Int
@param integer: Integer to append to fuzz heuristics
'''
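        # e.g. integer=0 queues the border cases 0 through 9; candidates below 0
        # or above max_num are filtered out by the range check below.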
for i in xrange(-10, 10):
case = integer + i
# ensure the border case falls within the valid range for this field.
if 0 <= case <= self.max_num:
if case not in self.fuzz_library:
self.fuzz_library.append(case)
def render (self):
'''
Render the primitive.
'''
#
# binary formatting.
#
# if hex_vals is true the value has already been converted to a string of the hex representation
# of the value so first convert it back for these operations e.g 0xFFFF would have been converted to
# 65535 then to 'FFFF' and now we need 65535 back for a few operations. All of this is so I didn't have to
# make major changes in Sulley. We only do this if it is the original_value in place, otherwise
# no changes are needed
tmp_val = self.value
tmp_orig_val = self.original_value
if self.value == self.original_value and self.hex_vals == True:
self.value = int(self.value, 16)
self.original_value = int(self.original_value, 16)
if self.format == "binary":
bit_stream = ""
rendered = ""
# pad the bit stream to the next byte boundary.
if self.width % 8 == 0:
bit_stream += self.to_binary()
else:
bit_stream = "0" * (8 - (self.width % 8))
bit_stream += self.to_binary()
# convert the bit stream from a string of bits into raw bytes.
for i in xrange(len(bit_stream) / 8):
chunk = bit_stream[8*i:8*i+8]
rendered += struct.pack("B", self.to_decimal(chunk))
# if necessary, convert the endianess of the raw bytes.
if self.endian == "<":
rendered = list(rendered)
rendered.reverse()
rendered = "".join(rendered)
self.rendered = rendered
#
# ascii formatting.
#
else:
# if the sign flag is raised and we are dealing with a signed integer (first bit is 1).
if self.signed and self.to_binary()[0] == "1":
max_num = self.to_decimal("0" + "1" * (self.width - 1))
# chop off the sign bit.
val = self.value & max_num
# account for the fact that the negative scale works backwards.
val = max_num - val
# toss in the negative sign.
self.rendered = "%d" % ~val
# unsigned integer or positive signed integer.
else:
self.rendered = "%d" % self.value
# if we want the hex value in ascii form e.g A instead of 10
if self.hex_vals == True:
self.rendered = hex(int(self.rendered))[2:]
self.value = tmp_val
self.original_value = tmp_orig_val
return self.rendered
def to_binary (self, number=None, bit_count=None):
'''
Convert a number to a binary string.
@type number: Integer
@param number: (Optional, def=self.value) Number to convert
@type bit_count: Integer
@param bit_count: (Optional, def=self.width) Width of bit string
@rtype: String
@return: Bit string
'''
if number == None:
number = self.value
if bit_count == None:
bit_count = self.width
return "".join(map(lambda x:str((number >> x) & 1), range(bit_count -1, -1, -1)))
def to_decimal (self, binary):
'''
Convert a binary string to a decimal number.
@type binary: String
@param binary: Binary string
@rtype: Integer
@return: Converted bit string
'''
return int(binary, 2)
########################################################################################################################
class byte (bit_field):
def __init__ (self, value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None, hex_vals=False):
self.s_type = "byte"
if type(value) not in [int, long]:
value = struct.unpack(endian + "B", value)[0]
bit_field.__init__(self, value, 8, None, endian, format, signed, full_range, fuzzable, name, hex_vals)
########################################################################################################################
class word (bit_field):
def __init__ (self, value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None, hex_vals=False):
self.s_type = "word"
if type(value) not in [int, long]:
value = struct.unpack(endian + "H", value)[0]
bit_field.__init__(self, value, 16, None, endian, format, signed, full_range, fuzzable, name, hex_vals)
########################################################################################################################
class dword (bit_field):
def __init__ (self, value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None, hex_vals=False):
self.s_type = "dword"
if type(value) not in [int, long]:
value = struct.unpack(endian + "L", value)[0]
bit_field.__init__(self, value, 32, None, endian, format, signed, full_range, fuzzable, name, hex_vals)
########################################################################################################################
class qword (bit_field):
def __init__ (self, value, endian="<", format="binary", signed=False, full_range=False, fuzzable=True, name=None, hex_vals=False):
self.s_type = "qword"
if type(value) not in [int, long]:
value = struct.unpack(endian + "Q", value)[0]
bit_field.__init__(self, value, 64, None, endian, format, signed, full_range, fuzzable, name, hex_vals)
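# Example usage (hypothetical values): these convenience classes are bit_field
# with a fixed width, e.g.:
#   word(0x1234, endian="<").render()       # -> "\x34\x12" (little-endian binary)
#   dword(0xdeadbeef, endian=">").render()  # -> "\xde\xad\xbe\xef"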
| gpl-3.0 |
dkuner/example-modules | modules/modeling/CDH4/hive_atomics/hive_union/specparser.py | 38 | 27392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, DataCanvasIO
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
A minimum spec.json parser.
"""
__version__ = "0.2.14"
__author__ = "xiaolin"
import json
from collections import namedtuple
import types
import re
import os
import sys
import time
import itertools
import subprocess
def gettype(name):
type_map = {
"string" : "str",
"integer" : "int",
"float" : "float",
"enum" : "str",
"file" : "str"
}
if name not in type_map:
raise ValueError(name)
t = __builtins__.get(type_map[name], types.StringType)
if isinstance(t, type):
return t
raise ValueError(name)
def read_whole_file(filename):
with open(filename, "r") as f:
return f.read()
class Input(str):
def __new__(self, x, _types):
return str.__new__(self, x)
def __init__(self, x, _types):
self.x = x
self._types = _types
def __repr__(self):
return str(self.x)
def __str__(self):
return str(self.x)
def as_first_line(self):
with open(self.x, "r") as f:
return f.readline().rstrip()
def as_whole(self):
with open(self.x, "r") as f:
return f.read()
def as_file(self, mode="r"):
return open(self.x, mode)
def as_datasource(self, mode="r"):
ds = json.loads(open(self.x, mode).read())
return ds
@property
def val(self):
# TODO: fix types handling
if any([re.match(r"datasource.*", t) for t in self._types]):
return self.as_datasource()['URL']
else:
return self.as_first_line()
@property
def types(self):
return self._types
class Output(str):
def __new__(self, x, _types):
return str.__new__(self, x)
def __init__(self, x, _types):
self.x = x
self._types = _types
def __repr__(self):
return str(self.x)
def __str__(self):
return str(self.x)
def as_first_line(self):
with open(self.x, "r") as f:
return f.readline().rstrip()
def as_whole(self):
with open(self.x, "r") as f:
return f.read()
def as_file(self, mode="r"):
return open(self.x, mode)
@property
def val(self):
return self.as_first_line()
@val.setter
def val(self, value):
with open(self.x, "w+") as f:
f.write(value)
@property
def types(self):
return self._types
class Param(str):
def __new__(self, x, typeinfo):
return str.__new__(self, x)
def __init__(self, x, typeinfo):
self._x = x
self._typeinfo = typeinfo
def __repr__(self):
return str(self._x)
def __str__(self):
return str(self._x)
@property
def val(self):
type_handler = {
"string" : lambda x: x,
"float" : lambda x: float(x),
"integer" : lambda x: int(x),
"enum" : lambda x: x,
"file" : read_whole_file
}
param_type = self._typeinfo['Type']
if param_type in type_handler:
return type_handler[param_type](self._x)
else:
return self._x
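# A minimal sketch of how these wrappers are consumed (hypothetical spec keys):
#   settings = get_settings_from_file("spec.json")
#   settings.Input.data.val    # first line (or datasource URL) of the input file
#   settings.Param.alpha.val   # e.g. "0.5" coerced to float when Type is "float"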
def input_output_builder(spec_input, spec_output):
import sys
params = dict(arg.split("=") for arg in sys.argv[1:])
if not all(k in params for k in spec_input.keys()):
raise ValueError("Missing input parameters")
if not all(k in params for k in spec_output.keys()):
raise ValueError("Missing output parameters")
InputSettings = namedtuple('InputSettings', spec_input.keys())
in_params = {in_k:Input(params[in_k], in_type) for in_k, in_type in spec_input.items()}
input_settings = InputSettings(**in_params)
OutputSettings = namedtuple('OutputSettings', spec_output.keys())
out_params = {out_k:Output(params[out_k], out_type) for out_k,out_type in spec_output.items()}
output_settings = OutputSettings(**out_params)
return input_settings, output_settings
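# Modules built on this parser are invoked with "key=value" CLI arguments, one
# per Input/Output declared in spec.json, e.g. (hypothetical names):
#   python module.py data_in=/tmp/in.csv result_out=/tmp/out.csv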
def param_builder(spec_param, param_json):
def get_param(k):
if k in param_json:
return param_json[k]['Val']
else:
return spec_param[k]['Default']
ParamSettings = namedtuple('ParamSettings', spec_param.keys())
param_dict = {k:Param(get_param(k), v) for k, v in spec_param.items()}
env_settings = ParamSettings(**param_dict)
return env_settings
def global_param_builder(param_json):
return {k:v['Val'] for k, v in param_json.items()}
def get_settings(spec_json):
moderate_keys = ['Name', 'Param', 'Input', 'Output', 'Cmd', 'Description']
if not all(k in spec_json for k in moderate_keys):
raise ValueError("One of param from %s may not exist in 'spec.json'" % str(moderate_keys))
# TODO: condition for appending 'GlobalParam'
moderate_keys.append('GlobalParam')
ModuleSetting = namedtuple('ModuleSetting', moderate_keys)
# Load parameters
param_json = get_json_file(os.getenv("ZETRT"))
param = param_builder(spec_json['Param'], param_json['PARAM'])
json_input, json_output = input_output_builder(spec_json['Input'], spec_json['Output'])
# TODO:
global_param = global_param_builder(param_json['GLOBAL_PARAM'])
settings = ModuleSetting(Name=spec_json['Name'], Description=spec_json['Description'], Param=param, Input=json_input, Output=json_output, Cmd=spec_json['Cmd'], GlobalParam=global_param)
return settings
def get_json_file(filename):
with open(filename, "r") as f:
return json.load(f)
def get_settings_from_file(filename):
with open(filename, "r") as f:
return get_settings(json.load(f))
def get_settings_from_string(spec_json_str):
print(json.loads(spec_json_str))
return get_settings(json.loads(spec_json_str))
# Various Runtime: Hive, Hadoop, Pig
class ZetRuntime(object):
def __init__(self, spec_filename="spec.json"):
self.settings = get_settings_from_file(spec_filename)
def __repr__(self):
return str(self.settings)
class HadoopRuntime(ZetRuntime):
def __init__(self, spec_filename="spec.json"):
super(HadoopRuntime, self).__init__(spec_filename=spec_filename)
@property
def hdfs_root(self):
ps = self.settings
if 'hdfs_root' in ps.Param._asdict():
return ps.Param.hdfs_root.val
else:
return '/'
def get_hdfs_working_dir(self, path=""):
ps = self.settings
glb_vars = ps.GlobalParam
# return os.path.join(self.hdfs_root, 'tmp/zetjob', glb_vars['userName'], "job%s" % glb_vars['jobId'], "blk%s" % glb_vars['blockId'], path)
remote_path = os.path.normpath(os.path.join('tmp/zetjob', glb_vars['userName'], "job%s" % glb_vars['jobId'], "blk%s" % glb_vars['blockId'], path))
return os.path.join(self.hdfs_root, remote_path)
def get_hive_namespace(self):
ps = self.settings
glb_vars = ps.GlobalParam
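        # e.g. returns "zetjobns_alice_job42_blk7" for userName=alice, jobId=42, blockId=7 (hypothetical values)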
return "zetjobns_%s_job%s_blk%s" % (glb_vars['userName'], glb_vars['jobId'], glb_vars['blockId'])
def hdfs_upload_dir(self, local_dir):
for root_dir, dirs, files in os.walk(local_dir):
for f in sorted(files):
f = os.path.normpath(os.path.join(root_dir, f))
f_remote = self.get_hdfs_working_dir(f)
hdfs_safe_upload(f, f_remote)
yield f_remote
def hdfs_clean_working_dir(self):
hdfs_working_dir = self.get_hdfs_working_dir()
if not clean_hdfs_path(hdfs_working_dir):
# TODO : refactor to 'HiveException'
raise Exception("Can not clean hdfs path : %s" % hdfs_working_dir)
def clean_working_dir(self):
self.hdfs_clean_working_dir()
class EmrRuntime(HadoopRuntime):
def __init__(self, spec_filename="spec.json"):
import boto
from boto.emr.connection import EmrConnection, RegionInfo
super(EmrRuntime, self).__init__(spec_filename)
p = self.settings.Param
self.s3_conn = boto.connect_s3(p.AWS_ACCESS_KEY_ID, p.AWS_ACCESS_KEY_SECRET)
self.s3_bucket = self.s3_conn.get_bucket(p.S3_BUCKET)
self.region = p.AWS_Region
self.emr_conn = EmrConnection(p.AWS_ACCESS_KEY_ID, p.AWS_ACCESS_KEY_SECRET,
region = RegionInfo(name = self.region,
endpoint = self.region + '.elasticmapreduce.amazonaws.com'))
self.job_flow_id = p.EMR_jobFlowId
def get_s3_working_dir(self, path=""):
ps = self.settings
glb_vars = ps.GlobalParam
remote_path = os.path.normpath(os.path.join(self.s3_bucket.name, 'zetjob', glb_vars['userName'], "job%s" % glb_vars['jobId'], "blk%s" % glb_vars['blockId'], path))
return os.path.join("s3://", remote_path)
def get_emr_job_name(self):
ps = self.settings
glb_vars = ps.GlobalParam
return os.path.join('zetjob', glb_vars['userName'], "job%s" % glb_vars['jobId'], "blk%s" % glb_vars['blockId'])
def s3_upload_dir(self, local_dir):
print("EmrHiveRuntime.s3_uploader()")
print("s3_upload_dir :::: %s" % local_dir)
if local_dir == "":
return
if not os.path.isdir(local_dir):
return
s3_upload_dir = self.get_s3_working_dir(local_dir)
ext_files = [f for f in sorted(os.listdir(local_dir)) if os.path.isfile(os.path.join(local_dir,f))]
for f in ext_files:
f_local = os.path.join(local_dir, f)
f_remote_full = self.get_s3_working_dir(os.path.join(local_dir, f))
print("S3 Upload :: %s ====> %s" % (f_local, s3_upload_dir))
print("S3 remote_full :: %s" % f_remote_full)
yield s3_upload(self.s3_bucket, f_local, f_remote_full)
def s3_clean_working_dir(self):
s3_working_dir = self.get_s3_working_dir()
if not s3_delete(self.s3_bucket, s3_working_dir):
# TODO : refactor to 'HiveException'
raise Exception("Can not clean s3 path : %s" % s3_working_dir)
def s3_upload(self, filename):
from urlparse import urlparse
parse_ret = urlparse(filename)
if parse_ret.scheme == '':
s3_working_dir = self.get_s3_working_dir()
file_remote = os.path.join(s3_working_dir, os.path.normpath(os.path.basename(filename)))
file_remote_full = s3_upload(self.s3_bucket, filename, file_remote)
return file_remote_full
elif parse_ret.scheme == 's3':
return filename
else:
raise ValueError("Invalid filename to upload to s3: %s" % filename)
def clean_working_dir(self):
self.s3_clean_working_dir()
class HiveRuntime(HadoopRuntime):
def files_uploader(self, local_dir):
return self.hdfs_upload_dir(local_dir)
def hive_output_builder(self, output_name, output_obj):
# TODO: refactor this method
ps = self.settings
glb_vars = ps.GlobalParam
out_type = output_obj.types[0]
if out_type.startswith("hive.table"):
return "zetjob_%s_job%s_blk%s_OUTPUT_%s" % (glb_vars['userName'], glb_vars['jobId'], glb_vars['blockId'], output_name)
elif out_type.startswith("hive.hdfs"):
return self.get_hdfs_working_dir("OUTPUT_%s" % output_name)
else:
raise ValueError("Invalid type for hive, type must start with 'hive.table' or 'hive.hdfs'")
def header_builder(self, hive_ns, uploaded_files, uploaded_jars):
# Build Output Tables
for output_name,output_obj in self.settings.Output._asdict().items():
output_obj.val = self.hive_output_builder(output_name, output_obj)
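        # The generated header is roughly (hypothetical values):
        #   ADD FILE /tmp/zetjob/alice/job42/blk7/udf.py;
        #   set hivevar:MYNS = zetjobns_alice_job42_blk7;
        #   set hivevar:PARAM_alpha = 0.5;
        #   set hivevar:OUTPUT_result = zetjob_alice_job42_blk7_OUTPUT_result;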
return "\n".join(
itertools.chain(
["ADD FILE %s;" % f for f in uploaded_files],
["ADD JAR %s;" % f for f in uploaded_jars],
["set hivevar:MYNS = %s;" % hive_ns],
["set hivevar:PARAM_%s = %s;" % (k,v) for k,v in self.settings.Param._asdict().items()],
["set hivevar:INPUT_%s = %s;" % (k,v.val) for k,v in self.settings.Input._asdict().items()],
["set hivevar:OUTPUT_%s = %s;" % (k,v.val) for k,v in self.settings.Output._asdict().items()]))
def clean_working_dir(self):
self.hdfs_clean_working_dir()
def generate_script(self, hive_script, target_filename=None):
hive_ns = self.get_hive_namespace()
# Upload files and UDF jars
if 'FILE_DIR' in self.settings.Param._asdict():
file_dir = self.settings.Param.FILE_DIR
uploaded_files = self.files_uploader(file_dir.val)
else:
uploaded_files = []
if 'UDF_DIR' in self.settings.Param._asdict():
jar_dir = self.settings.Param.UDF_DIR
uploaded_jars = self.files_uploader(jar_dir.val)
else:
uploaded_jars = []
# Build Input, Output and Param
header = self.header_builder(hive_ns, uploaded_files, uploaded_jars)
if target_filename == None:
import tempfile
tmp_file = tempfile.NamedTemporaryFile(prefix="hive_generated_", suffix=".hql", delete=False)
tmp_file.close()
target_filename = tmp_file.name
with open(hive_script, "r") as f, open(target_filename, "w+") as out_f:
out_f.write("--------------------------\n")
out_f.write("-- Header\n")
out_f.write("--------------------------\n")
out_f.write(header)
out_f.write("\n")
out_f.write("--------------------------\n")
out_f.write("-- Main\n")
out_f.write("--------------------------\n")
out_f.write("\n")
out_f.write(f.read())
return target_filename
def execute(self, hive_script, generated_hive_script=None):
self.clean_working_dir()
generated_hive_script = self.generate_script(hive_script, generated_hive_script)
if cmd("beeline -u jdbc:hive2://%s:%s -n hive -p tiger -d org.apache.hive.jdbc.HiveDriver -f '%s' --verbose=true "
% (self.settings.Param.HiveServer2_Host, self.settings.Param.HiveServer2_Port, generated_hive_script)) != 0:
raise Exception("Failed to execute hive script : %s" % generated_hive_script)
class EmrHiveRuntime(EmrRuntime, HiveRuntime):
def __init__(self, spec_filename="spec.json"):
super(EmrHiveRuntime, self).__init__(spec_filename)
def hive_output_builder(self, output_name, output_obj):
# TODO : should refactor this function to base class
ps = self.settings
glb_vars = ps.GlobalParam
out_type = output_obj.types[0]
if out_type.startswith("hive.table"):
return "zetjob_%s_job%s_blk%s_OUTPUT_%s" % (glb_vars['userName'], glb_vars['jobId'], glb_vars['blockId'], output_name)
elif out_type.startswith("hive.hdfs"):
return self.get_hdfs_working_dir("OUTPUT_%s" % output_name)
elif out_type.startswith("hive.s3"):
return self.get_s3_working_dir("OUTPUT_%s" % output_name)
else:
raise ValueError("Invalid type for hive, type must start with 'hive.table' or 'hive.hdfs' or 'hive.s3'")
def files_uploader(self, local_dir):
return self.s3_upload_dir(local_dir)
def emr_execute_hive(self, s3_hive_script):
from boto.emr.step import HiveStep
hive_step = HiveStep(name=self.get_emr_job_name(), hive_file=s3_hive_script)
self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[hive_step])
emr_wait_job(self.emr_conn, self.job_flow_id)
def execute(self, main_hive_script, generated_hive_script=None):
self.clean_working_dir()
hive_script_local = self.generate_script(main_hive_script, generated_hive_script)
s3_working_dir = self.get_s3_working_dir()
hive_script_remote = os.path.join(s3_working_dir, os.path.basename(hive_script_local))
hive_script_remote_full = s3_upload(self.s3_bucket, hive_script_local, hive_script_remote)
print("========= Generated Hive Script =========")
print(open(hive_script_local).read())
print("=========================================")
print("EmrHiveRuntime.execute()")
self.emr_execute_hive(hive_script_remote_full)
class EmrJarRuntime(EmrRuntime):
def __init__(self, spec_filename="spec.json"):
super(EmrJarRuntime, self).__init__(spec_filename)
def execute(self, jar_path, args):
from boto.emr.step import JarStep
s3_jar_path = s3_upload(self.s3_bucket, jar_path, self.get_s3_working_dir(jar_path))
# s3_jar_path = "s3://run-jars/jar/mahout-core-1.0-SNAPSHOT-job.jar"
print("Uploading jar to s3 : %s -> %s" % (jar_path, s3_jar_path))
print("Add jobflow step")
step = JarStep(name='cl_filter', jar=s3_jar_path, step_args=args)
self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[step])
print("Waiting jobflow step done")
emr_wait_job(self.emr_conn, self.job_flow_id)
class PigRuntime(HadoopRuntime):
def __init__(self, spec_filename="spec.json"):
super(PigRuntime, self).__init__(spec_filename)
def files_uploader(self, local_dir):
return self.hdfs_upload_dir(local_dir)
def pig_output_builder(self, output_name, output_obj):
# TODO: refactor this method
ps = self.settings
glb_vars = ps.GlobalParam
out_type = output_obj.types[0]
if out_type.startswith("pig.hdfs"):
return self.get_hdfs_working_dir("OUTPUT_%s" % output_name)
else:
raise ValueError("Invalid type for pig, type must start with 'pig.hdfs'")
def header_builder(self, uploaded_jars):
# Build Output Tables
for output_name,output_obj in self.settings.Output._asdict().items():
output_obj.val = self.pig_output_builder(output_name, output_obj)
return "\n".join(
itertools.chain(
["%%declare PARAM_%s '%s'" % (k,v) for k,v in self.settings.Param._asdict().items()],
["%%declare INPUT_%s '%s'" % (k,v.val) for k,v in self.settings.Input._asdict().items()],
["%%declare OUTPUT_%s '%s'" % (k,v.val) for k,v in self.settings.Output._asdict().items()],
["REGISTER '%s';" % f for f in uploaded_jars]
))
def generate_script(self, pig_script, target_filename=None):
if 'UDF_DIR' in self.settings.Param._asdict():
jar_dir = self.settings.Param.UDF_DIR
uploaded_jars = self.files_uploader(jar_dir.val)
else:
uploaded_jars = []
# Build Input, Output and Param
header = self.header_builder(uploaded_jars)
if target_filename == None:
import tempfile
tmp_file = tempfile.NamedTemporaryFile(prefix="pig_generated_", suffix=".hql", delete=False)
tmp_file.close()
target_filename = tmp_file.name
with open(pig_script, "r") as f, open(target_filename, "w+") as out_f:
out_f.write("/*************************\n")
out_f.write(" * Header\n")
out_f.write(" *************************/\n")
out_f.write(header)
out_f.write("\n")
out_f.write("/*************************\n")
out_f.write(" * Main\n")
out_f.write(" *************************/\n")
out_f.write("\n")
out_f.write(f.read())
return target_filename
def generate_pig_conf(self):
ps = self.settings
glb_vars = ps.GlobalParam
with open("/home/run/pig.properties", "a") as pf:
pf.write("fs.default.name=%s\n" % ps.Param.hdfs_root)
pf.write("yarn.resourcemanager.address=%s\n" % ps.Param.yarn_address)
pf.write("yarn.resourcemanager.scheduler.address=%s\n" % ps.Param.yarn_scheduler_address)
cmd("cat /home/run/pig.properties")
def execute(self, pig_script):
self.clean_working_dir()
self.generate_pig_conf()
generated_pig_script = self.generate_script(pig_script)
print("========= Generated Pig Script =========")
print(open(generated_pig_script).read())
print("=========================================")
print("EmrHiveRuntime.execute()")
cmd("pig -x mapreduce -P /home/run/pig.properties %s" % generated_pig_script)
class EmrPigRuntime(EmrRuntime, PigRuntime):
def __init__(self, spec_filename="spec.json"):
super(EmrPigRuntime, self).__init__(spec_filename)
def files_uploader(self, local_dir):
return self.s3_upload_dir(local_dir)
def pig_output_builder(self, output_name, output_obj):
# TODO : should refactor this function to base class
ps = self.settings
glb_vars = ps.GlobalParam
out_type = output_obj.types[0]
if out_type.startswith("pig.hdfs"):
return self.get_hdfs_working_dir("OUTPUT_%s" % output_name)
elif out_type.startswith("pig.s3"):
return self.get_s3_working_dir("OUTPUT_%s" % output_name)
else:
raise ValueError("Invalid type for pig, type must start with 'pig.hdfs' or 'pig.s3'")
def emr_execute_pig(self, pig_filename):
from boto.emr.step import PigStep
s3_pig_script = self.s3_upload(pig_filename)
pig_step = PigStep(name=self.get_emr_job_name(), pig_file=s3_pig_script)
self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[pig_step])
emr_wait_job(self.emr_conn, self.job_flow_id)
def execute(self, pig_script):
self.clean_working_dir()
# TODO: upload S3 additional files
generated_pig_script = self.generate_script(pig_script)
print("========= Generated Pig Script =========")
print(open(generated_pig_script).read())
print("=========================================")
print("EmrHiveRuntime.execute()")
self.emr_execute_pig(generated_pig_script)
# Utility Functions
def clean_hdfs_path(p):
if cmd("hadoop fs -rm -r -f %s && hadoop fs -mkdir -p %s" % (p, p)) == 0:
return True
else:
return False
def hdfs_safe_upload(f_local, f_remote):
f_remote_dir = os.path.dirname(f_remote)
if cmd("hadoop fs -mkdir -p %s" % f_remote_dir) != 0:
raise Exception("Failed to create dir %s" % f_remote_dir)
print("HDFS Upload :: %s ====> %s" % (f_local, f_remote))
if cmd("hadoop fs -copyFromLocal %s %s" % (f_local, f_remote_dir)) != 0:
raise Exception("Failed to upload file %s to %s" % (f_local, f_remote_dir))
def percent_cb(complete, total):
sys.stdout.write('.')
sys.stdout.flush()
def s3_delete(bucket, s3_path):
import boto
from urlparse import urlparse
print("s3_delete %s" % s3_path)
prefix_path = urlparse(s3_path).path[1:]
for key in bucket.list(prefix=prefix_path):
key.delete()
return True
def s3_upload(bucket, local_filename, remote_filename):
import boto
from urlparse import urlparse
# max size in bytes before uploading in parts.
# between 1 and 5 GB recommended
MAX_SIZE = 40 * 1000 * 1000
# size of parts when uploading in parts
PART_SIZE = 6 * 1000 * 1000
fn_local = os.path.normpath(local_filename)
fn_remote = urlparse(remote_filename).path
fn_remote_full = remote_filename
filesize = os.path.getsize(local_filename)
print("filesize = %d, maxsize = %d" % (filesize, MAX_SIZE))
if filesize > MAX_SIZE:
print("Multi-part uploading...")
print("From : %s" % fn_local)
print("To : %s" % fn_remote_full)
        mp = bucket.initiate_multipart_upload(fn_remote)
        fp = open(fn_local, 'rb')
fp_num = 0
while (fp.tell() < filesize):
fp_num += 1
print "uploading part %i" % fp_num
mp.upload_part_from_file(fp, fp_num, cb=percent_cb, num_cb=10, size=PART_SIZE)
mp.complete_upload()
print("")
else:
print("Single-part upload...")
print("From : %s" % fn_local)
print("To : %s" % fn_remote_full)
k = boto.s3.key.Key(bucket)
k.key = fn_remote
k.set_contents_from_filename(fn_local, cb=percent_cb, num_cb=10)
print("")
return fn_remote_full
def emr_wait_job(emr_conn, job_flow_id):
blocking_states = ['STARTING', 'BOOTSTRAPPING', 'RUNNING']
cnt = 60 * 60 * 1 # 1 hour
time.sleep(10)
while cnt > 0:
jf_state = emr_conn.describe_jobflow(job_flow_id).state
print("jobflow_state = %s" % jf_state)
if jf_state not in blocking_states:
if jf_state == 'WAITING':
print("Job done, continue...")
return True
else:
print("Job may failed.")
return False
cnt = cnt - 1
time.sleep(10)
return False
def cmd(cmd_str):
print("Execute External Command : '%s'" % cmd_str)
ret = subprocess.call(cmd_str, shell=True)
print("Exit with exit code = %d" % ret)
return ret
if __name__ == "__main__":
# settings = get_settings_from_file("spec.json")
# print(settings)
# print(settings.Input)
# print(settings.Output)
# print("-----------------")
# i = Input("test.param")
# print(i)
# print(i.as_one_line())
# print(i.as_all_line())
# t = MyTest(4)
# print(t.val)
# t.val = 5
# print(t.val)
# o = Output("out.param")
# print(o)
# print(o.val)
# o.val = "cacaca"
# settings = get_settings_from_file("spec.json")
# hive_runtime = HiveRuntime()
# print(hive_runtime)
emr_hive_runtime = EmrHiveRuntime()
emr_hive_runtime.execute() | bsd-3-clause |
MartinDelzant/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
ericmjl/bokeh | examples/models/file/choropleth.py | 1 | 2189 | from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import ColorBar, ColumnDataSource, LinearColorMapper, Patches, Plot
from bokeh.palettes import Viridis11
from bokeh.resources import INLINE
from bokeh.sampledata import unemployment, us_counties, us_states
from bokeh.transform import transform
from bokeh.util.browser import view
us_states = us_states.data.copy()
us_counties = us_counties.data
unemployment = unemployment.data
del us_states["HI"]
del us_states["AK"]
state_source = ColumnDataSource(
data=dict(
state_xs=[us_states[code]["lons"] for code in us_states],
state_ys=[us_states[code]["lats"] for code in us_states],
)
)
cmap = LinearColorMapper(palette=Viridis11, low=min(unemployment.values()), high=max(unemployment.values()))
county_source = ColumnDataSource(
data=dict(
county_xs=[us_counties[code]["lons"] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
county_ys=[us_counties[code]["lats"] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
rate=[unemployment[code] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
)
)
plot = Plot(min_border=0, border_fill_color="white", plot_width=1300, plot_height=700)
plot.title.text = "2009 Unemployment Data"
plot.toolbar_location = None
county_patches = Patches(xs="county_xs", ys="county_ys", fill_color=transform("rate", cmap), fill_alpha=0.7, line_color="white", line_width=0.5)
plot.add_glyph(county_source, county_patches)
state_patches = Patches(xs="state_xs", ys="state_ys", fill_alpha=0.0, line_color="#884444", line_width=2)
plot.add_glyph(state_source, state_patches)
cbar = ColorBar(color_mapper=cmap, location=(0, 0))
plot.add_layout(cbar, 'left')
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
doc.validate()
filename = "choropleth.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Choropleth of all US counties, Unemployment 2009"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
saurabh6790/omnitech-lib | conf/conf.py | 33 | 1034 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# app configuration
# database config
db_name = '%(db_name)s'
db_password = '%(db_password)s'
# user attachments stored in
files_path = 'public/files'
public_path = 'public'
# max file attachment size (default 1MB)
max_file_size = 1000000
# max email size in bytes
max_email_size = 0
# total pop session timeout in seconds
pop_timeout = 0
# generate schema (.txt files)
developer_mode = 0
# clear cache on refresh
auto_cache_clear = 0
# email logs to admin (beta)
admin_email_notification = 0
# user timezone
user_timezone = 'Asia/Calcutta'
# dump backups here
backup_path = 'public/backups'
# outgoing mail settings
mail_server = None
mail_login = None
mail_password = None
mail_port = None
use_ssl = None
auto_email_id = None
# logging settings
log_file_name = 'logs/error_log.txt'
debug_log_dbs = []
log_level = 'logging.INFO'
log_file_size = 5000
log_file_backup_count = 5
| mit |
CydarLtd/ansible | lib/ansible/modules/monitoring/sensu_subscription.py | 60 | 5312 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_subscription
short_description: Manage Sensu subscriptions
version_added: 2.2
description:
- Manage which I(sensu channels) a machine should subscribe to
options:
name:
description:
- The name of the channel
required: true
state:
description:
- Whether the machine should subscribe or unsubscribe from the channel
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the subscriptions json file
required: false
default: /etc/sensu/conf.d/subscriptions.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
requirements: [ ]
author: Anders Ingemann
'''
RETURN = '''
reasons:
    description: the reasons why the module changed or did not change anything
returned: success
type: list
sample: ["channel subscription was absent and state is `present'"]
'''
EXAMPLES = '''
# Subscribe to the nginx channel
- name: subscribe to nginx checks
sensu_subscription: name=nginx
# Unsubscribe from the common checks channel
- name: unsubscribe from common checks
sensu_subscription: name=common state=absent
'''
def sensu_subscription(module, path, name, state='present', backup=False):
changed = False
reasons = []
try:
import json
except ImportError:
import simplejson as json
try:
config = json.load(open(path))
except IOError:
e = get_exception()
        if e.errno == 2:  # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=str(e))
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
if 'client' not in config:
if state == 'absent':
reasons.append('`client\' did not exist and state is `absent\'')
return changed, reasons
config['client'] = {}
changed = True
reasons.append('`client\' did not exist')
if 'subscriptions' not in config['client']:
if state == 'absent':
reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
            return changed, reasons
config['client']['subscriptions'] = []
changed = True
reasons.append('`client.subscriptions\' did not exist')
if name not in config['client']['subscriptions']:
if state == 'absent':
reasons.append('channel subscription was absent')
            return changed, reasons
config['client']['subscriptions'].append(name)
changed = True
reasons.append('channel subscription was absent and state is `present\'')
else:
if state == 'absent':
config['client']['subscriptions'].remove(name)
changed = True
reasons.append('channel subscription was present and state is `absent\'')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
try:
open(path, 'w').write(json.dumps(config, indent=2) + '\n')
except IOError:
e = get_exception()
module.fail_json(msg='Failed to write to file %s: %s' % (path, str(e)))
return changed, reasons
def main():
arg_spec = {'name': {'type': 'str', 'required': True},
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
                'backup': {'type': 'bool', 'default': 'no'},
}
module = AnsibleModule(argument_spec=arg_spec,
supports_check_mode=True)
path = module.params['path']
name = module.params['name']
state = module.params['state']
backup = module.params['backup']
changed, reasons = sensu_subscription(module, path, name, state, backup)
module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
eistre91/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
trim = max(1, n/10)
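    # e.g. n=20 gives trim=2, so we average the middle 16 sorted values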
return np.mean(sorted(t)[trim:n-trim])
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
hds-lab/textvisdrg | msgvis/apps/datatable/models.py | 1 | 30970 | from django.db import models
from django.db.models import Q
from datetime import timedelta
import operator
from msgvis.apps.base.models import MappedValuesQuerySet
from msgvis.apps.corpus import models as corpus_models
from msgvis.apps.groups import models as groups_models
from msgvis.apps.dimensions import registry
from msgvis.apps.corpus import utils
import re
from django.db import connection
MAX_CATEGORICAL_LEVELS = 10
def find_messages(queryset):
"""If the given queryset is actually a :class:`.Dataset` model, get its messages queryset."""
if isinstance(queryset, corpus_models.Dataset):
queryset = queryset.message_set.all()
return queryset
def get_field_name(text):
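    """Returns the last field of a Django-style lookup, e.g. "hashtags__text" -> "text"."""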
    pattern = re.compile(r'(?<=__)\w+')
results = pattern.search(text)
if results:
return results.group()
return None
def fetchall(sql):
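    """Executes a raw SQL query and returns the first column of each row as a list."""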
sql = utils.convert_boolean(sql)
cursor = connection.cursor()
cursor.execute(sql)
desc = cursor.description
return [
row[0]
for row in cursor.fetchall()
]
def fetchall_table(sql):
    """Execute a raw query and return each row as a dict keyed by column name."""
    sql = utils.convert_boolean(sql)
cursor = connection.cursor()
cursor.execute(sql)
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def group_messages_by_dimension_with_raw_query(query, dimension, callback):
    """Count messages per level of a related categorical dimension.

    Wraps the raw message query in an outer SELECT that joins against the
    dimension's related table (directly for foreign keys, or through the
    many-to-many table otherwise) and passes the final SQL to callback.
    """
    queryset = corpus_models.Message.objects.raw(query)
message_id = corpus_models.Message._meta.model_name + "_id" #message_id
fieldname = get_field_name(dimension.field_name)
key = dimension.key
related_mgr = getattr(corpus_models.Message, dimension.key)
if hasattr(related_mgr, "RelatedObjectDoesNotExist"):
related_table = related_mgr.field.rel.to._meta.db_table
related_id = related_mgr.field.rel.to._meta.model._meta.model_name + "_id"
if related_id == "person_id":
related_id = "sender_id"
elif related_id == "messagetype_id":
related_id = "type_id"
final_query = "SELECT B.%s AS `%s`, count(*) AS `value` FROM (%s) AS A, `%s` AS B WHERE A.%s=B.id GROUP BY B.%s ORDER BY `value` DESC" %(fieldname, key, query, related_table, related_id, fieldname)
else:
if hasattr(related_mgr, "field"):
through_table = related_mgr.through._meta.db_table # e.g., corpus_message_hashtags
related_table = related_mgr.field.rel.to._meta.db_table # e.g., corpus_hashtag
related_id = related_mgr.field.rel.to._meta.model._meta.model_name + "_id" # e.g., hashtag_id
elif hasattr(related_mgr, "related"):
through_table = related_mgr.related.field.rel.through._meta.db_table # e.g., enhance_messageword
related_table = related_mgr.related.model._meta.db_table # e.g., enhance_word
related_id = related_mgr.related.model._meta.model_name + "_id" # e.g., word_id
final_query = "SELECT B.%s AS `%s`, count(*) AS `value` FROM (%s) AS A, `%s` AS B, `%s` AS C WHERE A.id=C.%s AND B.id=C.%s GROUP BY B.%s ORDER BY `value` DESC" %(fieldname, key, query, related_table, through_table, message_id, related_id, fieldname)
return callback(final_query)
def group_messages_by_words_with_raw_query(query, callback):
    """Rewrite a message query into a per-word frequency query.

    Swaps the message column list for the joined word text plus a count,
    then groups by word. Relies on the word text column appearing as
    T<n>.`text` in the SQL that Django generated.
    """
    pattern = re.compile(r'T\d+.`text`')
results = pattern.search(query)
if results:
table = results.group()
query = query.replace("`corpus_message`.`id`, `corpus_message`.`dataset_id`, `corpus_message`.`original_id`, `corpus_message`.`type_id`, `corpus_message`.`sender_id`, `corpus_message`.`time`, `corpus_message`.`language_id`, `corpus_message`.`sentiment`, `corpus_message`.`timezone_id`, `corpus_message`.`replied_to_count`, `corpus_message`.`shared_count`, `corpus_message`.`contains_hashtag`, `corpus_message`.`contains_url`, `corpus_message`.`contains_media`, `corpus_message`.`contains_mention`, `corpus_message`.`text`",
"%s AS words, count(*) AS value" %(table))
query += "GROUP BY `words` ORDER BY `value` DESC"
return callback(query)
class DataTable(object):
"""
This class knows how to calculate appropriate visualization data
for a given pair of dimensions.
"""
def __init__(self, primary_dimension, secondary_dimension=None):
"""
Construct a DataTable for one or two dimensions.
Dimensions may be string dimension keys or
:class:`msgvis.apps.dimensions.models.CategoricalDimension` objects.
:type primary_dimension: registry.models.CategoricalDimension
:type secondary_dimension: registry.models.CategoricalDimension
:return:
"""
# Look up the dimensions if needed
if isinstance(primary_dimension, basestring):
primary_dimension = registry.get_dimension(primary_dimension)
if secondary_dimension is not None and isinstance(secondary_dimension, basestring):
secondary_dimension = registry.get_dimension(secondary_dimension)
        # Hack: the "groups" pseudo-dimension is handled by the groups code path in generate(), so drop it here.
if secondary_dimension is not None and hasattr(secondary_dimension, 'key') and secondary_dimension.key == "groups":
secondary_dimension = None
self.primary_dimension = primary_dimension
self.secondary_dimension = secondary_dimension
self.mode = "default"
def set_mode(self, mode):
self.mode = mode
def render(self, queryset, desired_primary_bins=None, desired_secondary_bins=None):
"""
Given a set of messages (already filtered as necessary),
calculate the data table.
Optionally, a number of primary and secondary bins may be given.
The result is a list of dictionaries. Each
dictionary contains a key for each dimension
and a value key for the count.
"""
if not self.secondary_dimension:
# If there is only one dimension, we should be able to fall back
# on that dimension's group_by() implementation.
queryset = self.primary_dimension.group_by(queryset,
grouping_key=self.primary_dimension.key,
bins=desired_primary_bins)
return queryset.annotate(value=models.Count('id'))
else:
# Now it gets nasty...
primary_group = self.primary_dimension.get_grouping_expression(queryset,
bins=desired_primary_bins)
secondary_group = self.secondary_dimension.get_grouping_expression(queryset,
bins=desired_secondary_bins)
if primary_group is None or secondary_group is None:
# There is no data to group
return queryset.values()
queryset, internal_primary_key = self.primary_dimension.select_grouping_expression(
queryset,
primary_group)
queryset, internal_secondary_key = self.secondary_dimension.select_grouping_expression(
queryset,
secondary_group)
# Group the data
queryset = queryset.values(internal_primary_key,
internal_secondary_key)
# Count the messages
queryset = queryset.annotate(value=models.Count('id'))
# We may need to remap some fields
mapping = {}
if internal_primary_key != self.primary_dimension.key:
mapping[internal_primary_key] = self.primary_dimension.key
if internal_secondary_key != self.secondary_dimension.key:
mapping[internal_secondary_key] = self.secondary_dimension.key
if len(mapping) > 0:
return MappedValuesQuerySet.create_from(queryset, mapping)
else:
return queryset
def render_others(self, queryset, domains, primary_flag, secondary_flag, desired_primary_bins=None, desired_secondary_bins=None):
"""
Given a set of messages (already filtered as necessary),
calculate the data table.
Optionally, a number of primary and secondary bins may be given.
The result is a list of dictionaries. Each
dictionary contains a key for each dimension
and a value key for the count.
"""
# check if any of the dimensions is categorical
if not primary_flag and not secondary_flag:
return None
if not self.secondary_dimension and self.primary_dimension.is_categorical() and primary_flag:
# If there is only one dimension, we should be able to fall back
# on that dimension's group_by() implementation.
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
domains[self.primary_dimension.key].append(u'Other ' + self.primary_dimension.name)
return [{self.primary_dimension.key: u'Other ' + self.primary_dimension.name, 'value': queryset.count()}]
elif self.secondary_dimension:
# both dimensions are categorical
if self.primary_dimension.is_categorical() and self.secondary_dimension.is_categorical():
original_queryset = queryset
others_results = []
if primary_flag:
domains[self.primary_dimension.key].append(u'Other ' + self.primary_dimension.name)
if secondary_flag:
domains[self.secondary_dimension.key].append(u'Other ' + self.secondary_dimension.name)
# primary others x secondary others
if primary_flag and secondary_flag:
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
queryset = queryset.exclude(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
others_results.append({self.primary_dimension.key: u'Other ' + self.primary_dimension.name,
self.secondary_dimension.key: u'Other ' + self.secondary_dimension.name,
'value': queryset.count()})
# primary top ones x secondary others
if secondary_flag:
queryset = original_queryset
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
queryset = queryset.exclude(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
queryset = self.primary_dimension.group_by(queryset,
grouping_key=self.primary_dimension.key)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.secondary_dimension.key] = u'Other ' + self.secondary_dimension.name
others_results.extend(results)
# primary others x secondary top ones
if primary_flag:
queryset = original_queryset
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
queryset = queryset.filter(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
queryset = self.secondary_dimension.group_by(queryset,
grouping_key=self.secondary_dimension.key)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.primary_dimension.key] = u'Other ' + self.primary_dimension.name
others_results.extend(results)
return others_results
# primary categorical and secondary quantitative
elif self.primary_dimension.is_categorical() and primary_flag and not self.secondary_dimension.is_categorical():
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
domains[self.primary_dimension.key].append(u'Other ' + self.primary_dimension.name)
queryset = self.secondary_dimension.group_by(queryset,
grouping_key=self.secondary_dimension.key,
bins=desired_secondary_bins)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.primary_dimension.key] = u'Other ' + self.primary_dimension.name
return results
# primary quantitative and secondary categorical
elif not self.primary_dimension.is_categorical() and self.secondary_dimension.is_categorical() and secondary_flag:
queryset = queryset.exclude(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
domains[self.secondary_dimension.key].append(u'Other ' + self.secondary_dimension.name)
queryset = self.primary_dimension.group_by(queryset,
grouping_key=self.primary_dimension.key,
bins=desired_primary_bins)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.secondary_dimension.key] = u'Other ' + self.secondary_dimension.name
return results
def domain(self, dimension, queryset, filter=None, exclude=None, desired_bins=None):
"""Return the sorted levels in this dimension"""
if filter is not None:
queryset = dimension.filter(queryset, **filter)
if exclude is not None:
queryset = dimension.exclude(queryset, **exclude)
domain = dimension.get_domain(queryset, bins=desired_bins)
labels = dimension.get_domain_labels(domain)
return domain, labels
def groups_domain(self, dimension, queryset_all, group_querysets, desired_bins=None):
"""Return the sorted levels in the union of groups in this dimension"""
if dimension.is_related_categorical():
query = ""
for idx, queryset in enumerate(group_querysets):
if idx > 0:
query += " UNION "
query += "(%s)" %(utils.quote(str(queryset.query)))
domain = group_messages_by_dimension_with_raw_query(query, dimension, fetchall)
else:
queryset = queryset_all
domain = dimension.get_domain(queryset, bins=desired_bins)
labels = dimension.get_domain_labels(domain)
return domain, labels
def filter_search_key(self, domain, labels, search_key):
match_domain = []
match_labels = []
for i in range(len(domain)):
level = domain[i]
if level is not None and level.lower().find(search_key.lower()) != -1 :
match_domain.append(level)
if labels is not None:
match_labels.append(labels[i])
return match_domain, match_labels
def generate(self, dataset, filters=None, exclude=None, page_size=100, page=None, search_key=None, groups=None):
"""
Generate a complete data group table response.
This includes 'table', which provides the non-zero
message frequency for each combination of primary and secondary dimension values,
respecting the filters.
It also includes 'domains', which provides, for both
primary and secondary dimensions, the levels of the
dimension irrespective of filters (except on those actual dimensions).
"""
        if groups is None:
queryset = dataset.message_set.all()
# Filter out null time
queryset = queryset.exclude(time__isnull=True)
            if dataset.start_time and dataset.end_time:
                # avoid shadowing the range/buffer builtins
                time_range = dataset.end_time - dataset.start_time
                time_buffer = timedelta(seconds=time_range.total_seconds() * 0.1)
                queryset = queryset.filter(time__gte=dataset.start_time - time_buffer,
                                           time__lte=dataset.end_time + time_buffer)
unfiltered_queryset = queryset
            # Filter the data (looking for filters on the primary/secondary dimensions at the same time)
primary_filter = None
secondary_filter = None
if filters is not None:
for filter in filters:
dimension = filter['dimension']
queryset = dimension.filter(queryset, **filter)
if dimension == self.primary_dimension:
primary_filter = filter
if dimension == self.secondary_dimension:
secondary_filter = filter
primary_exclude = None
secondary_exclude = None
if exclude is not None:
for exclude_filter in exclude:
dimension = exclude_filter['dimension']
queryset = dimension.exclude(queryset, **exclude_filter)
if dimension == self.primary_dimension:
primary_exclude = exclude_filter
if dimension == self.secondary_dimension:
secondary_exclude = exclude_filter
domains = {}
domain_labels = {}
max_page = None
queryset_for_others = None
# flag is true if the dimension is categorical and has more than MAX_CATEGORICAL_LEVELS levels
primary_flag = False
secondary_flag = False
# Include the domains for primary and (secondary) dimensions
domain, labels = self.domain(self.primary_dimension,
unfiltered_queryset,
primary_filter, primary_exclude)
            # Page the primary dimension; this supports the filter distribution UI
if primary_filter is None and self.secondary_dimension is None and page is not None:
if search_key is not None:
domain, labels = self.filter_search_key(domain, labels, search_key)
start = (page - 1) * page_size
end = min(start + page_size, len(domain))
max_page = (len(domain) / page_size) + 1
# no level left
if len(domain) == 0 or start > len(domain):
return None
domain = domain[start:end]
if labels is not None:
labels = labels[start:end]
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domain))
else:
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.primary_dimension.is_categorical() and len(domain) > MAX_CATEGORICAL_LEVELS:
primary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
queryset_for_others = queryset
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domain))
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.primary_dimension.key] = domain
if labels is not None:
domain_labels[self.primary_dimension.key] = labels
if self.secondary_dimension:
domain, labels = self.domain(self.secondary_dimension,
unfiltered_queryset,
secondary_filter, secondary_exclude)
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.secondary_dimension.is_categorical() and \
len(domain) > MAX_CATEGORICAL_LEVELS:
secondary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
if queryset_for_others is None:
queryset_for_others = queryset
queryset = queryset.filter(utils.levels_or(self.secondary_dimension.field_name, domain))
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.secondary_dimension.key] = domain
if labels is not None:
domain_labels[self.secondary_dimension.key] = labels
# Render a table
table = self.render(queryset)
if self.mode == "enable_others" and queryset_for_others is not None:
# adding others to the results
table_for_others = self.render_others(queryset_for_others, domains, primary_flag, secondary_flag)
table = list(table)
table.extend(table_for_others)
results = {
'table': table,
'domains': domains,
'domain_labels': domain_labels
}
if max_page is not None:
results['max_page'] = max_page
else:
domains = {}
domain_labels = {}
max_page = None
queryset_for_others = None
# flag is true if the dimension is categorical and has more than MAX_CATEGORICAL_LEVELS levels
primary_flag = False
secondary_flag = False
primary_filter = None
secondary_filter = None
primary_exclude = None
secondary_exclude = None
queryset = dataset.message_set.all()
queryset = queryset.exclude(time__isnull=True)
            if dataset.start_time and dataset.end_time:
                time_range = dataset.end_time - dataset.start_time
                time_buffer = timedelta(seconds=time_range.total_seconds() * 0.1)
                queryset = queryset.filter(time__gte=dataset.start_time - time_buffer,
                                           time__lte=dataset.end_time + time_buffer)
if filters is not None:
for filter in filters:
dimension = filter['dimension']
queryset = dimension.filter(queryset, **filter)
if dimension == self.primary_dimension:
primary_filter = filter
if dimension == self.secondary_dimension:
secondary_filter = filter
if exclude is not None:
for exclude_filter in exclude:
dimension = exclude_filter['dimension']
queryset = dimension.exclude(queryset, **exclude_filter)
if dimension == self.primary_dimension:
primary_exclude = exclude_filter
if dimension == self.secondary_dimension:
secondary_exclude = exclude_filter
queryset_all = queryset
#queryset = corpus_models.Message.objects.none()
group_querysets = []
group_labels = []
#message_list = set()
            for group in groups:
                group_obj = groups_models.Group.objects.get(id=group)
                if group_obj.order > 0:
                    group_labels.append("#%d %s" % (group_obj.order, group_obj.name))
                else:
                    group_labels.append(group_obj.name)
queryset = group_obj.messages
# Filter out null time
queryset = queryset.exclude(time__isnull=True)
                if dataset.start_time and dataset.end_time:
                    time_range = dataset.end_time - dataset.start_time
                    time_buffer = timedelta(seconds=time_range.total_seconds() * 0.1)
                    queryset = queryset.filter(time__gte=dataset.start_time - time_buffer,
                                               time__lte=dataset.end_time + time_buffer)
unfiltered_queryset = queryset
                # Filter the data (looking for filters on the primary/secondary dimensions at the same time)
if filters is not None:
for filter in filters:
dimension = filter['dimension']
queryset = dimension.filter(queryset, **filter)
if exclude is not None:
for exclude_filter in exclude:
dimension = exclude_filter['dimension']
queryset = dimension.exclude(queryset, **exclude_filter)
group_querysets.append(queryset)
            #########################################################################################################################
            # Deal with the union distribution. Django's ORM cannot express the
            # UNION of these querysets directly, so we fall back on raw SQL.
            # Super ugly; refactoring is required.
# Include the domains for primary and (secondary) dimensions
domain, labels = self.groups_domain(self.primary_dimension,
queryset_all, group_querysets)
            # Page the primary dimension; this supports the filter distribution UI
if primary_filter is None and self.secondary_dimension is None and page is not None:
if search_key is not None:
domain, labels = self.filter_search_key(domain, labels, search_key)
start = (page - 1) * page_size
end = min(start + page_size, len(domain))
max_page = (len(domain) / page_size) + 1
# no level left
if len(domain) == 0 or start > len(domain):
return None
domain = domain[start:end]
if labels is not None:
labels = labels[start:end]
else:
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.primary_dimension.is_categorical() and len(domain) > MAX_CATEGORICAL_LEVELS:
primary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.primary_dimension.key] = domain
if labels is not None:
domain_labels[self.primary_dimension.key] = labels
if self.secondary_dimension:
domain, labels = self.groups_domain(self.secondary_dimension,
queryset_all, group_querysets)
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.secondary_dimension.is_categorical() and \
len(domain) > MAX_CATEGORICAL_LEVELS:
secondary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.secondary_dimension.key] = domain
if labels is not None:
domain_labels[self.secondary_dimension.key] = labels
#########################################################################################################################
group_tables = []
for queryset in group_querysets:
queryset_for_others = queryset
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.primary_dimension.is_categorical():
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
if self.secondary_dimension:
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.secondary_dimension.is_categorical():
if queryset_for_others is None:
queryset_for_others = queryset
queryset = queryset.filter(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
# Render a table
if self.primary_dimension.key == "words":
table = group_messages_by_words_with_raw_query(utils.quote(str(queryset.query)), fetchall_table)
else:
table = self.render(queryset)
if self.mode == "enable_others" and queryset_for_others is not None:
# adding others to the results
table_for_others = self.render_others(queryset_for_others, domains, primary_flag, secondary_flag)
table = list(table)
table.extend(table_for_others)
group_tables.append(table)
if self.secondary_dimension is None:
final_table = []
for idx, group_table in enumerate(group_tables):
for item in group_table:
item['groups'] = groups[idx]
final_table.extend(group_table)
domains['groups'] = groups
domain_labels['groups'] = group_labels
results = {
'table': final_table,
'domains': domains,
'domain_labels': domain_labels
}
else:
tables = []
for idx, group_table in enumerate(group_tables):
tables.append({
'group_id': groups[idx],
'group_name': group_labels[idx],
'table': group_table
})
results = {
'tables': tables,
'domains': domains,
'domain_labels': domain_labels
}
if max_page is not None:
results['max_page'] = max_page
return results
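

# Hedged usage sketch (dimension keys such as 'time' and 'sentiment' are
# assumed to exist in the dimensions registry; adjust to the real keys):
#
#     table = DataTable('time', 'sentiment')
#     table.set_mode('enable_others')
#     results = table.generate(dataset)
#     # results['table'] is a list of dicts with one key per dimension plus
#     # a 'value' count; results['domains'] lists the levels that were used.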
| mit |
osvalr/odoo | addons/l10n_ae/__openerp__.py | 337 | 1579 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'U.A.E. - Accounting',
'version': '1.0',
'author': 'Tech Receptives',
'website': 'http://www.techreceptives.com',
'category': 'Localization/Account Charts',
'description': """
United Arab Emirates accounting chart and localization.
=======================================================
""",
'depends': ['base', 'account', 'account_chart'],
    'demo': [],
'data': [
'l10n_ae_chart.xml',
'l10n_ae_wizard.xml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
neumerance/cloudloon2 | .venv/lib/python2.7/site-packages/requests/packages/charade/langcyrillicmodel.py | 184 | 18054 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
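
# These model dicts are consumed by the single-byte charset probers in this
# package. A hedged sketch (feed/get_confidence are the standard prober
# methods; the input byte string is an assumed sample):
#
#     from charade.sbcharsetprober import SingleByteCharSetProber
#     prober = SingleByteCharSetProber(Win1251CyrillicModel)
#     prober.feed(some_bytes)
#     print(prober.get_confidence())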
# flake8: noqa
| apache-2.0 |
xsynergy510x/android_external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/android_traceview_profiler.py | 35 | 2545 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidTraceviewProfiler(profiler.Profiler):
"""Collects a Traceview on Android."""
_DEFAULT_DEVICE_DIR = '/data/local/tmp/traceview'
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidTraceviewProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
if self._browser_backend.adb.device().FileExists(self._DEFAULT_DEVICE_DIR):
self._browser_backend.adb.RunShellCommand(
'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
else:
self._browser_backend.adb.RunShellCommand(
'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
self._browser_backend.adb.RunShellCommand(
'chmod 777 ' + self._DEFAULT_DEVICE_DIR)
self._trace_files = []
for pid in self._GetProcessOutputFileMap().iterkeys():
device_dump_file = '%s/%s.trace' % (self._DEFAULT_DEVICE_DIR, pid)
self._trace_files.append((pid, device_dump_file))
self._browser_backend.adb.RunShellCommand('am profile %s start %s' %
(pid, device_dump_file))
@classmethod
def name(cls):
return 'android-traceview'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
output_files = []
for pid, trace_file in self._trace_files:
self._browser_backend.adb.RunShellCommand('am profile %s stop' % pid)
util.WaitFor(lambda: self._FileSize(trace_file) > 0, timeout=10)
output_files.append(trace_file)
self._browser_backend.adb.device().old_interface.Adb().Pull(
self._DEFAULT_DEVICE_DIR, self._output_path)
self._browser_backend.adb.RunShellCommand(
'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
print 'Traceview profiles available in ', self._output_path
print 'Use third_party/android_tools/sdk/tools/monitor '
print 'then use "File->Open File" to visualize them.'
return output_files
def _FileSize(self, file_name):
f = self._browser_backend.adb.device().Ls(file_name)
return f.get(os.path.basename(file_name), (0, ))[0]
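
# Hedged usage note: telemetry selects profilers by the name() registered
# above, typically via a command-line flag along the lines of
# --profiler=android-traceview (exact flag spelling assumed, derived from
# the name() classmethod).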
| bsd-3-clause |
Dahlgren/HTPC-Manager | libs/apscheduler/jobstores/memory.py | 33 | 3648 | from __future__ import absolute_import
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp
class MemoryJobStore(BaseJobStore):
"""
Stores jobs in an array in RAM. Provides no persistence support.
Plugin alias: ``memory``
"""
def __init__(self):
super(MemoryJobStore, self).__init__()
self._jobs = [] # list of (job, timestamp), sorted by next_run_time and job id (ascending)
self._jobs_index = {} # id -> (job, timestamp) lookup table
def lookup_job(self, job_id):
return self._jobs_index.get(job_id, (None, None))[0]
def get_due_jobs(self, now):
now_timestamp = datetime_to_utc_timestamp(now)
pending = []
for job, timestamp in self._jobs:
if timestamp is None or timestamp > now_timestamp:
break
pending.append(job)
return pending
def get_next_run_time(self):
return self._jobs[0][0].next_run_time if self._jobs else None
def get_all_jobs(self):
return [j[0] for j in self._jobs]
def add_job(self, job):
if job.id in self._jobs_index:
raise ConflictingIdError(job.id)
timestamp = datetime_to_utc_timestamp(job.next_run_time)
index = self._get_job_index(timestamp, job.id)
self._jobs.insert(index, (job, timestamp))
self._jobs_index[job.id] = (job, timestamp)
def update_job(self, job):
old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
if old_job is None:
raise JobLookupError(job.id)
# If the next run time has not changed, simply replace the job in its present index.
# Otherwise, reinsert the job to the list to preserve the ordering.
old_index = self._get_job_index(old_timestamp, old_job.id)
new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
if old_timestamp == new_timestamp:
self._jobs[old_index] = (job, new_timestamp)
else:
del self._jobs[old_index]
new_index = self._get_job_index(new_timestamp, job.id)
self._jobs.insert(new_index, (job, new_timestamp))
self._jobs_index[old_job.id] = (job, new_timestamp)
def remove_job(self, job_id):
job, timestamp = self._jobs_index.get(job_id, (None, None))
if job is None:
raise JobLookupError(job_id)
index = self._get_job_index(timestamp, job_id)
del self._jobs[index]
del self._jobs_index[job.id]
def remove_all_jobs(self):
self._jobs = []
self._jobs_index = {}
def shutdown(self):
self.remove_all_jobs()
def _get_job_index(self, timestamp, job_id):
"""
Returns the index of the given job, or if it's not found, the index where the job should be inserted based on
the given timestamp.
:type timestamp: int
:type job_id: str
"""
lo, hi = 0, len(self._jobs)
timestamp = float('inf') if timestamp is None else timestamp
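        # Hand-rolled bisect over (timestamp, job id) pairs; jobs with no
        # next run time sort last because None maps to +infinity above.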
while lo < hi:
mid = (lo + hi) // 2
mid_job, mid_timestamp = self._jobs[mid]
mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
if mid_timestamp > timestamp:
hi = mid
elif mid_timestamp < timestamp:
lo = mid + 1
elif mid_job.id > job_id:
hi = mid
elif mid_job.id < job_id:
lo = mid + 1
else:
return mid
return lo
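

# Hedged usage sketch (BackgroundScheduler is part of APScheduler 3.x;
# the interval job here is purely illustrative):
#
#     from apscheduler.schedulers.background import BackgroundScheduler
#     scheduler = BackgroundScheduler(jobstores={'default': MemoryJobStore()})
#     scheduler.add_job(lambda: None, 'interval', seconds=10)
#     scheduler.start()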
| mit |
ericsnowcurrently/micropython | tests/float/string_format.py | 21 | 5014 | # Change the following to True to get a much more comprehensive set of tests
# to run, albeit, which take considerably longer.
full_tests = False
def test(fmt, *args):
print('{:8s}'.format(fmt) + '>' + fmt.format(*args) + '<')
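
# e.g. test("{:5.2f}", 1.234) prints: {:5.2f} > 1.23<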
test("{:10.4}", 123.456)
test("{:10.4e}", 123.456)
test("{:10.4e}", -123.456)
test("{:10.4f}", 123.456)
test("{:10.4f}", -123.456)
test("{:10.4g}", 123.456)
test("{:10.4g}", -123.456)
test("{:10.4n}", 123.456)
test("{:e}", 100)
test("{:f}", 200)
test("{:g}", 300)
test("{:10.4E}", 123.456)
test("{:10.4E}", -123.456)
test("{:10.4F}", 123.456)
test("{:10.4F}", -123.456)
test("{:10.4G}", 123.456)
test("{:10.4G}", -123.456)
test("{:06e}", float("inf"))
test("{:06e}", float("-inf"))
test("{:06e}", float("nan"))
# The following fails right now
#test("{:10.1}", 0.0)
def test_fmt(conv, fill, alignment, sign, prefix, width, precision, type, arg):
fmt = '{'
if conv:
fmt += '!'
fmt += conv
fmt += ':'
if alignment:
fmt += fill
fmt += alignment
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
if fill == '0' and alignment == '=':
fmt = '{:'
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
eg_nums = (0.0, -0.0, 0.1, 1.234, 12.3459, 1.23456789, 123456789.0, -0.0,
-0.1, -1.234, -12.3459, 1e4, 1e-4, 1e5, 1e-5, 1e6, 1e-6, 1e10,
1e37, -1e37, 1e-37, -1e-37,
1.23456e8, 1.23456e7, 1.23456e6, 1.23456e5, 1.23456e4, 1.23456e3, 1.23456e2, 1.23456e1, 1.23456e0,
1.23456e-1, 1.23456e-2, 1.23456e-3, 1.23456e-4, 1.23456e-5, 1.23456e-6, 1.23456e-7, 1.23456e-8,
-1.23456e8, -1.23456e7, -1.23456e6, -1.23456e5, -1.23456e4, -1.23456e3, -1.23456e2, -1.23456e1, -1.23456e0,
-1.23456e-1, -1.23456e-2, -1.23456e-3, -1.23456e-4, -1.23456e-5, -1.23456e-6, -1.23456e-7, -1.23456e-8)
if full_tests:
for type in ('e', 'E', 'g', 'G', 'n'):
for width in ('', '4', '6', '8', '10'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', '@', '0', ' '):
for sign in ('', '+', '-', ' '):
for prec in ('', '1', '3', '6'):
for num in eg_nums:
test_fmt('', fill, alignment, sign, '', width, prec, type, num)
# Note: We use 1.23459 rather than 1.2345 because '{:3f}'.format(1.2345)
# rounds differently than print("%.3f", 1.2345);
f_nums = (0.0, -0.0, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0,
0.0012, 0.0123, 0.1234, 1.23459, 12.3456,
-0.0001, -0.001, -0.01, -0.1, -1.0, -10.0,
-0.0012, -0.0123, -0.1234, -1.23459, -12.3456)
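
# int_nums2 is referenced by the full-test loop below but is not defined in
# this snippet; a minimal assumed set of integers for empty-precision f/F
# formatting (values are illustrative, not from the original).
int_nums2 = (0, 1, -1, 12, -12, 123, -123, 1234, -1234)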
if full_tests:
for type in ('f', 'F'):
for width in ('', '4', '6', '8', '10'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', ' ', '0', '@'):
for sign in ('', '+', '-', ' '):
# An empty precision defaults to 6, but when uPy is
# configured to use a float, we can only use a
# precision of 6 with numbers less than 10 and still
# get results that compare to CPython (which uses
# long doubles).
for prec in ('1', '2', '3'):
for num in f_nums:
test_fmt('', fill, alignment, sign, '', width, prec, type, num)
for num in int_nums2:
test_fmt('', fill, alignment, sign, '', width, '', type, num)
pct_nums1 = (0.1, 0.58, 0.99, -0.1, -0.58, -0.99)
pct_nums2 = (True, False, 1, 0, -1)
if full_tests:
type = '%'
for width in ('', '4', '6', '8', '10'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', ' ', '0', '@'):
for sign in ('', '+', '-', ' '):
# An empty precision defaults to 6, but when uPy is
# configured to use a float, we can only use a
# precision of 6 with numbers less than 10 and still
# get results that compare to CPython (which uses
# long doubles).
for prec in ('1', '2', '3'):
for num in pct_nums1:
test_fmt('', fill, alignment, sign, '', width, prec, type, num)
for num in pct_nums2:
test_fmt('', fill, alignment, sign, '', width, '', type, num)
else:
for num in pct_nums1:
test_fmt('', '', '', '', '', '', '1', '%', num)
# We don't currently test a type of '' with floats (see the detailed comment
# in objstr.c)
# tests for errors in format string
try:
'{:10.1b}'.format(0.0)
except ValueError:
print('ValueError')
| mit |
metabrainz/picard | picard/dataobj.py | 3 | 2107 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006-2008 Lukáš Lalinský
# Copyright (C) 2011-2012 Michael Wiencek
# Copyright (C) 2013, 2020 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2017 Sophist-UK
# Copyright (C) 2018 Vishal Choudhary
# Copyright (C) 2018, 2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.config import get_config
from picard.util import LockableObject
class DataObject(LockableObject):
def __init__(self, obj_id):
super().__init__()
self.id = obj_id
self.genres = {}
self.item = None
def add_genre(self, name, count):
self.genres[name] = self.genres.get(name, 0) + count
@staticmethod
def set_genre_inc_params(inc):
require_authentication = False
config = get_config()
if config.setting['use_genres']:
use_folksonomy = config.setting['folksonomy_tags']
if config.setting['only_my_genres']:
require_authentication = True
inc += ['user-tags'] if use_folksonomy else ['user-genres']
else:
inc += ['tags'] if use_folksonomy else ['genres']
return require_authentication
@staticmethod
def merge_genres(this, that):
for name, count in that.items():
this[name] = this.get(name, 0) + count
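

# Hedged usage sketch (the id value is an assumed MusicBrainz identifier):
#
#     obj = DataObject('b10bbbfc-cf9e-42e0-be17-e2c3e1d2600d')
#     obj.add_genre('rock', 3)
#     obj.add_genre('rock', 2)   # counts accumulate: obj.genres == {'rock': 5}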
| gpl-2.0 |
agileblaze/OpenStackTwoFactorAuthentication | openstack_dashboard/api/rest/nova.py | 35 | 9198 |
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API over the nova service.
"""
from django.utils import http as utils_http
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
@urls.register
class Keypairs(generic.View):
"""API for nova keypairs.
"""
url_regex = r'nova/keypairs/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of keypairs associated with the current logged-in
account.
The listing result is an object with property "items".
"""
result = api.nova.keypair_list(request)
return {'items': [u.to_dict() for u in result]}
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a keypair.
Create a keypair using the parameters supplied in the POST
application/json object. The parameters are:
:param name: the name to give the keypair
:param public_key: (optional) a key to import
This returns the new keypair object on success.
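
        Example POST (a hedged sketch; "demo-key" is an assumed name):
        http://localhost/api/nova/keypairs with body {"name": "demo-key"}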
"""
if 'public_key' in request.DATA:
new = api.nova.keypair_import(request, request.DATA['name'],
request.DATA['public_key'])
else:
new = api.nova.keypair_create(request, request.DATA['name'])
return rest_utils.CreatedResponse(
'/api/nova/keypairs/%s' % utils_http.urlquote(new.name),
new.to_dict()
)
@urls.register
class AvailabilityZones(generic.View):
"""API for nova availability zones.
"""
url_regex = r'nova/availzones/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of availability zones.
The following get parameters may be passed in the GET
request:
:param detailed: If this equals "true" then the result will
include more detail.
The listing result is an object with property "items".
"""
detailed = request.GET.get('detailed') == 'true'
result = api.nova.availability_zone_list(request, detailed)
return {'items': [u.to_dict() for u in result]}
@urls.register
class Limits(generic.View):
"""API for nova limits.
"""
url_regex = r'nova/limits/$'
@rest_utils.ajax()
def get(self, request):
"""Get an object describing the current project limits.
Note: the Horizon API doesn't support any other project (tenant) but
the underlying client does...
The following get parameters may be passed in the GET
request:
:param reserved: This may be set to "true" but it's not
clear what the result of that is.
The result is an object with limits as properties.
"""
reserved = request.GET.get('reserved') == 'true'
result = api.nova.tenant_absolute_limits(request, reserved)
return result
@urls.register
class Servers(generic.View):
"""API over all servers.
"""
url_regex = r'nova/servers/$'
_optional_create = [
'block_device_mapping', 'block_device_mapping_v2', 'nics', 'meta',
'availability_zone', 'instance_count', 'admin_pass', 'disk_config',
'config_drive'
]
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a server.
Create a server using the parameters supplied in the POST
application/json object. The required parameters as specified by
the underlying novaclient are:
:param name: The new server name.
:param source_id: The ID of the image to use.
:param flavor_id: The ID of the flavor to use.
:param key_name: (optional extension) name of previously created
keypair to inject into the instance.
:param user_data: user data to pass to be exposed by the metadata
server this can be a file type object as well or a
string.
:param security_groups: An array of one or more objects with a "name"
attribute.
Other parameters are accepted as per the underlying novaclient:
"block_device_mapping", "block_device_mapping_v2", "nics", "meta",
"availability_zone", "instance_count", "admin_pass", "disk_config",
"config_drive"
This returns the new server object on success.
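
        Example POST (a hedged sketch; ids and names are assumed values):
        http://localhost/api/nova/servers with body
        {"name": "vm-1", "source_id": "<image id>", "flavor_id": "1",
         "key_name": "demo-key", "user_data": "",
         "security_groups": [{"name": "default"}]}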
"""
try:
args = (
request,
request.DATA['name'],
request.DATA['source_id'],
request.DATA['flavor_id'],
request.DATA['key_name'],
request.DATA['user_data'],
request.DATA['security_groups'],
)
except KeyError as e:
raise rest_utils.AjaxError(400, 'missing required parameter '
"'%s'" % e.args[0])
kw = {}
for name in self._optional_create:
if name in request.DATA:
kw[name] = request.DATA[name]
new = api.nova.server_create(*args, **kw)
return rest_utils.CreatedResponse(
'/api/nova/servers/%s' % utils_http.urlquote(new.id),
new.to_dict()
)
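# Illustrative only: a minimal request body for the Servers POST handler
# above, covering the required parameters from its docstring (all IDs and
# names are placeholders).
#
#   POST /api/nova/servers
#   {"name": "test-server", "source_id": "<image-id>", "flavor_id": "<flavor-id>",
#    "key_name": "my-keypair", "user_data": "",
#    "security_groups": [{"name": "default"}]}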
@urls.register
class Server(generic.View):
"""API for retrieving a single server
"""
url_regex = r'nova/servers/(?P<server_id>.+|default)$'
@rest_utils.ajax()
def get(self, request, server_id):
"""Get a specific server
http://localhost/api/nova/servers/1
"""
return api.nova.server_get(request, server_id).to_dict()
@urls.register
class Extensions(generic.View):
"""API for nova extensions.
"""
url_regex = r'nova/extensions/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of extensions.
The listing result is an object with property "items". Each item is
        an extension.
Example GET:
http://localhost/api/nova/extensions
"""
result = api.nova.list_extensions(request)
return {'items': [e.to_dict() for e in result]}
@urls.register
class Flavors(generic.View):
"""API for nova flavors.
"""
url_regex = r'nova/flavors/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of flavors.
The listing result is an object with property "items". Each item is
        a flavor. By default this will return the flavors for the user's
current project. If the user is admin, public flavors will also be
returned.
:param is_public: For a regular user, set to True to see all public
flavors. For an admin user, set to False to not see public flavors.
:param get_extras: Also retrieve the extra specs.
Example GET:
http://localhost/api/nova/flavors?is_public=true
"""
is_public = request.GET.get('is_public')
is_public = (is_public and is_public.lower() == 'true')
get_extras = request.GET.get('get_extras')
get_extras = bool(get_extras and get_extras.lower() == 'true')
flavors = api.nova.flavor_list(request, is_public=is_public,
get_extras=get_extras)
result = {'items': []}
for flavor in flavors:
d = flavor.to_dict()
if get_extras:
d['extras'] = flavor.extras
result['items'].append(d)
return result
@urls.register
class Flavor(generic.View):
"""API for retrieving a single flavor
"""
url_regex = r'nova/flavors/(?P<flavor_id>.+)/$'
@rest_utils.ajax()
def get(self, request, flavor_id):
"""Get a specific flavor
:param get_extras: Also retrieve the extra specs.
Example GET:
http://localhost/api/nova/flavors/1
"""
get_extras = request.GET.get('get_extras')
get_extras = bool(get_extras and get_extras.lower() == 'true')
flavor = api.nova.flavor_get(request, flavor_id, get_extras=get_extras)
result = flavor.to_dict()
if get_extras:
result['extras'] = flavor.extras
return result
@urls.register
class FlavorExtraSpecs(generic.View):
"""API for managing flavor extra specs
"""
url_regex = r'nova/flavors/(?P<flavor_id>.+)/extra-specs$'
@rest_utils.ajax()
def get(self, request, flavor_id):
"""Get a specific flavor's extra specs
Example GET:
http://localhost/api/nova/flavors/1/extra-specs
"""
return api.nova.flavor_get_extras(request, flavor_id, raw=True)
| apache-2.0 |
liberorbis/libernext | apps/erpnext/erpnext/accounts/doctype/journal_voucher/journal_voucher.py | 13 | 21988 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, fmt_money, formatdate, getdate
from frappe import msgprint, _, scrub
from erpnext.setup.utils import get_company_currency
from erpnext.controllers.accounts_controller import AccountsController
class JournalVoucher(AccountsController):
def __init__(self, arg1, arg2=None):
super(JournalVoucher, self).__init__(arg1, arg2)
def validate(self):
if not self.is_opening:
self.is_opening='No'
self.clearance_date = None
super(JournalVoucher, self).validate_date_with_fiscal_year()
self.validate_cheque_info()
self.validate_entries_for_advance()
self.validate_debit_and_credit()
self.validate_against_jv()
self.validate_against_sales_invoice()
self.validate_against_purchase_invoice()
self.set_against_account()
self.create_remarks()
self.set_aging_date()
self.set_print_format_fields()
self.validate_against_sales_order()
self.validate_against_purchase_order()
def on_submit(self):
if self.voucher_type in ['Bank Voucher', 'Contra Voucher', 'Journal Entry']:
self.check_reference_date()
self.make_gl_entries()
self.check_credit_limit()
self.update_advance_paid()
def update_advance_paid(self):
advance_paid = frappe._dict()
for d in self.get("entries"):
if d.is_advance:
if d.against_sales_order:
advance_paid.setdefault("Sales Order", []).append(d.against_sales_order)
elif d.against_purchase_order:
advance_paid.setdefault("Purchase Order", []).append(d.against_purchase_order)
for voucher_type, order_list in advance_paid.items():
for voucher_no in list(set(order_list)):
frappe.get_doc(voucher_type, voucher_no).set_total_advance_paid()
def on_cancel(self):
from erpnext.accounts.utils import remove_against_link_from_jv
remove_against_link_from_jv(self.doctype, self.name, "against_jv")
self.make_gl_entries(1)
self.update_advance_paid()
def validate_cheque_info(self):
if self.voucher_type in ['Bank Voucher']:
if not self.cheque_no or not self.cheque_date:
msgprint(_("Reference No & Reference Date is required for {0}").format(self.voucher_type),
raise_exception=1)
if self.cheque_date and not self.cheque_no:
msgprint(_("Reference No is mandatory if you entered Reference Date"), raise_exception=1)
def validate_entries_for_advance(self):
for d in self.get('entries'):
			if not (d.against_voucher or d.against_invoice or d.against_jv):
master_type = frappe.db.get_value("Account", d.account, "master_type")
if (master_type == 'Customer' and flt(d.credit) > 0) or \
(master_type == 'Supplier' and flt(d.debit) > 0):
if not d.is_advance:
msgprint(_("Row {0}: Please check 'Is Advance' against Account {1} if this is an advance entry.").format(d.idx, d.account))
elif (d.against_sales_order or d.against_purchase_order) and d.is_advance != "Yes":
frappe.throw(_("Row {0}: Payment against Sales/Purchase Order should always be marked as advance").format(d.idx))
def validate_against_jv(self):
for d in self.get('entries'):
if d.against_jv:
account_root_type = frappe.db.get_value("Account", d.account, "root_type")
if account_root_type == "Asset" and flt(d.debit) > 0:
frappe.throw(_("For {0}, only credit entries can be linked against another debit entry")
.format(d.account))
elif account_root_type == "Liability" and flt(d.credit) > 0:
frappe.throw(_("For {0}, only debit entries can be linked against another credit entry")
.format(d.account))
if d.against_jv == self.name:
frappe.throw(_("You can not enter current voucher in 'Against Journal Voucher' column"))
against_entries = frappe.db.sql("""select * from `tabJournal Voucher Detail`
where account = %s and docstatus = 1 and parent = %s
and ifnull(against_jv, '') = '' and ifnull(against_invoice, '') = ''
and ifnull(against_voucher, '') = ''""", (d.account, d.against_jv), as_dict=True)
if not against_entries:
frappe.throw(_("Journal Voucher {0} does not have account {1} or already matched against other voucher")
.format(d.against_jv, d.account))
else:
dr_or_cr = "debit" if d.credit > 0 else "credit"
valid = False
for jvd in against_entries:
if flt(jvd[dr_or_cr]) > 0:
valid = True
if not valid:
frappe.throw(_("Against Journal Voucher {0} does not have any unmatched {1} entry")
.format(d.against_jv, dr_or_cr))
def validate_against_sales_invoice(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_invoice", "Sales Invoice")
self.validate_against_invoice_fields("Sales Invoice", payment_against_voucher)
def validate_against_purchase_invoice(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_voucher", "Purchase Invoice")
self.validate_against_invoice_fields("Purchase Invoice", payment_against_voucher)
def validate_against_sales_order(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_sales_order", "Sales Order")
self.validate_against_order_fields("Sales Order", payment_against_voucher)
def validate_against_purchase_order(self):
payment_against_voucher = self.validate_account_in_against_voucher("against_purchase_order", "Purchase Order")
self.validate_against_order_fields("Purchase Order", payment_against_voucher)
def validate_account_in_against_voucher(self, against_field, doctype):
payment_against_voucher = frappe._dict()
field_dict = {'Sales Invoice': "Debit To",
'Purchase Invoice': "Credit To",
'Sales Order': "Customer",
'Purchase Order': "Supplier"
}
for d in self.get("entries"):
if d.get(against_field):
dr_or_cr = "credit" if against_field in ["against_invoice", "against_sales_order"] \
else "debit"
if against_field in ["against_invoice", "against_sales_order"] \
and flt(d.debit) > 0:
frappe.throw(_("Row {0}: Debit entry can not be linked with a {1}").format(d.idx, doctype))
if against_field in ["against_voucher", "against_purchase_order"] \
and flt(d.credit) > 0:
frappe.throw(_("Row {0}: Credit entry can not be linked with a {1}").format(d.idx, doctype))
voucher_account = frappe.db.get_value(doctype, d.get(against_field), \
scrub(field_dict.get(doctype)))
account_master_name = frappe.db.get_value("Account", d.account, "master_name")
if against_field in ["against_invoice", "against_voucher"] \
and voucher_account != d.account:
frappe.throw(_("Row {0}: Account {1} does not match with {2} {3} account") \
.format(d.idx, d.account, doctype, field_dict.get(doctype)))
if against_field in ["against_sales_order", "against_purchase_order"]:
if voucher_account != account_master_name:
frappe.throw(_("Row {0}: Account {1} does not match with {2} {3} Name") \
.format(d.idx, d.account, doctype, field_dict.get(doctype)))
elif d.is_advance == "Yes":
payment_against_voucher.setdefault(d.get(against_field), []).append(flt(d.get(dr_or_cr)))
return payment_against_voucher
def validate_against_invoice_fields(self, doctype, payment_against_voucher):
for voucher_no, payment_list in payment_against_voucher.items():
voucher_properties = frappe.db.get_value(doctype, voucher_no,
["docstatus", "outstanding_amount"])
if voucher_properties[0] != 1:
frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no))
if flt(voucher_properties[1]) < flt(sum(payment_list)):
frappe.throw(_("Payment against {0} {1} cannot be greater \
than Outstanding Amount {2}").format(doctype, voucher_no, voucher_properties[1]))
def validate_against_order_fields(self, doctype, payment_against_voucher):
for voucher_no, payment_list in payment_against_voucher.items():
voucher_properties = frappe.db.get_value(doctype, voucher_no,
["docstatus", "per_billed", "status", "advance_paid", "grand_total"])
if voucher_properties[0] != 1:
frappe.throw(_("{0} {1} is not submitted").format(doctype, voucher_no))
if flt(voucher_properties[1]) >= 100:
frappe.throw(_("{0} {1} is fully billed").format(doctype, voucher_no))
if cstr(voucher_properties[2]) == "Stopped":
frappe.throw(_("{0} {1} is stopped").format(doctype, voucher_no))
if flt(voucher_properties[4]) < flt(voucher_properties[3]) + flt(sum(payment_list)):
frappe.throw(_("Advance paid against {0} {1} cannot be greater \
than Grand Total {2}").format(doctype, voucher_no, voucher_properties[3]))
def set_against_account(self):
accounts_debited, accounts_credited = [], []
for d in self.get("entries"):
			if flt(d.debit) > 0: accounts_debited.append(d.account)
if flt(d.credit) > 0: accounts_credited.append(d.account)
for d in self.get("entries"):
			if flt(d.debit) > 0: d.against_account = ", ".join(list(set(accounts_credited)))
			if flt(d.credit) > 0: d.against_account = ", ".join(list(set(accounts_debited)))
def validate_debit_and_credit(self):
self.total_debit, self.total_credit, self.difference = 0, 0, 0
for d in self.get("entries"):
if d.debit and d.credit:
frappe.throw(_("You cannot credit and debit same account at the same time"))
self.total_debit = flt(self.total_debit) + flt(d.debit, self.precision("debit", "entries"))
self.total_credit = flt(self.total_credit) + flt(d.credit, self.precision("credit", "entries"))
self.difference = flt(self.total_debit, self.precision("total_debit")) - \
flt(self.total_credit, self.precision("total_credit"))
if self.difference:
frappe.throw(_("Total Debit must be equal to Total Credit. The difference is {0}")
.format(self.difference))
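	# Illustrative only: the rule enforced above on a hypothetical voucher.
	# Entries of Debtors 1000 Dr and Sales 1000 Cr give total_debit ==
	# total_credit, so difference == 0 and validation passes; any non-zero
	# difference raises.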
def create_remarks(self):
r = []
if self.cheque_no:
if self.cheque_date:
r.append(_('Reference #{0} dated {1}').format(self.cheque_no, formatdate(self.cheque_date)))
else:
msgprint(_("Please enter Reference date"), raise_exception=frappe.MandatoryError)
for d in self.get('entries'):
if d.against_invoice and d.credit:
currency = frappe.db.get_value("Sales Invoice", d.against_invoice, "currency")
r.append(_("{0} against Sales Invoice {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_invoice))
if d.against_sales_order and d.credit:
currency = frappe.db.get_value("Sales Order", d.against_sales_order, "currency")
r.append(_("{0} against Sales Order {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_sales_order))
if d.against_voucher and d.debit:
bill_no = frappe.db.sql("""select bill_no, bill_date, currency
from `tabPurchase Invoice` where name=%s""", d.against_voucher)
if bill_no and bill_no[0][0] and bill_no[0][0].lower().strip() \
not in ['na', 'not applicable', 'none']:
r.append(_('{0} {1} against Bill {2} dated {3}').format(bill_no[0][2],
fmt_money(flt(d.debit)), bill_no[0][0],
bill_no[0][1] and formatdate(bill_no[0][1].strftime('%Y-%m-%d'))))
if d.against_purchase_order and d.debit:
currency = frappe.db.get_value("Purchase Order", d.against_purchase_order, "currency")
r.append(_("{0} against Purchase Order {1}").format(fmt_money(flt(d.credit), currency = currency), \
d.against_purchase_order))
if self.user_remark:
r.append(_("Note: {0}").format(self.user_remark))
if r:
self.remark = ("\n").join(r) #User Remarks is not mandatory
def set_aging_date(self):
if self.is_opening != 'Yes':
self.aging_date = self.posting_date
else:
# check account type whether supplier or customer
exists = False
for d in self.get('entries'):
account_type = frappe.db.get_value("Account", d.account, "account_type")
if account_type in ["Supplier", "Customer"]:
exists = True
break
# If customer/supplier account, aging date is mandatory
if exists and not self.aging_date:
msgprint(_("Aging Date is mandatory for opening entry"), raise_exception=1)
else:
self.aging_date = self.posting_date
def set_print_format_fields(self):
for d in self.get('entries'):
acc = frappe.db.get_value("Account", d.account, ["account_type", "master_type"], as_dict=1)
if not acc: continue
if acc.master_type in ['Supplier', 'Customer']:
if not self.pay_to_recd_from:
self.pay_to_recd_from = frappe.db.get_value(acc.master_type, ' - '.join(d.account.split(' - ')[:-1]),
acc.master_type == 'Customer' and 'customer_name' or 'supplier_name')
if self.voucher_type in ["Credit Note", "Debit Note"]:
self.set_total_amount(d.debit or d.credit)
if acc.account_type in ['Bank', 'Cash']:
self.set_total_amount(d.debit or d.credit)
def set_total_amount(self, amt):
company_currency = get_company_currency(self.company)
self.total_amount = fmt_money(amt, currency=company_currency)
from frappe.utils import money_in_words
self.total_amount_in_words = money_in_words(amt, company_currency)
def check_reference_date(self):
if self.cheque_date:
for d in self.get("entries"):
due_date = None
if d.against_invoice and flt(d.credit) > 0:
due_date = frappe.db.get_value("Sales Invoice", d.against_invoice, "due_date")
elif d.against_voucher and flt(d.debit) > 0:
due_date = frappe.db.get_value("Purchase Invoice", d.against_voucher, "due_date")
if due_date and getdate(self.cheque_date) > getdate(due_date):
msgprint(_("Note: Reference Date {0} is after invoice due date {1}")
.format(formatdate(self.cheque_date), formatdate(due_date)))
def make_gl_entries(self, cancel=0, adv_adj=0):
from erpnext.accounts.general_ledger import make_gl_entries
gl_map = []
for d in self.get("entries"):
if d.debit or d.credit:
gl_map.append(
self.get_gl_dict({
"account": d.account,
"against": d.against_account,
"debit": flt(d.debit, self.precision("debit", "entries")),
"credit": flt(d.credit, self.precision("credit", "entries")),
"against_voucher_type": (("Purchase Invoice" if d.against_voucher else None)
or ("Sales Invoice" if d.against_invoice else None)
or ("Journal Voucher" if d.against_jv else None)
or ("Sales Order" if d.against_sales_order else None)
or ("Purchase Order" if d.against_purchase_order else None)),
"against_voucher": d.against_voucher or d.against_invoice or d.against_jv
or d.against_sales_order or d.against_purchase_order,
"remarks": self.remark,
"cost_center": d.cost_center
})
)
if gl_map:
make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj)
def check_credit_limit(self):
for d in self.get("entries"):
master_type, master_name = frappe.db.get_value("Account", d.account,
["master_type", "master_name"])
if master_type == "Customer" and master_name and flt(d.debit) > 0:
super(JournalVoucher, self).check_credit_limit(d.account)
def get_balance(self):
if not self.get('entries'):
msgprint(_("'Entries' cannot be empty"), raise_exception=True)
else:
flag, self.total_debit, self.total_credit = 0, 0, 0
diff = flt(self.difference, self.precision("difference"))
# If any row without amount, set the diff on that row
for d in self.get('entries'):
if not d.credit and not d.debit and diff != 0:
if diff>0:
d.credit = diff
elif diff<0:
d.debit = diff
flag = 1
# Set the diff in a new row
if flag == 0 and diff != 0:
jd = self.append('entries', {})
if diff>0:
jd.credit = abs(diff)
elif diff<0:
jd.debit = abs(diff)
self.validate_debit_and_credit()
def get_outstanding_invoices(self):
self.set('entries', [])
total = 0
for d in self.get_values():
total += flt(d.outstanding_amount, self.precision("credit", "entries"))
jd1 = self.append('entries', {})
jd1.account = d.account
if self.write_off_based_on == 'Accounts Receivable':
jd1.credit = flt(d.outstanding_amount, self.precision("credit", "entries"))
jd1.against_invoice = cstr(d.name)
elif self.write_off_based_on == 'Accounts Payable':
jd1.debit = flt(d.outstanding_amount, self.precision("debit", "entries"))
jd1.against_voucher = cstr(d.name)
jd2 = self.append('entries', {})
if self.write_off_based_on == 'Accounts Receivable':
jd2.debit = total
elif self.write_off_based_on == 'Accounts Payable':
jd2.credit = total
self.validate_debit_and_credit()
def get_values(self):
cond = " and outstanding_amount <= {0}".format(self.write_off_amount) \
if flt(self.write_off_amount) > 0 else ""
if self.write_off_based_on == 'Accounts Receivable':
return frappe.db.sql("""select name, debit_to as account, outstanding_amount
from `tabSales Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
elif self.write_off_based_on == 'Accounts Payable':
return frappe.db.sql("""select name, credit_to as account, outstanding_amount
from `tabPurchase Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
@frappe.whitelist()
def get_default_bank_cash_account(company, voucher_type):
from erpnext.accounts.utils import get_balance_on
account = frappe.db.get_value("Company", company,
voucher_type=="Bank Voucher" and "default_bank_account" or "default_cash_account")
if account:
return {
"account": account,
"balance": get_balance_on(account)
}
@frappe.whitelist()
def get_payment_entry_from_sales_invoice(sales_invoice):
from erpnext.accounts.utils import get_balance_on
si = frappe.get_doc("Sales Invoice", sales_invoice)
jv = get_payment_entry(si)
jv.remark = 'Payment received against Sales Invoice {0}. {1}'.format(si.name, si.remarks)
# credit customer
jv.get("entries")[0].account = si.debit_to
jv.get("entries")[0].balance = get_balance_on(si.debit_to)
jv.get("entries")[0].credit = si.outstanding_amount
jv.get("entries")[0].against_invoice = si.name
# debit bank
jv.get("entries")[1].debit = si.outstanding_amount
return jv.as_dict()
@frappe.whitelist()
def get_payment_entry_from_purchase_invoice(purchase_invoice):
from erpnext.accounts.utils import get_balance_on
pi = frappe.get_doc("Purchase Invoice", purchase_invoice)
jv = get_payment_entry(pi)
jv.remark = 'Payment against Purchase Invoice {0}. {1}'.format(pi.name, pi.remarks)
# credit supplier
jv.get("entries")[0].account = pi.credit_to
jv.get("entries")[0].balance = get_balance_on(pi.credit_to)
jv.get("entries")[0].debit = pi.outstanding_amount
jv.get("entries")[0].against_voucher = pi.name
# credit bank
jv.get("entries")[1].credit = pi.outstanding_amount
return jv.as_dict()
def get_payment_entry(doc):
bank_account = get_default_bank_cash_account(doc.company, "Bank Voucher")
jv = frappe.new_doc('Journal Voucher')
jv.voucher_type = 'Bank Voucher'
jv.company = doc.company
jv.fiscal_year = doc.fiscal_year
jv.append("entries")
d2 = jv.append("entries")
if bank_account:
d2.account = bank_account["account"]
d2.balance = bank_account["balance"]
return jv
@frappe.whitelist()
def get_opening_accounts(company):
"""get all balance sheet accounts for opening entry"""
from erpnext.accounts.utils import get_balance_on
accounts = frappe.db.sql_list("""select name from tabAccount
where group_or_ledger='Ledger' and report_type='Balance Sheet' and company=%s""", company)
return [{"account": a, "balance": get_balance_on(a)} for a in accounts]
def get_against_purchase_invoice(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, credit_to, outstanding_amount, bill_no, bill_date
from `tabPurchase Invoice` where credit_to = %s and docstatus = 1
and outstanding_amount > 0 and %s like %s order by name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_sales_invoice(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, debit_to, outstanding_amount
from `tabSales Invoice` where debit_to = %s and docstatus = 1
and outstanding_amount > 0 and `%s` like %s order by name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_jv(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select distinct jv.name, jv.posting_date, jv.user_remark
from `tabJournal Voucher` jv, `tabJournal Voucher Detail` jvd
where jvd.parent = jv.name and jvd.account = %s and jv.docstatus = 1
and (ifnull(jvd.against_invoice, '') = '' and ifnull(jvd.against_voucher, '') = '' and ifnull(jvd.against_jv, '') = '' )
and jv.%s like %s order by jv.name desc limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["account"], "%%%s%%" % txt, start, page_len))
@frappe.whitelist()
def get_outstanding(args):
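	# NOTE: eval() on client-supplied input is unsafe; a hardened variant
	# would parse the arguments with json.loads (or frappe's JSON helpers)
	# instead of evaluating them.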
args = eval(args)
if args.get("doctype") == "Journal Voucher" and args.get("account"):
against_jv_amount = frappe.db.sql("""
select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
from `tabJournal Voucher Detail` where parent=%s and account=%s
and ifnull(against_invoice, '')='' and ifnull(against_voucher, '')=''
and ifnull(against_jv, '')=''""", (args['docname'], args['account']))
against_jv_amount = flt(against_jv_amount[0][0]) if against_jv_amount else 0
if against_jv_amount > 0:
return {"credit": against_jv_amount}
else:
return {"debit": -1* against_jv_amount}
elif args.get("doctype") == "Sales Invoice":
return {
"credit": flt(frappe.db.get_value("Sales Invoice", args["docname"],
"outstanding_amount"))
}
elif args.get("doctype") == "Purchase Invoice":
return {
"debit": flt(frappe.db.get_value("Purchase Invoice", args["docname"],
"outstanding_amount"))
}
| gpl-2.0 |
andrefreitas/schwa | schwa/extraction/git_extractor.py | 1 | 8652 | # Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module for the Git Extractor. """
import multiprocessing
import os
import git
from .abstract_extractor import *
from schwa.repository import *
from schwa.parsing import JavaParser, ParsingError
current_repo = None # Current repository wrapper
def extract_commit_wrapper(hexsha):
""" Multiprocessing wrapper for extracting a commit"""
return current_repo.extract_commit(hexsha)
class GitExtractor(AbstractExtractor):
""" A Git Extractor.
This class relies on GitPython library to extract data from a local repository.
"""
def __init__(self, path):
super().__init__(path)
self.repo = git.Repo(path, odbt=git.GitCmdObjectDB)
def extract(self, ignore_regex="^$", max_commits=None, method_granularity=False, parallel=True):
""" Extract a repository.
It extracts commits from a repository that are important to the analysis. Therefore, only commits
related to code are important. For the sake of supporting big repositories, it is possible to set
the maximum number of commits.
Args:
ignore_regex: An optional string that is a regex pattern to ignore unnecessary files.
max_commits: An optional int that is the maximum number of commits to extract since the last one.
method_granularity: An optional boolean that enables extraction until the method granularity.
parallel: An optional boolean that enables multiprocessing extraction.
Returns:
A Repository instance.
"""
# Multiprocessing setup
global current_repo
current_repo = self
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError: # pragma: no cover
cpus = 2 # pragma: no cover
self.ignore_regex = ignore_regex
self.method_granularity = method_granularity
# Extract commits
iter_commits = self.repo.iter_commits(max_count=max_commits) if max_commits else self.repo.iter_commits()
commits = [commit.hexsha for commit in iter_commits]
pool = multiprocessing.Pool(processes=cpus)
if parallel and os.name != "nt":
commits = pool.map(extract_commit_wrapper, commits)
else:
commits = map(extract_commit_wrapper, commits)
commits = list(reversed([commit for commit in commits if commit]))
# Timestamps
try:
begin_ts = list(self.repo.iter_commits())[-1].committed_date
last_ts = list(self.repo.iter_commits(max_count=1))[0].committed_date
except TypeError:
raise RepositoryExtractionException("Error extracting repository: cannot parse begin or last timestamps!")
# Repository
repo = Repository(commits, begin_ts, last_ts)
return repo
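    # Illustrative usage sketch (path and arguments are placeholders):
    #
    #   extractor = GitExtractor("/path/to/repo")
    #   repository = extractor.extract(ignore_regex=r"^tests/",
    #                                  max_commits=500,
    #                                  method_granularity=True)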
def extract_commit(self, hexsha):
""" Extract a commit.
Iterates over commits diffs to extract important information such as changed files, classes and methods.
Args:
hexsha: A string representing the commit ID
Returns:
A Commit instance.
"""
commit = self.repo.commit(hexsha)
_id = hexsha
try:
message = commit.message
except (UnicodeDecodeError, TypeError): # pragma: no cover
return None # pragma: no cover
author = commit.author.email
timestamp = commit.committed_date
diffs_list = []
# First commit
if not commit.parents:
for blob in commit.tree.traverse():
if self.is_good_blob(blob):
diffs_list.extend(self.get_new_file_diffs(blob))
else:
for parent in commit.parents:
for diff in parent.diff(commit):
# Shortcut
if not self.is_good_blob(diff.a_blob) and not self.is_good_blob(diff.b_blob):
continue
# New file
if diff.new_file and self.is_good_blob(diff.b_blob):
diffs_list.extend(self.get_new_file_diffs(diff.b_blob))
# Renamed file
elif diff.renamed and self.is_good_blob(diff.a_blob) and self.is_good_blob(diff.b_blob):
diffs_list.extend(self.get_renamed_file_diffs(diff.a_blob, diff.b_blob))
# Deleted file
elif diff.deleted_file:
diffs_list.append(DiffFile(file_a=diff.a_blob.path, removed=True))
# Modified file
else:
diffs_list.extend(self.get_modified_file_diffs(diff.a_blob, diff.b_blob))
return Commit(_id, message, author, timestamp, diffs_list) if len(diffs_list) > 0 else None
def get_new_file_diffs(self, blob):
diffs_list = [DiffFile(file_b=blob.path, added=True)]
if can_parse_file(blob.path) and self.method_granularity:
source = GitExtractor.get_source(blob)
file_parsed = GitExtractor.parse(blob.path, source)
if file_parsed:
classes_set = file_parsed.get_classes_set()
methods_set = file_parsed.get_functions_set()
for c in classes_set:
diffs_list.append(DiffClass(file_name=blob.path, class_b=c, added=True))
for c, m in methods_set:
diffs_list.append(DiffMethod(file_name=blob.path, class_name=c, method_b=m, added=True))
return diffs_list
def get_modified_file_diffs(self, blob_a, blob_b):
diffs_list = [DiffFile(file_a=blob_a.path, file_b=blob_b.path, modified=True)]
try:
if can_parse_file(blob_a.path) and can_parse_file(blob_b.path) and self.method_granularity:
source_a = GitExtractor.get_source(blob_a)
source_b = GitExtractor.get_source(blob_b)
diffs_list.extend(GitExtractor.diff((blob_a.path, source_a), (blob_b.path, source_b)))
except ParsingError:
pass
return diffs_list
def get_renamed_file_diffs(self, blob_a, blob_b):
diffs_list = [DiffFile(file_a=blob_a.path, file_b=blob_b.path, renamed=True)]
try:
if can_parse_file(blob_a.path) and can_parse_file(blob_b.path) and self.method_granularity:
source_a = GitExtractor.get_source(blob_a)
source_b = GitExtractor.get_source(blob_b)
diffs_list.extend(GitExtractor.diff((blob_a.path, source_a), (blob_b.path, source_b)))
except ParsingError:
pass
return diffs_list
def is_good_blob(self, blob):
return blob and is_code_file(blob.path) and not re.search(self.ignore_regex, blob.path)
@staticmethod
def get_source(blob):
try:
stream = blob.data_stream.read()
source = stream.decode("UTF-8")
except AttributeError:
raise ParsingError
return source
@staticmethod
def parse(path, source):
try:
if "java" in path:
components = JavaParser.parse(source)
return components
except ParsingError:
pass
return False
@staticmethod
def diff(file_a, file_b):
try:
if "java" in file_a[0]:
components_diff = JavaParser.diff(file_a, file_b)
return components_diff
except ParsingError:
pass
return [] | mit |
shakamunyi/neutron | neutron/tests/unit/plugins/ibm/test_sdnve_agent.py | 15 | 4482 | # Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_config import cfg
from neutron.agent.linux import ip_lib
from neutron.plugins.ibm.agent import sdnve_neutron_agent
from neutron.tests import base
NOTIFIER = ('neutron.plugins.ibm.'
'sdnve_neutron_plugin.AgentNotifierApi')
class CreateAgentConfigMap(base.BaseTestCase):
def test_create_agent_config_map_succeeds(self):
self.assertTrue(sdnve_neutron_agent.create_agent_config_map(cfg.CONF))
def test_create_agent_config_using_controller_ips(self):
cfg.CONF.set_override('controller_ips',
['10.10.10.1', '10.10.10.2'], group='SDNVE')
cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['controller_ip'], '10.10.10.1')
def test_create_agent_config_using_interface_mappings(self):
cfg.CONF.set_override('interface_mappings',
['interface1 : eth1', 'interface2 : eth2'],
group='SDNVE')
cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['interface_mappings'],
{'interface1': 'eth1', 'interface2': 'eth2'})
class TestSdnveNeutronAgent(base.BaseTestCase):
def setUp(self):
super(TestSdnveNeutronAgent, self).setUp()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
cfg.CONF.set_override('integration_bridge',
'br_int', group='SDNVE')
kwargs = sdnve_neutron_agent.create_agent_config_map(cfg.CONF)
class MockFixedIntervalLoopingCall(object):
def __init__(self, f):
self.f = f
def start(self, interval=0):
self.f()
with contextlib.nested(
mock.patch('neutron.plugins.ibm.agent.sdnve_neutron_agent.'
'SdnveNeutronAgent.setup_integration_br',
return_value=mock.Mock()),
mock.patch('neutron.openstack.common.loopingcall.'
'FixedIntervalLoopingCall',
new=MockFixedIntervalLoopingCall)):
self.agent = sdnve_neutron_agent.SdnveNeutronAgent(**kwargs)
def test_setup_physical_interfaces(self):
with mock.patch.object(self.agent.int_br,
'add_port') as add_port_func:
with mock.patch.object(ip_lib,
'device_exists',
                                   return_value=True):
self.agent.setup_physical_interfaces({"interface1": "eth1"})
add_port_func.assert_called_once_with('eth1')
def test_setup_physical_interfaces_none(self):
with mock.patch.object(self.agent.int_br,
'add_port') as add_port_func:
with mock.patch.object(ip_lib,
'device_exists',
                                   return_value=True):
self.agent.setup_physical_interfaces({})
self.assertFalse(add_port_func.called)
def test_get_info_set_controller(self):
with mock.patch.object(self.agent.int_br,
'set_controller') as set_controller_func:
kwargs = {}
kwargs['info'] = {'new_controller': '10.10.10.1'}
self.agent.info_update('dummy', **kwargs)
set_controller_func.assert_called_once_with(['tcp:10.10.10.1'])
def test_get_info(self):
with mock.patch.object(self.agent.int_br,
'set_controller') as set_controller_func:
kwargs = {}
self.agent.info_update('dummy', **kwargs)
self.assertFalse(set_controller_func.called)
| apache-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.0.253/external/webkit/Tools/QueueStatusServer/model/queuestatus.py | 19 | 2103 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
from model.queuepropertymixin import QueuePropertyMixin
class QueueStatus(db.Model, QueuePropertyMixin):
author = db.UserProperty()
queue_name = db.StringProperty()
bot_id = db.StringProperty()
active_bug_id = db.IntegerProperty()
active_patch_id = db.IntegerProperty()
message = db.StringProperty(multiline=True)
date = db.DateTimeProperty(auto_now_add=True)
results_file = db.BlobProperty()
def is_retry_request(self):
return self.message == "Retry" # From AbstractQueue._retry_status
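# Illustrative only: creating a status record with the App Engine datastore
# API (field values are placeholders).
#
#   status = QueueStatus(queue_name="commit-queue", bot_id="bot-1",
#                        message="Retry")
#   status.put()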
| gpl-2.0 |
vianuevm/Webparser | ENV/Lib/site-packages/werkzeug/wsgi.py | 146 | 37745 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
"""Marks a function as responder. Decorate a function with it and it
will automatically call the return value as WSGI application.
Example::
@responder
def application(environ, start_response):
return Response('Hello World!')
"""
return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
def get_current_url(environ, root_only=False, strip_querystring=False,
host_only=False, trusted_hosts=None):
"""A handy helper function that recreates the full URL as IRI for the
current request or parts of it. Here an example:
>>> from werkzeug.test import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
    Optionally it verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
Note that the string returned might contain unicode characters as the
    representation is an IRI, not a URI. If you need an ASCII-only
representation you can use the :func:`~werkzeug.urls.iri_to_uri`
function:
>>> from werkzeug.urls import iri_to_uri
>>> iri_to_uri(get_current_url(env))
'http://localhost/script/?param=foo'
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
cat = tmp.append
if host_only:
return uri_to_iri(''.join(tmp) + '/')
cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
cat('/')
if not root_only:
cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
if not strip_querystring:
qs = get_query_string(environ)
if qs:
cat('?' + qs)
return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
"""Checks if a host is trusted against a list. This also takes care
of port normalization.
.. versionadded:: 0.9
:param hostname: the hostname to check
:param trusted_list: a list of hostnames to check against. If a
hostname starts with a dot it will match against
all subdomains as well.
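    Example (illustrative):
    >>> host_is_trusted('example.com', ['example.com'])
    True
    >>> host_is_trusted('evil.example', ['example.com'])
    False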
"""
if not hostname:
return False
if isinstance(trusted_list, string_types):
trusted_list = [trusted_list]
def _normalize(hostname):
if ':' in hostname:
hostname = hostname.rsplit(':', 1)[0]
return _encode_idna(hostname)
hostname = _normalize(hostname)
for ref in trusted_list:
if ref.startswith('.'):
ref = ref[1:]
suffix_match = True
else:
suffix_match = False
ref = _normalize(ref)
if ref == hostname:
return True
if suffix_match and hostname.endswith('.' + ref):
return True
return False
def get_host(environ, trusted_hosts=None):
"""Return the real host for the given WSGI environment. This takes care
of the `X-Forwarded-Host` header. Optionally it verifies that the host
is in a list of trusted hosts. If the host is not in there it will raise
a :exc:`~werkzeug.exceptions.SecurityError`.
:param environ: the WSGI environment to get the host of.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
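    Example (illustrative):
    >>> get_host({'HTTP_HOST': 'example.com'})
    'example.com'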
"""
if 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
elif 'HTTP_HOST' in environ:
rv = environ['HTTP_HOST']
else:
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
if trusted_hosts is not None:
if not host_is_trusted(rv, trusted_hosts):
from werkzeug.exceptions import SecurityError
raise SecurityError('Host "%s" is not trusted' % rv)
return rv
def get_content_length(environ):
"""Returns the content length from the WSGI environment as
integer. If it's not available `None` is returned.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the content length from.
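    Example (illustrative):
    >>> get_content_length({'CONTENT_LENGTH': '42'})
    42
    >>> get_content_length({}) is None
    True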
"""
content_length = environ.get('CONTENT_LENGTH')
if content_length is not None:
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
def get_input_stream(environ, safe_fallback=True):
"""Returns the input stream from the WSGI environment and wraps it
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: indicates whether the function should use an empty
                          stream as safe fallback or just return the original
                          WSGI input stream if it can't wrap it safely.  The
                          default is to return an empty stream in those cases.
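    Example (illustrative):
    >>> from io import BytesIO
    >>> stream = get_input_stream({'wsgi.input': BytesIO(b'data'),
    ...                            'CONTENT_LENGTH': '4'})
    >>> stream.read() == b'data'
    True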
"""
stream = environ['wsgi.input']
content_length = get_content_length(environ)
# A wsgi extension that tells us if the input is terminated. In
    # that case we return the stream unchanged as we know we can safely
# read it until the end.
if environ.get('wsgi.input_terminated'):
return stream
# If we don't have a content length we fall back to an empty stream
# in case of a safe fallback, otherwise we return the stream unchanged.
# The non-safe fallback is not recommended but might be useful in
# some situations.
if content_length is None:
return safe_fallback and _empty_stream or stream
# Otherwise limit the stream to the content length
return LimitedStream(stream, content_length)
def get_query_string(environ):
"""Returns the `QUERY_STRING` from the WSGI environment. This also takes
care about the WSGI decoding dance on Python 3 environments as a
native string. The string returned will be restricted to ASCII
characters.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the query string from.
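    Example (illustrative):
    >>> get_query_string({'QUERY_STRING': 'foo=bar&baz=42'})
    'foo=bar&baz=42'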
"""
qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
# In that case we want to urllib quote it badly.
return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path info, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
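    Example (illustrative):
    >>> get_path_info({'PATH_INFO': '/docs/index.html'})
    '/docs/index.html'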
"""
path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
"""Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` a bytestring is returned.
If there are empty segments (``'/foo//bar``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
"""
path = environ.get('PATH_INFO')
if not path:
return None
script_name = environ.get('SCRIPT_NAME', '')
# shift multiple leading slashes over
old_path = path
path = path.lstrip('/')
if path != old_path:
script_name += '/' * (len(old_path) - len(path))
if '/' not in path:
environ['PATH_INFO'] = ''
environ['SCRIPT_NAME'] = script_name + path
rv = wsgi_get_bytes(path)
else:
segment, path = path.split('/', 1)
environ['PATH_INFO'] = '/' + path
environ['SCRIPT_NAME'] = script_name + segment
rv = wsgi_get_bytes(segment)
return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
if segments:
return to_unicode(wsgi_get_bytes(segments[0]),
charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
errors='replace', collapse_http_schemes=True):
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
.. versionadded:: 0.6
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
"""
def _normalize_netloc(scheme, netloc):
parts = netloc.split(u'@', 1)[-1].split(u':', 1)
if len(parts) == 2:
netloc, port = parts
if (scheme == u'http' and port == u'80') or \
(scheme == u'https' and port == u'443'):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += u':' + port
return netloc
# make sure whatever we are working on is a IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl,
root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
cur_scheme, cur_netloc, cur_path, = \
url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in (u'http', u'https'):
return None
else:
if not (base_scheme in (u'http', u'https') and
base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip(u'/')
if not cur_path.startswith(base_path):
return None
return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/shared': ('myapplication', 'shared_files')
})
This will then serve the ``shared_files`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using
    ASCII-only file names for static files.
    The middleware will guess the mimetype using the Python `mimetypes`
    module. If it's unable to figure out the mimetype it will fall back
    to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(self, app, exports, disallow=None, cache=True,
cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
self.app = app
self.exports = {}
self.cache = cache
self.cache_timeout = cache_timeout
for key, value in iteritems(exports):
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, string_types):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError('unknown def %r' % value)
self.exports[key] = loader
if disallow is not None:
from fnmatch import fnmatch
self.is_allowed = lambda x: not fnmatch(x, disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename):
return lambda: (
open(filename, 'rb'),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename))
)
def get_file_loader(self, filename):
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
loadtime = datetime.utcnow()
provider = get_provider(package)
manager = ResourceManager()
filesystem_bound = isinstance(provider, DefaultProvider)
def loader(path):
if path is None:
return None, None
path = posixpath.join(package_path, path)
if not provider.has_resource(path):
return None, None
basename = posixpath.basename(path)
if filesystem_bound:
return basename, self._opener(
provider.get_resource_filename(manager, path))
return basename, lambda: (
provider.get_resource_stream(manager, path),
loadtime,
0
)
return loader
def get_directory_loader(self, directory):
def loader(path):
if path is not None:
path = os.path.join(directory, path)
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime, file_size, real_filename):
if not isinstance(real_filename, bytes):
real_filename = real_filename.encode(sys.getfilesystemencoding())
return 'wzsdm-%d-%s-%s' % (
mktime(mtime.timetuple()),
file_size,
adler32(real_filename) & 0xffffffff
)
def __call__(self, environ, start_response):
cleaned_path = get_path_info(environ)
if PY2:
cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
# sanitize the path for non unix systems
cleaned_path = cleaned_path.strip('/')
for sep in os.sep, os.altsep:
if sep and sep != '/':
cleaned_path = cleaned_path.replace(sep, '/')
path = '/'.join([''] + [x for x in cleaned_path.split('/')
if x and x != '..'])
file_loader = None
for search_path, loader in iteritems(self.exports):
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith('/'):
search_path += '/'
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path):])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename):
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
mime_type = guessed_type[0] or self.fallback_mimetype
f, mtime, file_size = file_loader()
headers = [('Date', http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename)
headers += [
('Etag', '"%s"' % etag),
('Cache-Control', 'max-age=%d, public' % timeout)
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response('304 Not Modified', headers)
return []
headers.append(('Expires', http_date(time() + timeout)))
else:
headers.append(('Cache-Control', 'public'))
headers.extend((
('Content-Type', mime_type),
('Content-Length', str(file_size)),
('Last-Modified', http_date(mtime))
))
start_response('200 OK', headers)
return wrap_file(environ, f)
class DispatcherMiddleware(object):
"""Allows one to mount middlewares or applications in a WSGI application.
This is useful if you want to combine multiple WSGI applications::
app = DispatcherMiddleware(app, {
'/app2': app2,
'/app3': app3
})
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get('PATH_INFO', '')
path_info = ''
while '/' in script:
if script in self.mounts:
app = self.mounts[script]
break
items = script.split('/')
script = '/'.join(items[:-1])
path_info = '/%s%s' % (items[-1], path_info)
else:
app = self.mounts.get(script, self.app)
original_script_name = environ.get('SCRIPT_NAME', '')
environ['SCRIPT_NAME'] = original_script_name + script
environ['PATH_INFO'] = path_info
return app(environ, start_response)
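# Hedged usage sketch, added for illustration (not part of the original
# module): two trivial WSGI apps composed with DispatcherMiddleware. The
# `_demo_app` helper and the '/api' mount point are assumptions.
def _demo_app(body):
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [body]
    return app
# Requests under /api/... reach the second app; everything else hits the first.
_demo_dispatcher = DispatcherMiddleware(_demo_app(b'frontend'), {
    '/api': _demo_app(b'backend'),
})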
@implements_iterator
class ClosingIterator(object):
"""The WSGI specification requires that all middlewares and gateways
respect the `close` callback of an iterator. Because it is useful to add
another close action to a returned iterator and adding a custom iterator
    is a tedious task, this class can be used for that::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
and finishes the processing if the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(self, iterable, callbacks=None):
iterator = iter(iterable)
self._next = partial(next, iterator)
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterator, 'close', None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self):
return self
def __next__(self):
return self._next()
def close(self):
for callback in self._callbacks:
callback()
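# Hedged sketch, added for illustration: attaching a cleanup hook to an app's
# response iterable. `cleanup_fn` is a hypothetical callback, not Werkzeug API.
def _closing_iterator_demo(app, environ, start_response, cleanup_fn):
    # cleanup_fn runs when the server closes the response iterator.
    return ClosingIterator(app(environ, start_response), cleanup_fn)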
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
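# Hedged sketch, added for illustration: a minimal WSGI app that streams a
# file via wrap_file. The 'data.bin' path is an illustrative assumption.
def _wrap_file_demo(environ, start_response):
    f = open('data.bin', 'rb')
    start_response('200 OK', [('Content-Type', 'application/octet-stream')])
    # Uses the server's wsgi.file_wrapper when available, FileWrapper otherwise.
    return wrap_file(environ, f)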
@implements_iterator
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
def __iter__(self):
return self
def __next__(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, text_type)):
raise TypeError('Passed a string or byte object instead of '
'true iterator or stream.')
if not hasattr(stream, 'read'):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally, as opposed
to the :meth:`~file.readline` method that is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
    :param stream: the stream or iterable to iterate over.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
                   is a :class:`LimitedStream`.)
:param buffer_size: The optional buffer size.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
s = make_literal_wrapper(first_item)
empty = s('')
cr = s('\r')
lf = s('\n')
crlf = s('\r\n')
_iter = chain((first_item,), _iter)
def _iter_basic_lines():
_join = empty.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
new_buf = []
for item in chain(buffer, new_data.splitlines(True)):
new_buf.append(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
yield previous
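# Hedged sketch, added for illustration: line iteration over an in-memory
# stream standing in for `wsgi.input`; the payload and limit are assumptions.
def _make_line_iter_demo():
    from io import BytesIO
    body = b'first\r\nsecond\nthird'
    # Returns [b'first\r\n', b'second\n', b'third'] -- newlines are preserved.
    return list(make_line_iter(BytesIO(body), limit=len(body)))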
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
"""Works like :func:`make_line_iter` but accepts a separator
which divides chunks. If you want newline based processing
you should use :func:`make_line_iter` instead as it
supports arbitrary newline markers.
.. versionadded:: 0.8
.. versionadded:: 0.9
added support for iterators as input stream.
    :param stream: the stream or iterable to iterate over.
:param separator: the separator that divides chunks.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
is otherwise already limited).
:param buffer_size: The optional buffer size.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r'(%s)' % re.escape(separator)).split
_join = u''.join
else:
separator = to_bytes(separator)
_split = re.compile(b'(' + re.escape(separator) + b')').split
_join = b''.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
chunks = _split(new_data)
new_buf = []
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
else:
new_buf.append(item)
buffer = new_buf
if buffer:
yield _join(buffer)
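# Hedged sketch, added for illustration: splitting a stream on a custom
# separator; the NUL-delimited payload is an assumption.
def _make_chunk_iter_demo():
    from io import BytesIO
    body = b'alpha\x00beta\x00gamma'
    # Returns [b'alpha', b'beta', b'gamma'] -- the separator itself is dropped.
    return list(make_chunk_iter(BytesIO(body), b'\x00', limit=len(body)))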
@implements_iterator
class LimitedStream(object):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
.. admonition:: Note on WSGI compliance
calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because they pass a size argument to the
readline methods. Unfortunately the WSGI PEP is not safely
implementable without a size argument to :meth:`readline`
because there is no EOF marker in the stream. As a result
of that the use of :meth:`readline` is discouraged.
For the same reason iterating over the :class:`LimitedStream`
is not portable. It internally calls :meth:`readline`.
We strongly suggest using :meth:`read` only or using the
:func:`make_line_iter` which safely iterates line-based
over a WSGI input stream.
:param stream: the stream to wrap.
:param limit: the limit for the stream, must not be longer than
                  what the stream can provide if the stream does not
end with `EOF` (like `wsgi.input`)
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from werkzeug.exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
:param chunk_size: the size for a chunk. It will read the chunk
until the stream is exhausted and throw away
the results.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
def read(self, size=None):
"""Read `size` bytes or if size is not provided everything is read.
:param size: the number of bytes read.
"""
if self._pos >= self.limit:
return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistency with file
size = self.limit
to_read = min(self.limit - self._pos, size)
try:
read = self._read(to_read)
except (IOError, ValueError):
return self.on_disconnect()
if to_read and len(read) != to_read:
return self.on_disconnect()
self._pos += len(read)
return read
def readline(self, size=None):
"""Reads one line from the stream."""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None:
size = self.limit - self._pos
else:
size = min(size, self.limit - self._pos)
try:
line = self._readline(size)
except (ValueError, IOError):
return self.on_disconnect()
if size and not line:
return self.on_disconnect()
self._pos += len(line)
return line
def readlines(self, size=None):
"""Reads a file into a list of strings. It calls :meth:`readline`
until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
`readline`.
"""
last_pos = self._pos
result = []
if size is not None:
end = min(self.limit, last_pos + size)
else:
end = self.limit
while 1:
if size is not None:
size -= last_pos - self._pos
if self._pos >= end:
break
result.append(self.readline(size))
if size is not None:
last_pos = self._pos
return result
def tell(self):
"""Returns the position of the stream.
.. versionadded:: 0.9
"""
return self._pos
def __next__(self):
line = self.readline()
if not line:
raise StopIteration()
return line
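# Hedged sketch, added for illustration: LimitedStream refuses to read past
# its limit. BytesIO and the nine-byte limit are illustrative assumptions.
def _limited_stream_demo():
    from io import BytesIO
    stream = LimitedStream(BytesIO(b'123456789extra'), 9)
    assert stream.read() == b'123456789'  # stops exactly at the limit
    assert stream.read() == b''           # exhausted: on_exhausted() result
    assert stream.is_exhausted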
| gpl-2.0 |
petewarden/tensorflow | tensorflow/python/compiler/tensorrt/test/memory_alignment_test.py | 25 | 2529 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class MemoryAlignmentTest(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of BatchMatMul in TF-TRT conversion."""
def GraphFn(self, inp):
dtype = inp.dtype
e1 = constant_op.constant(
np.random.randn(1, 1, 3, 5), name="kernel_1", dtype=dtype)
e2 = constant_op.constant(
np.random.randn(1, 1, 5, 10), name="kernel_2", dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=e1,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
out = nn.conv2d(
input=conv,
filter=e2,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv_2")
return array_ops.squeeze(out, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 15, 15, 3]],
[[2, 15, 15, 10]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-06 if run_params.precision_mode == "FP32" else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 0.1
if __name__ == "__main__":
test.main()
| apache-2.0 |
SteveXiSong/ECE757-SnoopingPredictions | configs/topologies/Pt2Pt.py | 47 | 2741 | # Copyright (c) 2011 Advanced Micro Devices, Inc.
# 2011 Massachusetts Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
# Tushar Krishna
from m5.params import *
from m5.objects import *
from BaseTopology import SimpleTopology
class Pt2Pt(SimpleTopology):
description='Pt2Pt'
def __init__(self, controllers):
self.nodes = controllers
def makeTopology(self, options, network, IntLink, ExtLink, Router):
nodes = self.nodes
# Create an individual router for each controller, and connect all to all.
routers = [Router(router_id=i) for i in range(len(nodes))]
network.routers = routers
ext_links = [ExtLink(link_id=i, ext_node=n, int_node=routers[i])
for (i, n) in enumerate(nodes)]
network.ext_links = ext_links
link_count = len(nodes)
int_links = []
for i in xrange(len(nodes)):
for j in xrange(len(nodes)):
if (i != j):
link_count += 1
int_links.append(IntLink(link_id=link_count,
node_a=routers[i],
node_b=routers[j]))
network.int_links = int_links
| bsd-3-clause |
memo/tensorflow | tensorflow/python/ops/partitioned_variables.py | 132 | 12318 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for creating partitioned variables.
This is a convenient abstraction to partition a large variable across
multiple smaller variables that can be assigned to different devices.
The full variable can be reconstructed by concatenating the smaller variables.
Using partitioned variables instead of a single variable is mostly a
performance choice. It however also has an impact on:
1. Random initialization, as the random number generator is called once per
slice
2. Updates, as they happen in parallel across slices
A key design goal is to allow a different graph to repartition a variable
with the same name but different slicings, including possibly no partitions.
TODO(touts): If an initializer provides a seed, the seed must be changed
deterministically for each slice, maybe by adding one to it, otherwise each
slice will use the same values. Maybe this can be done by passing the
slice offsets to the initializer functions.
Typical usage:
```python
# Create a list of partitioned variables with:
vs = create_partitioned_variables(
<shape>, <slicing>, <initializer>, name=<optional-name>)
# Pass the list as inputs to embedding_lookup for sharded, parallel lookup:
y = embedding_lookup(vs, ids, partition_strategy="div")
# Or fetch the variables in parallel to speed up large matmuls:
z = matmul(x, concat(slice_dim, vs))
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
def variable_axis_size_partitioner(
max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None):
"""Get a partitioner for VariableScope to keep shards below `max_shard_bytes`.
This partitioner will shard a Variable along one axis, attempting to keep
the maximum shard size below `max_shard_bytes`. In practice, this is not
always possible when sharding along only one axis. When this happens,
this axis is sharded as much as possible (i.e., every dimension becomes
a separate shard).
If the partitioner hits the `max_shards` limit, then each shard may end up
larger than `max_shard_bytes`. By default `max_shards` equals `None` and no
limit on the number of shards is enforced.
One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost
`64MB`, to keep below the protobuf byte limit.
Args:
max_shard_bytes: The maximum size any given shard is allowed to be.
axis: The axis to partition along. Default: outermost axis.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
max_shards: The maximum number of shards in int created taking precedence
over `max_shard_bytes`.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
Raises:
ValueError: If any of the byte counts are non-positive.
"""
if max_shard_bytes < 1 or bytes_per_string_element < 1:
raise ValueError(
"Both max_shard_bytes and bytes_per_string_element must be positive.")
if max_shards and max_shards < 1:
raise ValueError(
"max_shards must be positive.")
def _partitioner(shape, dtype):
"""Partitioner that partitions shards to have max_shard_bytes total size.
Args:
shape: A `TensorShape`.
dtype: A `DType`.
Returns:
A tuple representing how much to slice each axis in shape.
Raises:
ValueError: If shape is not a fully defined `TensorShape` or dtype is not
a `DType`.
"""
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("shape is not a TensorShape: %s" % shape)
if not shape.is_fully_defined():
raise ValueError("shape is not fully defined: %s" % shape)
if not isinstance(dtype, dtypes.DType):
raise ValueError("dtype is not a DType: %s" % dtype)
if dtype.base_dtype == dtypes.string:
element_size = bytes_per_string_element
else:
element_size = dtype.size
partitions = [1] * shape.ndims
bytes_per_slice = 1.0 * (
shape.num_elements() / shape[axis].value) * element_size
# How many slices can we fit on one shard of size at most max_shard_bytes?
# At least one slice is required.
slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))
# How many shards do we need for axis given that each shard fits
# slices_per_shard slices from a total of shape[axis].value slices?
axis_shards = int(math.ceil(1.0 * shape[axis].value / slices_per_shard))
if max_shards:
axis_shards = min(max_shards, axis_shards)
partitions[axis] = axis_shards
return partitions
return _partitioner
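# Hedged sketch, added for illustration (shape, dtype, and byte budget are
# assumptions): shows the shard count this partitioner computes directly.
def _axis_size_partitioner_demo():
  partitioner = variable_axis_size_partitioner(max_shard_bytes=1 << 20)
  # A float32 [2048, 1024] variable is ~8MB; each axis-0 slice is 4KB, so
  # 256 slices fit in a 1MB shard and ceil(2048 / 256) = 8 shards result.
  return partitioner(tensor_shape.TensorShape([2048, 1024]), dtypes.float32)  # [8, 1]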
def min_max_variable_partitioner(max_partitions=1, axis=0,
min_slice_size=256 << 10,
bytes_per_string_element=16):
"""Partitioner to allocate minimum size per slice.
Returns a partitioner that partitions the variable of given shape and dtype
such that each partition has a minimum of `min_slice_size` slice of the
variable. The maximum number of such partitions (upper bound) is given by
`max_partitions`.
Args:
max_partitions: Upper bound on the number of partitions. Defaults to 1.
axis: Axis along which to partition the variable. Defaults to 0.
min_slice_size: Minimum size of the variable slice per partition. Defaults
to 256K.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, dtype):
"""Partitioner that partitions list for a variable of given shape and type.
Ex: Consider partitioning a variable of type float32 with
shape=[1024, 1024].
If `max_partitions` >= 16, this function would return
[(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].
If `max_partitions` < 16, this function would return
[`max_partitions`, 1].
Args:
shape: Shape of the variable.
dtype: Type of the variable.
Returns:
List of partitions for each axis (currently only one axis can be
partitioned).
Raises:
ValueError: If axis to partition along does not exist for the variable.
"""
if axis >= len(shape):
raise ValueError("Can not partition variable along axis %d when shape is "
"only %s" % (axis, shape))
if dtype.base_dtype == dtypes.string:
bytes_per_element = bytes_per_string_element
else:
bytes_per_element = dtype.size
total_size_bytes = shape.num_elements() * bytes_per_element
partitions = total_size_bytes / min_slice_size
partitions_list = [1] * len(shape)
# We can not partition the variable beyond what its shape or
# `max_partitions` allows.
partitions_list[axis] = max(1, min(shape[axis].value,
max_partitions,
int(math.ceil(partitions))))
return partitions_list
return _partitioner
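# Hedged sketch, added for illustration (values are assumptions): a float32
# [1024, 1024] variable (4MB) with 256K minimum slices admits at most 16
# partitions; max_partitions then caps the result at 8.
def _min_max_partitioner_demo():
  partitioner = min_max_variable_partitioner(max_partitions=8,
                                             min_slice_size=256 << 10)
  return partitioner(tensor_shape.TensorShape([1024, 1024]), dtypes.float32)  # [8, 1]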
def fixed_size_partitioner(num_shards, axis=0):
"""Partitioner to specify a fixed number of shards along given axis.
Args:
num_shards: `int`, number of shards to partition variable.
axis: `int`, axis to partition on.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, **unused_args):
partitions_list = [1] * len(shape)
partitions_list[axis] = min(num_shards, shape[axis].value)
return partitions_list
return _partitioner
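# Hedged sketch, added for illustration: a fixed shard count, clipped to the
# dimension size so small variables are not over-partitioned.
def _fixed_partitioner_demo():
  partitioner = fixed_size_partitioner(num_shards=4)
  return partitioner(tensor_shape.TensorShape([3, 10]))  # [3, 1], since min(4, 3) = 3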
def create_partitioned_variables(
shape, slicing, initializer, dtype=dtypes.float32,
trainable=True, collections=None, name=None, reuse=None):
"""Create a list of partitioned variables according to the given `slicing`.
Currently only one dimension of the full variable can be sliced, and the
full variable can be reconstructed by the concatenation of the returned
list along that dimension.
Args:
shape: List of integers. The shape of the full variable.
slicing: List of integers. How to partition the variable.
Must be of the same length as `shape`. Each value
indicate how many slices to create in the corresponding
dimension. Presently only one of the values can be more than 1;
that is, the variable can only be sliced along one dimension.
      For convenience, the requested number of partitions does not have to
divide the corresponding dimension evenly. If it does not, the
shapes of the partitions are incremented by 1 starting from partition
0 until all slack is absorbed. The adjustment rules may change in the
future, but as you can save/restore these variables with different
slicing specifications this should not be a problem.
initializer: A `Tensor` of shape `shape` or a variable initializer
function. If a function, it will be called once for each slice,
passing the shape and data type of the slice as parameters. The
function must return a tensor with the same shape as the slice.
dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
trainable: If True also add all the variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
collections: List of graph collections keys to add the variables to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name for the full variable. Defaults to
`"PartitionedVariable"` and gets uniquified automatically.
reuse: Boolean or `None`; if `True` and name is set, it would reuse
previously created variables. if `False` it will create new variables.
if `None`, it would inherit the parent scope reuse.
Returns:
A list of Variables corresponding to the slicing.
Raises:
ValueError: If any of the arguments is malformed.
"""
logging.warn(
"create_partitioned_variables is deprecated. Use "
"tf.get_variable with a partitioner set, or "
"tf.get_partitioned_variable_list, instead.")
if len(shape) != len(slicing):
raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
"must have the length: shape: %s, slicing: %s" %
(shape, slicing))
if len(shape) < 1:
raise ValueError("A partitioned Variable must have rank at least 1: "
"shape: %s" % shape)
# Legacy: we are provided the slicing directly, so just pass it to
# the partitioner.
partitioner = lambda **unused_kwargs: slicing
with variable_scope.variable_scope(
name, "PartitionedVariable", reuse=reuse):
# pylint: disable=protected-access
partitioned_var = variable_scope._get_partitioned_variable(
name=None,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=trainable,
partitioner=partitioner,
collections=collections)
return list(partitioned_var)
# pylint: enable=protected-access
| apache-2.0 |
Gateworks/platform-external-chromium_org | third_party/protobuf/python/setup.py | 135 | 7881 | #! /usr/bin/python
#
# See README for usage instructions.
import sys
import os
import subprocess
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
try:
from setuptools import setup, Extension
except ImportError:
try:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
except ImportError:
sys.stderr.write(
"Could not import setuptools; make sure you have setuptools or "
"ez_setup installed.\n")
raise
from distutils.command.clean import clean as _clean
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
maintainer_email = "protobuf@googlegroups.com"
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def generate_proto(source):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print "Generating %s..." % output
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc == None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/unittest.proto")
generate_proto("../src/google/protobuf/unittest_custom_options.proto")
generate_proto("../src/google/protobuf/unittest_import.proto")
generate_proto("../src/google/protobuf/unittest_import_public.proto")
generate_proto("../src/google/protobuf/unittest_mset.proto")
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto")
generate_proto("google/protobuf/internal/test_bad_identifiers.proto")
generate_proto("google/protobuf/internal/more_extensions.proto")
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto")
generate_proto("google/protobuf/internal/more_messages.proto")
generate_proto("google/protobuf/internal/factory_test1.proto")
generate_proto("google/protobuf/internal/factory_test2.proto")
def MakeTestSuite():
# This is apparently needed on some systems to make sure that the tests
# work even if a previous version is already installed.
if 'google' in sys.modules:
del sys.modules['google']
GenerateUnittestProtos()
import unittest
import google.protobuf.internal.generator_test as generator_test
import google.protobuf.internal.descriptor_test as descriptor_test
import google.protobuf.internal.reflection_test as reflection_test
import google.protobuf.internal.service_reflection_test \
as service_reflection_test
import google.protobuf.internal.text_format_test as text_format_test
import google.protobuf.internal.wire_format_test as wire_format_test
import google.protobuf.internal.unknown_fields_test as unknown_fields_test
import google.protobuf.internal.descriptor_database_test \
as descriptor_database_test
import google.protobuf.internal.descriptor_pool_test as descriptor_pool_test
import google.protobuf.internal.message_factory_test as message_factory_test
import google.protobuf.internal.message_cpp_test as message_cpp_test
import google.protobuf.internal.reflection_cpp_generated_test \
as reflection_cpp_generated_test
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
for test in [ generator_test,
descriptor_test,
reflection_test,
service_reflection_test,
text_format_test,
wire_format_test ]:
suite.addTest(loader.loadTestsFromModule(test))
return suite
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o") or \
filepath.endswith('google/protobuf/compiler/__init__.py'):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
# Generate necessary .proto file if it doesn't exist.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
GenerateUnittestProtos()
# Make sure google.protobuf.compiler is a valid package.
open('google/protobuf/compiler/__init__.py', 'a').close()
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
if __name__ == '__main__':
ext_module_list = []
# C++ implementation extension
if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python") == "cpp":
print "Using EXPERIMENTAL C++ Implmenetation."
ext_module_list.append(Extension(
"google.protobuf.internal._net_proto2___python",
[ "google/protobuf/pyext/python_descriptor.cc",
"google/protobuf/pyext/python_protobuf.cc",
"google/protobuf/pyext/python-proto2.cc" ],
include_dirs = [ "." ],
libraries = [ "protobuf" ]))
setup(name = 'protobuf',
version = '2.5.0-pre',
packages = [ 'google' ],
namespace_packages = [ 'google' ],
test_suite = 'setup.MakeTestSuite',
# Must list modules explicitly so that we don't install tests.
py_modules = [
'google.protobuf.internal.api_implementation',
'google.protobuf.internal.containers',
'google.protobuf.internal.cpp_message',
'google.protobuf.internal.decoder',
'google.protobuf.internal.encoder',
'google.protobuf.internal.enum_type_wrapper',
'google.protobuf.internal.message_listener',
'google.protobuf.internal.python_message',
'google.protobuf.internal.type_checkers',
'google.protobuf.internal.wire_format',
'google.protobuf.descriptor',
'google.protobuf.descriptor_pb2',
'google.protobuf.compiler.plugin_pb2',
'google.protobuf.message',
'google.protobuf.descriptor_database',
'google.protobuf.descriptor_pool',
'google.protobuf.message_factory',
'google.protobuf.reflection',
'google.protobuf.service',
'google.protobuf.service_reflection',
'google.protobuf.text_format' ],
cmdclass = { 'clean': clean, 'build_py': build_py },
install_requires = ['setuptools'],
ext_modules = ext_module_list,
url = 'http://code.google.com/p/protobuf/',
maintainer = maintainer_email,
maintainer_email = 'protobuf@googlegroups.com',
license = 'New BSD License',
description = 'Protocol Buffers',
long_description =
"Protocol Buffers are Google's data interchange format.",
)
| bsd-3-clause |
nischalshrestha/recognize | lib/httplib2/socks.py | 811 | 18459 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
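def _socks_demo_wrap(module):
    """Hedged usage sketch, added for illustration: route every socket a
    module opens through a local SOCKS5 proxy. The 127.0.0.1:1080 endpoint
    is an illustrative assumption, not a bundled default.
    """
    setdefaultproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
    wrapmodule(module)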
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
self.__httptunnel = True
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def sendall(self, content, *args):
""" override socket.socket.sendall method to rewrite the header
for non-tunneling proxies if needed
"""
if not self.__httptunnel:
content = self.__rewriteproxy(content)
return super(socksocket, self).sendall(content, *args)
def __rewriteproxy(self, header):
""" rewrite HTTP request headers to support non-tunneling proxies
(i.e. those which do not support the CONNECT method).
This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
"""
host, endpt = None, None
hdrs = header.split("\r\n")
for hdr in hdrs:
if hdr.lower().startswith("host:"):
host = hdr
elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
endpt = hdr
if host and endpt:
hdrs.remove(host)
hdrs.remove(endpt)
host = host.split(" ")[1]
endpt = endpt.split(" ")
if (self.__proxy[4] != None and self.__proxy[5] != None):
hdrs.insert(0, self.__getauthheader())
hdrs.insert(0, "Host: %s" % host)
hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
return "\r\n".join(hdrs)
def __getauthheader(self):
auth = self.__proxy[4] + ":" + self.__proxy[5]
return "Proxy-Authorization: Basic " + base64.b64encode(auth)
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Four types
                    are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                    PROXY_TYPE_SOCKS5, PROXY_TYPE_HTTP and
                    PROXY_TYPE_HTTP_NO_TUNNEL.
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
        rdns -      Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
            if ord(resp[1:2]) <= 8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # rmtrslv is a boolean, so test it directly; only report the numeric
        # address when the name was resolved locally (mirrors the SOCKS5 path).
        if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
headers += ["Host: ", destaddr, "\r\n"]
if (self.__proxy[4] != None and self.__proxy[5] != None):
headers += [self.__getauthheader(), "\r\n"]
headers.append("\r\n")
self.sendall("".join(headers).encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1],portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0],destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
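def _socksocket_demo():
    """Hedged usage sketch, added for illustration: fetch a page through a
    SOCKS5 proxy. The proxy and destination addresses are assumptions.
    """
    s = socksocket()
    s.setproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080, rdns=True)
    s.connect(("example.com", 80))
    s.sendall("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    data = s.recv(4096)
    s.close()
    return data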
| mit |
cchamberlain/gyp | test/copies/gyptest-all.py | 100 | 1368 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('copies.gyp',
'-G', 'xcode_ninja_target_pattern=^(?!copies_null)',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('copies.gyp', test.ALL, chdir='relocate/src')
test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
test.built_file_must_match('copies-out/file2',
'file2 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file3',
'file3 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file4',
'file4 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/subdir/file5',
'file5 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/subdir/file6',
'file6 contents\n',
chdir='relocate/src')
test.pass_test()
| bsd-3-clause |
Beauhurst/django | tests/modeladmin/tests.py | 23 | 26908 | from datetime import date
from django import forms
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.auth.models import User
from django.db import models
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from django.test.utils import isolate_apps
from .models import Band, Concert
class MockRequest:
pass
class MockSuperUser:
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name', 'bio', 'sign_date'])
self.assertIsNone(ma.get_exclude(request, self.band))
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# get_fieldsets() is called when figuring out form fields (#18681).
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
A lookup_allowed allows a parameter whose field lookup doesn't exist.
(#21129).
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
@isolate_apps('modeladmin')
def test_lookup_allowed_onetoone(self):
class Department(models.Model):
code = models.CharField(max_length=4, unique=True)
class Employee(models.Model):
department = models.ForeignKey(Department, models.CASCADE, to_field="code")
class EmployeeProfile(models.Model):
employee = models.OneToOneField(Employee, models.CASCADE)
class EmployeeInfo(models.Model):
employee = models.OneToOneField(Employee, models.CASCADE)
description = models.CharField(max_length=100)
class EmployeeProfileAdmin(ModelAdmin):
list_filter = [
'employee__employeeinfo__description',
'employee__department__code',
]
ma = EmployeeProfileAdmin(EmployeeProfile, self.site)
# Reverse OneToOneField
self.assertIs(ma.lookup_allowed('employee__employeeinfo__description', 'test_value'), True)
# OneToOneField and ForeignKey
self.assertIs(ma.lookup_allowed('employee__department__code', 'test_value'), True)
def test_field_arguments(self):
# If fields is specified, fieldsets_add and fieldsets_change should
# just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
# If fields or fieldsets is specified, it should exclude fields on the
# Form class to the fields specified. This may cause errors to be
# raised in the db layer if required model fields aren't in fields/
# fieldsets, but that's preferable to ghost errors where a field in the
# Form class isn't being displayed because it's not in fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
The custom ModelForm's `Meta.exclude` is respected when used in
conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined (#14496).
"""
# With ModelAdmin
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['sign_date'])
# With InlineModelAdmin
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = tuple()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
# `name` shouldn't appear in base_fields because it's part of
# readonly_fields.
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
# But it should appear in get_fields()/fieldsets() so it can be
# displayed as read-only.
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
"""
The custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined (#14496).
"""
# With ModelAdmin
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['bio', 'sign_date'])
# With InlineModelAdmin
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE']
)
def test_overriding_get_exclude(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_overrides_exclude(self):
class BandAdmin(ModelAdmin):
exclude = ['bio']
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_takes_obj(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
if obj:
return ['sign_date']
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request, self.band).base_fields),
['name', 'bio']
)
def test_custom_form_validation(self):
# If a form is specified, it should use it allowing custom validation
# to work properly. This won't break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
The `exclude` kwarg passed to `ModelAdmin.get_form()` overrides all
other declarations (#8999).
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super().get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
The `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations (#8999).
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super().get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_formset_overriding_get_exclude_with_form_fields(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['main_band', 'opening_band', 'day', 'transport']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_formset_overriding_get_exclude_with_form_exclude(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_queryset_override(self):
# If the queryset of a ModelChoiceField in a custom form is overridden,
# RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band.objects.create(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
ma = ModelAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id)
)
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id
)
def test_regression_for_ticket_15820(self):
"""
`obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super().get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')]
)
self.assertEqual(type(cmafa.base_fields['transport'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
)
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band']
)
def test_log_actions(self):
ma = ModelAdmin(Band, self.site)
mock_request = MockRequest()
mock_request.user = User.objects.create(username='bill')
self.assertEqual(ma.log_addition(mock_request, self.band, 'added'), LogEntry.objects.latest('id'))
self.assertEqual(ma.log_change(mock_request, self.band, 'changed'), LogEntry.objects.latest('id'))
        self.assertEqual(ma.log_deletion(mock_request, self.band, 'deleted'), LogEntry.objects.latest('id'))
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser:
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
has_add_permission returns True for users who can add objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
has_change_permission returns True for users who can edit objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
has_delete_permission returns True for users who can delete objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
        has_module_permission returns True for users who have any permission
for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
| bsd-3-clause |
dulems/hue | desktop/core/ext-py/Paste-1.7.2/paste/util/ip4.py | 27 | 9271 | # -*- coding: iso-8859-15 -*-
"""IP4 address range set implementation.
Implements an IPv4-range type.
Copyright (C) 2006, Heiko Wundram.
Released under the MIT-license.
"""
# Version information
# -------------------
__author__ = "Heiko Wundram <me@modelnine.org>"
__version__ = "0.2"
__revision__ = "3"
__date__ = "2006-01-20"
# Imports
# -------
import intset
import socket
# IP4Range class
# --------------
class IP4Range(intset.IntSet):
"""IP4 address range class with efficient storage of address ranges.
Supports all set operations."""
_MINIP4 = 0
_MAXIP4 = (1<<32) - 1
_UNITYTRANS = "".join([chr(n) for n in range(256)])
_IPREMOVE = "0123456789."
def __init__(self,*args):
"""Initialize an ip4range class. The constructor accepts an unlimited
number of arguments that may either be tuples in the form (start,stop),
integers, longs or strings, where start and stop in a tuple may
also be of the form integer, long or string.
Passing an integer or long means passing an IPv4-address that's already
been converted to integer notation, whereas passing a string specifies
an address where this conversion still has to be done. A string
address may be in the following formats:
- 1.2.3.4 - a plain address, interpreted as a single address
- 1.2.3 - a set of addresses, interpreted as 1.2.3.0-1.2.3.255
- localhost - hostname to look up, interpreted as single address
- 1.2.3<->5 - a set of addresses, interpreted as 1.2.3.0-1.2.5.255
- 1.2.0.0/16 - a set of addresses, interpreted as 1.2.0.0-1.2.255.255
Only the first three notations are valid if you use a string address in
a tuple, whereby notation 2 is interpreted as 1.2.3.0 if specified as
lower bound and 1.2.3.255 if specified as upper bound, not as a range
of addresses.
Specifying a range is done with the <-> operator. This is necessary
because '-' might be present in a hostname. '<->' shouldn't be, ever.
"""
# Special case copy constructor.
if len(args) == 1 and isinstance(args[0],IP4Range):
super(IP4Range,self).__init__(args[0])
return
# Convert arguments to tuple syntax.
args = list(args)
for i in range(len(args)):
argval = args[i]
if isinstance(argval,str):
if "<->" in argval:
# Type 4 address.
args[i] = self._parseRange(*argval.split("<->",1))
continue
elif "/" in argval:
# Type 5 address.
args[i] = self._parseMask(*argval.split("/",1))
else:
# Type 1, 2 or 3.
args[i] = self._parseAddrRange(argval)
elif isinstance(argval,tuple):
                if len(argval) != 2:
raise ValueError("Tuple is of invalid length.")
addr1, addr2 = argval
if isinstance(addr1,str):
addr1 = self._parseAddrRange(addr1)[0]
elif not isinstance(addr1,(int,long)):
raise TypeError("Invalid argument.")
if isinstance(addr2,str):
addr2 = self._parseAddrRange(addr2)[1]
elif not isinstance(addr2,(int,long)):
raise TypeError("Invalid argument.")
args[i] = (addr1,addr2)
elif not isinstance(argval,(int,long)):
raise TypeError("Invalid argument.")
# Initialize the integer set.
super(IP4Range,self).__init__(min=self._MINIP4,max=self._MAXIP4,*args)
# Parsing functions
# -----------------
def _parseRange(self,addr1,addr2):
naddr1, naddr1len = _parseAddr(addr1)
naddr2, naddr2len = _parseAddr(addr2)
if naddr2len < naddr1len:
naddr2 += naddr1&(((1<<((naddr1len-naddr2len)*8))-1)<<
(naddr2len*8))
naddr2len = naddr1len
elif naddr2len > naddr1len:
raise ValueError("Range has more dots than address.")
naddr1 <<= (4-naddr1len)*8
naddr2 <<= (4-naddr2len)*8
naddr2 += (1<<((4-naddr2len)*8))-1
return (naddr1,naddr2)
def _parseMask(self,addr,mask):
naddr, naddrlen = _parseAddr(addr)
naddr <<= (4-naddrlen)*8
try:
if not mask:
masklen = 0
else:
masklen = int(mask)
if not 0 <= masklen <= 32:
raise ValueError
except ValueError:
try:
                mask = ip2int(mask, False)
except ValueError:
raise ValueError("Mask isn't parseable.")
remaining = 0
masklen = 0
if not mask:
masklen = 0
else:
                while not (mask&1):
                    remaining += 1
                    mask >>= 1
while (mask&1):
mask >>= 1
masklen += 1
        if remaining+masklen != 32:
raise ValueError("Mask isn't a proper host mask.")
naddr1 = naddr & (((1<<masklen)-1)<<(32-masklen))
naddr2 = naddr1 + (1<<(32-masklen)) - 1
return (naddr1,naddr2)
def _parseAddrRange(self,addr):
naddr, naddrlen = _parseAddr(addr)
naddr1 = naddr<<((4-naddrlen)*8)
naddr2 = ( (naddr<<((4-naddrlen)*8)) +
(1<<((4-naddrlen)*8)) - 1 )
return (naddr1,naddr2)
# Utility functions
# -----------------
def _int2ip(self,num):
rv = []
for i in range(4):
rv.append(str(num&255))
num >>= 8
return ".".join(reversed(rv))
# Iterating
# ---------
def iteraddresses(self):
"""Returns an iterator which iterates over ips in this iprange. An
IP is returned in string form (e.g. '1.2.3.4')."""
for v in super(IP4Range,self).__iter__():
yield self._int2ip(v)
def iterranges(self):
"""Returns an iterator which iterates over ip-ip ranges which build
this iprange if combined. An ip-ip pair is returned in string form
(e.g. '1.2.3.4-2.3.4.5')."""
for r in self._ranges:
if r[1]-r[0] == 1:
yield self._int2ip(r[0])
else:
yield '%s-%s' % (self._int2ip(r[0]),self._int2ip(r[1]-1))
def itermasks(self):
"""Returns an iterator which iterates over ip/mask pairs which build
this iprange if combined. An IP/Mask pair is returned in string form
(e.g. '1.2.3.0/24')."""
for r in self._ranges:
for v in self._itermasks(r):
yield v
def _itermasks(self,r):
ranges = [r]
while ranges:
cur = ranges.pop()
curmask = 0
while True:
curmasklen = 1<<(32-curmask)
start = (cur[0]+curmasklen-1)&(((1<<curmask)-1)<<(32-curmask))
if start >= cur[0] and start+curmasklen <= cur[1]:
break
else:
curmask += 1
yield "%s/%s" % (self._int2ip(start),curmask)
if cur[0] < start:
ranges.append((cur[0],start))
if cur[1] > start+curmasklen:
ranges.append((start+curmasklen,cur[1]))
__iter__ = iteraddresses
# Printing
# --------
def __repr__(self):
"""Returns a string which can be used to reconstruct this iprange."""
rv = []
for start, stop in self._ranges:
if stop-start == 1:
rv.append("%r" % (self._int2ip(start),))
else:
rv.append("(%r,%r)" % (self._int2ip(start),
self._int2ip(stop-1)))
return "%s(%s)" % (self.__class__.__name__,",".join(rv))
def _parseAddr(addr,lookup=True):
if lookup and addr.translate(IP4Range._UNITYTRANS, IP4Range._IPREMOVE):
try:
addr = socket.gethostbyname(addr)
except socket.error:
raise ValueError("Invalid Hostname as argument.")
naddr = 0
for naddrpos, part in enumerate(addr.split(".")):
if naddrpos >= 4:
raise ValueError("Address contains more than four parts.")
try:
if not part:
part = 0
else:
part = int(part)
if not 0 <= part < 256:
raise ValueError
except ValueError:
raise ValueError("Address part out of range.")
naddr <<= 8
naddr += part
return naddr, naddrpos+1
def ip2int(addr, lookup=True):
return _parseAddr(addr, lookup=lookup)[0]
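# Worked example (hedged, not in the original module): ip2int("1.2.3.4")
# returns 0x01020304 == 16909060; no hostname lookup happens because the
# string contains only digits and dots.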
if __name__ == "__main__":
# Little test script.
x = IP4Range("172.22.162.250/24")
y = IP4Range("172.22.162.250","172.22.163.250","172.22.163.253<->255")
print x
for val in x.itermasks():
print val
for val in y.itermasks():
print val
for val in (x|y).itermasks():
print val
for val in (x^y).iterranges():
print val
for val in x:
print val
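    # Hedged addition: intersection, assuming intset supports '&' like the
    # '|' and '^' operations exercised above.
    for val in (x&y).iterranges():
        print val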
| apache-2.0 |
annelida/stuff | Scrapy/activesport/activesport/settings.py | 1 | 3027 | # -*- coding: utf-8 -*-
# Scrapy settings for activesport project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'activesport'
SPIDER_MODULES = ['activesport.spiders']
NEWSPIDER_MODULE = 'activesport.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'activesport (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'activesport.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'activesport.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'activesport.pipelines.ActivesportPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED=True
HTTPCACHE_EXPIRATION_SECS=0
HTTPCACHE_DIR='httpcache'
HTTPCACHE_IGNORE_HTTP_CODES=[]
HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit |
skosukhin/spack | var/spack/repos/builtin/packages/ncdu/package.py | 1 | 2257 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Ncdu(Package):
"""Ncdu is a disk usage analyzer with an ncurses interface. It is designed
to find space hogs on a remote server where you don't have an entire
    graphical setup available, but it is a useful tool even on regular desktop
systems. Ncdu aims to be fast, simple and easy to use, and should be able
to run in any minimal POSIX-like environment with ncurses installed.
"""
homepage = "http://dev.yorhel.nl/ncdu"
url = "http://dev.yorhel.nl/download/ncdu-1.11.tar.gz"
version('1.11', '9e44240a5356b029f05f0e70a63c4d12')
version('1.10', '7535decc8d54eca811493e82d4bfab2d')
version('1.9', '93258079db897d28bb8890e2db89b1fb')
version('1.8', '94d7a821f8a0d7ba8ef3dd926226f7d5')
version('1.7', '172047c29d232724cc62e773e82e592a')
depends_on("ncurses")
def install(self, spec, prefix):
configure('--prefix=%s' % prefix,
'--with-ncurses=%s' % spec['ncurses'])
make()
make("install")
| lgpl-2.1 |
bbossola/katas | trains/core/biz.py | 1 | 5195 | from math import inf
class Context():
def __init__(self, max_len, direct=False, deep=False):
self._max_len = max_len
self._direct = direct
self._deep = deep
self._routes = []
def routes(self):
return self._routes
def max_depth(self):
return self._max_len
def direct(self):
return self._direct
def deep(self):
return self._deep
def store(self, route):
self._routes.append(route)
def __len__(self):
return len(self._routes)
def default_context():
return Context(10)
class Filters():
@staticmethod
def all():
return lambda context, route: True
@staticmethod
def max_stops(max_stops):
return lambda context, route: route.stops() < max_stops
@staticmethod
def distance_less_than(max_distance):
return lambda context, route: route.distance() < max_distance
@staticmethod
def compose(*filters):
def check(context, route):
for filtrum in filters:
if filtrum(context, route) == False:
return False
return True
return check
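    # Hedged usage sketch: compose() AND-combines the given predicates, so a
    # route must satisfy every filter to pass, e.g.:
    #   short_and_near = Filters.compose(Filters.max_stops(3),
    #                                    Filters.distance_less_than(30))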
@staticmethod
def valid(station_from, station_to):
def check(context, route):
return route.start() == station_from and route.end() == station_to
return check
@staticmethod
def exact_stops(max_stops):
return lambda context, route: route.stops() == max_stops
class Route():
def __init__(self, steps=[], distance=0):
self._steps = steps
self._distance = distance
def via(self, station_from, station_to):
new_steps = list(self._steps)
new_steps.append(station_to)
new_distance = self._distance + station_from.distance_to(station_to)
return Route(new_steps, new_distance)
def concatenate(self, other_route):
new_steps = list(self._steps)
        new_steps.extend(other_route._steps)  # extend, not append: keep the step list flat
new_distance = self._distance + other_route._distance
return Route(new_steps, new_distance)
def distance(self):
return self._distance
def stops(self):
return max(0, len(self._steps) - 2)
def shorter_than(self, other):
return self._distance < other.distance()
def start(self):
return self._steps[0] if len(self._steps) > 0 else None
def end(self):
return self._steps[-1] if len(self._steps) > 0 else None
def __str__(self):
text = str(self._distance) + "-"
for step in self._steps:
text = text + str(step)
return text
def __len__(self):
return len(self._steps)
NO_ROUTE = Route([], inf)
class Station():
def __init__(self, iden):
self._id = iden
self._links = {}
def link(self, other, distance):
self._links[other] = distance
# A -> B -> C
    def route_to(self, other, context=default_context(), filtrum=Filters.all()):
return self._route_to(Route([self]), other, context, filtrum)
def _route_to(self, route, other, context, filtrum):
if self.connected_to(other):
result = route.via(self, other)
if filtrum(context, result):
context.store(result)
if context.deep() == False:
return result
if len(route) < context.max_depth() and context.direct() == False:
result = NO_ROUTE
for station in self._links:
route_try = station._route_to(route.via(self, station), other, context, filtrum)
result = route_try if route_try.shorter_than(result) else result
if filtrum(context, result):
context.store(route)
return result
else:
return NO_ROUTE
def connected_to(self, other):
return other in self._links
def distance_to(self, other):
return self._links[other] if self.connected_to(other) else inf
def id(self):
return self._id
def __str__(self):
return self._id
class Railway():
def __init__(self):
self._size = 0
def newStation(self, name):
station = Station(name)
self._size = self._size + 1
return station
def all_routes(self, filters, *stations):
context = Context(max_len=10, deep=True)
self._route(stations, context, filters)
return context.routes()
def best_route(self, *stations):
context = Context(max_len=self._size)
return self._route(stations, context)
def best_direct_route(self, *stations):
context = Context(max_len=self._size, direct=True)
return self._route(stations, context)
def _route(self, stations, context, user_filter=Filters.all()):
result = Route()
start = None
for station in stations:
if start is None:
start = station
else:
filtrum = Filters.compose(user_filter, Filters.valid(start, station))
result = result.concatenate(start.route_to(station, context, filtrum))
start = station
return result
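# Hedged demo (not part of the kata): wire up a tiny railway and query it.
if __name__ == "__main__":
    railway = Railway()
    a = railway.newStation("A")
    b = railway.newStation("B")
    c = railway.newStation("C")
    a.link(b, 5)
    b.link(c, 4)
    # With the flattened concatenate() above, this prints "9-ABC".
    print(railway.best_route(a, c))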
| mit |
ahmed-mahran/hue | desktop/core/ext-py/kazoo-2.0/kazoo/security.py | 36 | 4365 | """Kazoo Security"""
from base64 import b64encode
from collections import namedtuple
import hashlib
# Represents a Zookeeper ID and ACL object
Id = namedtuple('Id', 'scheme id')
class ACL(namedtuple('ACL', 'perms id')):
"""An ACL for a Zookeeper Node
An ACL object is created by using an :class:`Id` object along with
a :class:`Permissions` setting. For convenience,
:meth:`make_digest_acl` should be used to create an ACL object with
the desired scheme, id, and permissions.
"""
@property
def acl_list(self):
perms = []
if self.perms & Permissions.ALL == Permissions.ALL:
perms.append('ALL')
return perms
if self.perms & Permissions.READ == Permissions.READ:
perms.append('READ')
if self.perms & Permissions.WRITE == Permissions.WRITE:
perms.append('WRITE')
if self.perms & Permissions.CREATE == Permissions.CREATE:
perms.append('CREATE')
if self.perms & Permissions.DELETE == Permissions.DELETE:
perms.append('DELETE')
if self.perms & Permissions.ADMIN == Permissions.ADMIN:
perms.append('ADMIN')
return perms
def __repr__(self):
return 'ACL(perms=%r, acl_list=%s, id=%r)' % (
self.perms, self.acl_list, self.id)
class Permissions(object):
READ = 1
WRITE = 2
CREATE = 4
DELETE = 8
ADMIN = 16
ALL = 31
# Shortcuts for common Ids
ANYONE_ID_UNSAFE = Id('world', 'anyone')
AUTH_IDS = Id('auth', '')
# Shortcuts for common ACLs
OPEN_ACL_UNSAFE = [ACL(Permissions.ALL, ANYONE_ID_UNSAFE)]
CREATOR_ALL_ACL = [ACL(Permissions.ALL, AUTH_IDS)]
READ_ACL_UNSAFE = [ACL(Permissions.READ, ANYONE_ID_UNSAFE)]
def make_digest_acl_credential(username, password):
"""Create a SHA1 digest credential"""
credential = username.encode('utf-8') + b":" + password.encode('utf-8')
cred_hash = b64encode(hashlib.sha1(credential).digest()).strip()
return username + ":" + cred_hash.decode('utf-8')
def make_acl(scheme, credential, read=False, write=False,
create=False, delete=False, admin=False, all=False):
"""Given a scheme and credential, return an :class:`ACL` object
appropriate for use with Kazoo.
:param scheme: The scheme to use. I.e. `digest`.
:param credential:
A colon separated username, password. The password should be
hashed with the `scheme` specified. The
:meth:`make_digest_acl_credential` method will create and
return a credential appropriate for use with the `digest`
scheme.
:param write: Write permission.
:type write: bool
:param create: Create permission.
:type create: bool
:param delete: Delete permission.
:type delete: bool
:param admin: Admin permission.
:type admin: bool
:param all: All permissions.
:type all: bool
:rtype: :class:`ACL`
"""
if all:
permissions = Permissions.ALL
else:
permissions = 0
if read:
permissions |= Permissions.READ
if write:
permissions |= Permissions.WRITE
if create:
permissions |= Permissions.CREATE
if delete:
permissions |= Permissions.DELETE
if admin:
permissions |= Permissions.ADMIN
return ACL(permissions, Id(scheme, credential))
def make_digest_acl(username, password, read=False, write=False,
create=False, delete=False, admin=False, all=False):
"""Create a digest ACL for Zookeeper with the given permissions
This method combines :meth:`make_digest_acl_credential` and
:meth:`make_acl` to create an :class:`ACL` object appropriate for
use with Kazoo's ACL methods.
:param username: Username to use for the ACL.
:param password: A plain-text password to hash.
:param write: Write permission.
:type write: bool
:param create: Create permission.
:type create: bool
:param delete: Delete permission.
:type delete: bool
:param admin: Admin permission.
:type admin: bool
:param all: All permissions.
:type all: bool
:rtype: :class:`ACL`
"""
cred = make_digest_acl_credential(username, password)
return make_acl("digest", cred, read=read, write=write, create=create,
delete=delete, admin=admin, all=all)
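# Hedged usage sketch (not in the original module): build a digest ACL.
if __name__ == "__main__":
    acl = make_digest_acl("bob", "secret", read=True, write=True)
    # read|write -> perms == 3, acl_list == ['READ', 'WRITE']
    print(acl)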
| apache-2.0 |
lyceel/engine | build/android/pylib/utils/json_results_generator_unittest.py | 87 | 7184 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Most of this file was ported over from Blink's
# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
#
import unittest
import json
from pylib.utils import json_results_generator
class JSONGeneratorTest(unittest.TestCase):
def setUp(self):
self.builder_name = 'DUMMY_BUILDER_NAME'
self.build_name = 'DUMMY_BUILD_NAME'
self.build_number = 'DUMMY_BUILDER_NUMBER'
# For archived results.
self._json = None
self._num_runs = 0
self._tests_set = set([])
self._test_timings = {}
self._failed_count_map = {}
self._PASS_count = 0
self._DISABLED_count = 0
self._FLAKY_count = 0
self._FAILS_count = 0
self._fixable_count = 0
self._orig_write_json = json_results_generator.WriteJSON
# unused arguments ... pylint: disable=W0613
def _WriteJSONStub(json_object, file_path, callback=None):
pass
json_results_generator.WriteJSON = _WriteJSONStub
def tearDown(self):
json_results_generator.WriteJSON = self._orig_write_json
def _TestJSONGeneration(self, passed_tests_list, failed_tests_list):
tests_set = set(passed_tests_list) | set(failed_tests_list)
DISABLED_tests = set([t for t in tests_set
if t.startswith('DISABLED_')])
FLAKY_tests = set([t for t in tests_set
if t.startswith('FLAKY_')])
FAILS_tests = set([t for t in tests_set
if t.startswith('FAILS_')])
PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
failed_tests = set(failed_tests_list) - DISABLED_tests
failed_count_map = dict([(t, 1) for t in failed_tests])
test_timings = {}
i = 0
for test in tests_set:
test_timings[test] = float(self._num_runs * 100 + i)
i += 1
test_results_map = dict()
for test in tests_set:
test_results_map[test] = json_results_generator.TestResult(
test, failed=(test in failed_tests),
elapsed_time=test_timings[test])
generator = json_results_generator.JSONResultsGeneratorBase(
self.builder_name, self.build_name, self.build_number,
'',
None, # don't fetch past json results archive
test_results_map)
failed_count_map = dict([(t, 1) for t in failed_tests])
# Test incremental json results
incremental_json = generator.GetJSON()
self._VerifyJSONResults(
tests_set,
test_timings,
failed_count_map,
len(PASS_tests),
len(DISABLED_tests),
len(FLAKY_tests),
len(DISABLED_tests | failed_tests),
incremental_json,
1)
# We don't verify the results here, but at least we make sure the code
# runs without errors.
generator.GenerateJSONOutput()
generator.GenerateTimesMSFile()
def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map,
PASS_count, DISABLED_count, FLAKY_count,
fixable_count, json_obj, num_runs):
# Aliasing to a short name for better access to its constants.
JRG = json_results_generator.JSONResultsGeneratorBase
self.assertIn(JRG.VERSION_KEY, json_obj)
self.assertIn(self.builder_name, json_obj)
buildinfo = json_obj[self.builder_name]
self.assertIn(JRG.FIXABLE, buildinfo)
self.assertIn(JRG.TESTS, buildinfo)
self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
if tests_set or DISABLED_count:
fixable = {}
for fixable_items in buildinfo[JRG.FIXABLE]:
for (result_type, count) in fixable_items.iteritems():
if result_type in fixable:
fixable[result_type] = fixable[result_type] + count
else:
fixable[result_type] = count
if PASS_count:
self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
else:
self.assertTrue(JRG.PASS_RESULT not in fixable or
fixable[JRG.PASS_RESULT] == 0)
if DISABLED_count:
self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
else:
self.assertTrue(JRG.SKIP_RESULT not in fixable or
fixable[JRG.SKIP_RESULT] == 0)
if FLAKY_count:
self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
else:
self.assertTrue(JRG.FLAKY_RESULT not in fixable or
fixable[JRG.FLAKY_RESULT] == 0)
if failed_count_map:
tests = buildinfo[JRG.TESTS]
for test_name in failed_count_map.iterkeys():
test = self._FindTestInTrie(test_name, tests)
failed = 0
for result in test[JRG.RESULTS]:
if result[1] == JRG.FAIL_RESULT:
failed += result[0]
self.assertEqual(failed_count_map[test_name], failed)
timing_count = 0
for timings in test[JRG.TIMES]:
if timings[1] == test_timings[test_name]:
timing_count = timings[0]
self.assertEqual(1, timing_count)
if fixable_count:
self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
def _FindTestInTrie(self, path, trie):
nodes = path.split('/')
sub_trie = trie
for node in nodes:
self.assertIn(node, sub_trie)
sub_trie = sub_trie[node]
return sub_trie
def testJSONGeneration(self):
self._TestJSONGeneration([], [])
self._TestJSONGeneration(['A1', 'B1'], [])
self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2'])
self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], [])
self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4'])
self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
self._TestJSONGeneration(
['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
['FAILS_D6'])
# Generate JSON with the same test sets. (Both incremental results and
# archived results must be updated appropriately.)
self._TestJSONGeneration(
['A', 'FLAKY_B', 'DISABLED_C'],
['FAILS_D', 'FLAKY_E'])
self._TestJSONGeneration(
['A', 'DISABLED_C', 'FLAKY_E'],
['FLAKY_B', 'FAILS_D'])
self._TestJSONGeneration(
['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
['A', 'FLAKY_E'])
  def testHierarchicalJSONGeneration(self):
# FIXME: Re-work tests to be more comprehensible and comprehensive.
self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C'])
def testTestTimingsTrie(self):
individual_test_timings = []
individual_test_timings.append(
json_results_generator.TestResult(
'foo/bar/baz.html',
elapsed_time=1.2))
individual_test_timings.append(
json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
trie = json_results_generator.TestTimingsTrie(individual_test_timings)
expected_trie = {
'bar.html': 0,
'foo': {
'bar': {
'baz.html': 1200,
}
}
}
self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
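# Hedged addition: allow running this test module directly.
if __name__ == '__main__':
  unittest.main()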
| bsd-3-clause |
dawnpower/nova | nova/cells/utils.py | 4 | 6298 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Utility Methods
"""
import random
import sys
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
# Separator used between cell names for the 'full cell name' and routing
# path
PATH_CELL_SEP = '!'
# Separator used between cell name and item
_CELL_ITEM_SEP = '@'
class ProxyObjectSerializer(obj_base.NovaObjectSerializer):
def __init__(self):
super(ProxyObjectSerializer, self).__init__()
self.serializer = super(ProxyObjectSerializer, self)
def _process_object(self, context, objprim):
return _CellProxy.obj_from_primitive(self.serializer, objprim, context)
class _CellProxy(object):
def __init__(self, obj, cell_path):
self._obj = obj
self._cell_path = cell_path
@property
def id(self):
return cell_with_item(self._cell_path, self._obj.id)
@property
def host(self):
return cell_with_item(self._cell_path, self._obj.host)
def __getitem__(self, key):
if key == 'id':
return self.id
if key == 'host':
return self.host
return getattr(self._obj, key)
def obj_to_primitive(self):
obj_p = self._obj.obj_to_primitive()
obj_p['cell_proxy.class_name'] = self.__class__.__name__
obj_p['cell_proxy.cell_path'] = self._cell_path
return obj_p
@classmethod
def obj_from_primitive(cls, serializer, primitive, context=None):
obj_primitive = primitive.copy()
cell_path = obj_primitive.pop('cell_proxy.cell_path', None)
klass_name = obj_primitive.pop('cell_proxy.class_name', None)
obj = serializer._process_object(context, obj_primitive)
if klass_name is not None and cell_path is not None:
klass = getattr(sys.modules[__name__], klass_name)
return klass(obj, cell_path)
else:
return obj
# dict-ish syntax sugar
def iteritems(self):
"""For backwards-compatibility with dict-based objects.
NOTE(sbauza): May be removed in the future.
"""
for name in self._obj.obj_fields:
if (self._obj.obj_attr_is_set(name) or
name in self._obj.obj_extra_fields):
if name == 'id':
yield name, self.id
elif name == 'host':
yield name, self.host
else:
yield name, getattr(self._obj, name)
def __getattr__(self, key):
return getattr(self._obj, key)
class ComputeNodeProxy(_CellProxy):
pass
class ServiceProxy(_CellProxy):
def __getattr__(self, key):
if key == 'compute_node':
# NOTE(sbauza): As the Service object is still having a nested
# ComputeNode object that consumers of this Proxy don't use, we can
# safely remove it from what's returned
raise AttributeError
return getattr(self._obj, key)
def get_instances_to_sync(context, updated_since=None, project_id=None,
deleted=True, shuffle=False, uuids_only=False):
"""Return a generator that will return a list of active and
deleted instances to sync with parent cells. The list may
optionally be shuffled for periodic updates so that multiple
cells services aren't self-healing the same instances in nearly
lockstep.
"""
filters = {}
if updated_since is not None:
filters['changes-since'] = updated_since
if project_id is not None:
filters['project_id'] = project_id
if not deleted:
filters['deleted'] = False
# Active instances first.
instances = db.instance_get_all_by_filters(
context, filters, 'deleted', 'asc')
if shuffle:
random.shuffle(instances)
for instance in instances:
if uuids_only:
yield instance['uuid']
else:
yield instance
def cell_with_item(cell_name, item):
"""Turn cell_name and item into <cell_name>@<item>."""
if cell_name is None:
return item
return cell_name + _CELL_ITEM_SEP + str(item)
def split_cell_and_item(cell_and_item):
"""Split a combined cell@item and return them."""
result = cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
if len(result) == 1:
return (None, cell_and_item)
else:
return result
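# Illustration (hedged): cell_with_item('region!child', 42) yields
# 'region!child@42', and split_cell_and_item('region!child@42') splits it
# back into 'region!child' and '42'.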
def add_cell_to_compute_node(compute_node, cell_name):
"""Fix compute_node attributes that should be unique. Allows
API cell to query the 'id' by cell@id.
"""
# NOTE(sbauza): As compute_node is a ComputeNode object, we need to wrap it
# for adding the cell_path information
compute_proxy = ComputeNodeProxy(compute_node, cell_name)
try:
service = compute_proxy.service
except exception.ServiceNotFound:
service = None
if isinstance(service, objects.Service):
compute_proxy.service = ServiceProxy(service, cell_name)
return compute_proxy
def add_cell_to_service(service, cell_name):
"""Fix service attributes that should be unique. Allows
API cell to query the 'id' or 'host' by cell@id/host.
"""
# NOTE(sbauza): As service is a Service object, we need to wrap it
# for adding the cell_path information
service_proxy = ServiceProxy(service, cell_name)
return service_proxy
def add_cell_to_task_log(task_log, cell_name):
"""Fix task_log attributes that should be unique. In particular,
the 'id' and 'host' fields should be prepended with cell name.
"""
task_log['id'] = cell_with_item(cell_name, task_log['id'])
task_log['host'] = cell_with_item(cell_name, task_log['host'])
| apache-2.0 |
prepare/Blink_only_permissive_lic_files | Source/build/scripts/make_css_property_names.py | 8 | 7529 | #!/usr/bin/env python
import subprocess
import sys
import css_properties
import in_generator
import license
HEADER_TEMPLATE = """
%(license)s
#ifndef %(class_name)s_h
#define %(class_name)s_h
#include "core/css/parser/CSSParserMode.h"
#include "wtf/HashFunctions.h"
#include "wtf/HashTraits.h"
#include <string.h>
namespace WTF {
class AtomicString;
class String;
}
namespace blink {
enum CSSPropertyID {
CSSPropertyInvalid = 0,
%(property_enums)s
};
const int firstCSSProperty = %(first_property_id)s;
const int numCSSProperties = %(properties_count)s;
const int lastCSSProperty = %(last_property_id)d;
const int lastUnresolvedCSSProperty = %(last_unresolved_property_id)d;
const size_t maxCSSPropertyNameLength = %(max_name_length)d;
const char* getPropertyName(CSSPropertyID);
const WTF::AtomicString& getPropertyNameAtomicString(CSSPropertyID);
WTF::String getPropertyNameString(CSSPropertyID);
WTF::String getJSPropertyName(CSSPropertyID);
inline CSSPropertyID convertToCSSPropertyID(int value)
{
ASSERT((value >= firstCSSProperty && value <= lastCSSProperty) || value == CSSPropertyInvalid);
return static_cast<CSSPropertyID>(value);
}
inline CSSPropertyID resolveCSSPropertyID(CSSPropertyID id)
{
return convertToCSSPropertyID(id & ~512);
}
inline bool isPropertyAlias(CSSPropertyID id) { return id & 512; }
} // namespace blink
namespace WTF {
template<> struct DefaultHash<blink::CSSPropertyID> { typedef IntHash<unsigned> Hash; };
template<> struct HashTraits<blink::CSSPropertyID> : GenericHashTraits<blink::CSSPropertyID> {
static const bool emptyValueIsZero = true;
static void constructDeletedValue(blink::CSSPropertyID& slot, bool) { slot = static_cast<blink::CSSPropertyID>(blink::lastUnresolvedCSSProperty + 1); }
static bool isDeletedValue(blink::CSSPropertyID value) { return value == (blink::lastUnresolvedCSSProperty + 1); }
};
}
#endif // %(class_name)s_h
"""
GPERF_TEMPLATE = """
%%{
%(license)s
#include "config.h"
#include "%(class_name)s.h"
#include "core/css/HashTools.h"
#include <string.h>
#include "wtf/ASCIICType.h"
#include "wtf/text/AtomicString.h"
#include "wtf/text/WTFString.h"
namespace blink {
static const char propertyNameStringsPool[] = {
%(property_name_strings)s
};
static const unsigned short propertyNameStringsOffsets[] = {
%(property_name_offsets)s
};
%%}
%%struct-type
struct Property;
%%omit-struct-type
%%language=C++
%%readonly-tables
%%global-table
%%compare-strncmp
%%define class-name %(class_name)sHash
%%define lookup-function-name findPropertyImpl
%%define hash-function-name property_hash_function
%%define slot-name nameOffset
%%define word-array-name property_word_list
%%enum
%%%%
%(property_to_enum_map)s
%%%%
const Property* findProperty(register const char* str, register unsigned int len)
{
return %(class_name)sHash::findPropertyImpl(str, len);
}
const char* getPropertyName(CSSPropertyID id)
{
ASSERT(id >= firstCSSProperty && id <= lastUnresolvedCSSProperty);
int index = id - firstCSSProperty;
return propertyNameStringsPool + propertyNameStringsOffsets[index];
}
const AtomicString& getPropertyNameAtomicString(CSSPropertyID id)
{
ASSERT(id >= firstCSSProperty && id <= lastUnresolvedCSSProperty);
int index = id - firstCSSProperty;
static AtomicString* propertyStrings = new AtomicString[lastUnresolvedCSSProperty]; // Intentionally never destroyed.
AtomicString& propertyString = propertyStrings[index];
if (propertyString.isNull()) {
const char* propertyName = propertyNameStringsPool + propertyNameStringsOffsets[index];
propertyString = AtomicString(propertyName, strlen(propertyName), AtomicString::ConstructFromLiteral);
}
return propertyString;
}
String getPropertyNameString(CSSPropertyID id)
{
// We share the StringImpl with the AtomicStrings.
return getPropertyNameAtomicString(id).string();
}
String getJSPropertyName(CSSPropertyID id)
{
char result[maxCSSPropertyNameLength + 1];
const char* cssPropertyName = getPropertyName(id);
const char* propertyNamePointer = cssPropertyName;
if (!propertyNamePointer)
return emptyString();
char* resultPointer = result;
while (char character = *propertyNamePointer++) {
if (character == '-') {
char nextCharacter = *propertyNamePointer++;
if (!nextCharacter)
break;
character = (propertyNamePointer - 2 != cssPropertyName) ? toASCIIUpper(nextCharacter) : nextCharacter;
}
*resultPointer++ = character;
}
*resultPointer = '\\0';
return String(result);
}
} // namespace blink
"""
class CSSPropertyNamesWriter(css_properties.CSSProperties):
class_name = "CSSPropertyNames"
def __init__(self, in_file_path):
super(CSSPropertyNamesWriter, self).__init__(in_file_path)
self._outputs = {(self.class_name + ".h"): self.generate_header,
(self.class_name + ".cpp"): self.generate_implementation,
}
def _enum_declaration(self, property):
return " %(property_id)s = %(enum_value)s," % property
def generate_header(self):
return HEADER_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'property_enums': "\n".join(map(self._enum_declaration, self._properties_including_aliases)),
'first_property_id': self._first_enum_value,
'properties_count': len(self._properties),
'last_property_id': self._first_enum_value + len(self._properties) - 1,
'last_unresolved_property_id': max(property["enum_value"] for property in self._properties_including_aliases),
'max_name_length': max(map(len, self._properties)),
}
def generate_implementation(self):
enum_value_to_name = {property['enum_value']: property['name'] for property in self._properties_including_aliases}
property_offsets = []
property_names = []
current_offset = 0
for enum_value in range(1, max(enum_value_to_name) + 1):
property_offsets.append(current_offset)
if enum_value in enum_value_to_name:
name = enum_value_to_name[enum_value]
property_names.append(name)
current_offset += len(name) + 1
css_name_and_enum_pairs = [(property['name'], property['property_id']) for property in self._properties_including_aliases]
gperf_input = GPERF_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'property_name_strings': '\n'.join(' "%s\\0"' % name for name in property_names),
'property_name_offsets': '\n'.join(' %d,' % offset for offset in property_offsets),
'property_to_enum_map': '\n'.join('%s, %s' % property for property in css_name_and_enum_pairs),
}
# FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
gperf_args.extend(['-m', '50']) # Pick best of 50 attempts.
gperf_args.append('-D') # Allow duplicate hashes -> More compact code.
gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
return gperf.communicate(gperf_input)[0]
if __name__ == "__main__":
in_generator.Maker(CSSPropertyNamesWriter).main(sys.argv)
| bsd-3-clause |
oscarolar/odoo | openerp/tools/__init__.py | 38 | 1428 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import copy
import win32
import appdirs
from config import config
from misc import *
from convert import *
from translate import *
from graph import graph
from image import *
from amount_to_text import *
from amount_to_text_en import *
from pdf_utils import *
from yaml_import import *
from sql import *
from float_utils import *
from mail import *
from debugger import *
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kkdd/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/lib2to3/fixes/fix_apply.py | 49 | 1894 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for apply().
This converts apply(func, v, k) into (func)(*v, **k)."""
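# Example rewrites performed by this fixer (hedged illustrations of the
# docstring above):
#   apply(f, args)         ->  f(*args)
#   apply(f, args, kwds)   ->  f(*args, **kwds)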
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Call, Comma, parenthesize
class FixApply(fixer_base.BaseFix):
PATTERN = """
power< 'apply'
trailer<
'('
arglist<
(not argument<NAME '=' any>) func=any ','
(not argument<NAME '=' any>) args=any [','
(not argument<NAME '=' any>) kwds=any] [',']
>
')'
>
>
"""
def transform(self, node, results):
syms = self.syms
assert results
func = results["func"]
args = results["args"]
kwds = results.get("kwds")
prefix = node.get_prefix()
func = func.clone()
if (func.type not in (token.NAME, syms.atom) and
(func.type != syms.power or
func.children[-2].type == token.DOUBLESTAR)):
# Need to parenthesize
func = parenthesize(func)
func.set_prefix("")
args = args.clone()
args.set_prefix("")
if kwds is not None:
kwds = kwds.clone()
kwds.set_prefix("")
l_newargs = [pytree.Leaf(token.STAR, "*"), args]
if kwds is not None:
l_newargs.extend([Comma(),
pytree.Leaf(token.DOUBLESTAR, "**"),
kwds])
l_newargs[-2].set_prefix(" ") # that's the ** token
# XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
# can be translated into f(x, y, *t) instead of f(*(x, y) + t)
#new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
return Call(func, l_newargs, prefix=prefix)
| apache-2.0 |
wshallum/ansible | lib/ansible/modules/network/nxos/nxos_pim_rp_address.py | 13 | 12732 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_pim_rp_address
version_added: "2.2"
short_description: Manages configuration of a PIM static RP address instance.
description:
  - Manages configuration of a Protocol Independent Multicast (PIM) static
rendezvous point (RP) address instance.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
  - C(state=absent) removes the whole rp-address configuration, if present.
options:
rp_address:
description:
- Configures a Protocol Independent Multicast (PIM) static
rendezvous point (RP) address. Valid values are
unicast addresses.
required: true
group_list:
description:
- Group range for static RP. Valid values are multicast addresses.
required: false
default: null
prefix_list:
description:
- Prefix list policy for static RP. Valid values are prefix-list
policy names.
required: false
default: null
route_map:
description:
- Route map policy for static RP. Valid values are route-map
policy names.
required: false
default: null
bidir:
description:
- Group range is treated in PIM bidirectional mode.
required: false
choices: ['true','false']
default: null
'''
EXAMPLES = '''
- nxos_pim_rp_address:
rp_address: "10.1.1.20"
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"rp_address": "10.1.1.21"}
existing:
description: list of existing pim rp-address configuration entries
returned: verbose mode
type: list
sample: []
end_state:
description: pim rp-address configuration entries after module execution
returned: verbose mode
type: list
sample: [{"bidir": false, "group_list": "224.0.0.0/4",
"rp_address": "10.1.1.21"}]
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
BOOL_PARAMS = ['bidir']
PARAM_TO_COMMAND_KEYMAP = {
'rp_address': 'ip pim rp-address'
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
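# Illustrative: given the running-config line
#   ip pim rp-address 10.1.1.21 group-list 224.0.0.0/4 bidir
# get_value() yields
#   [{'rp_address': '10.1.1.21', 'group_list': '224.0.0.0/4', 'bidir': True}]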
def get_value(config, module):
value_list = []
splitted_config = config.splitlines()
for line in splitted_config:
tmp = {}
if 'ip pim rp-address' in line:
splitted_line = line.split()
tmp['rp_address'] = splitted_line[3]
if len(splitted_line) > 5:
value = splitted_line[5]
if splitted_line[4] == 'route-map':
tmp['route_map'] = value
elif splitted_line[4] == 'prefix-list':
tmp['prefix_list'] = value
elif splitted_line[4] == 'group-list':
tmp['group_list'] = value
if 'bidir' in line:
tmp['bidir'] = True
else:
tmp['bidir'] = False
value_list.append(tmp)
return value_list
def get_existing(module, args):
existing = {}
config = str(get_config(module))
existing = get_value(config, module)
return existing
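# Illustrative: apply_key_map({'rp_address': 'ip pim rp-address'},
#                             {'rp_address': '10.1.1.21'})
# returns {'ip pim rp-address': '10.1.1.21'}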
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def state_present(module, existing, proposed, candidate):
command = 'ip pim rp-address {0}'.format(module.params['rp_address'])
commands = build_command(proposed, command)
if commands:
candidate.add(commands, parents=[])
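# Illustrative: build_command({'group_list': '224.0.0.0/4', 'bidir': True},
#                             'ip pim rp-address 10.1.1.20')
# returns ['ip pim rp-address 10.1.1.20 group-list 224.0.0.0/4 bidir']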
def build_command(param_dict, command):
for param in ['group_list', 'prefix_list', 'route_map']:
if param_dict.get(param):
command += ' {0} {1}'.format(
param.replace('_', '-'), param_dict.get(param))
if param_dict.get('bidir'):
command += ' bidir'
return [command]
def state_absent(module, existing, proposed, candidate):
commands = list()
for each in existing:
if each.get('rp_address') == proposed['rp_address']:
command = 'no ip pim rp-address {0}'.format(proposed['rp_address'])
if each.get('group_list'):
commands = build_command(each, command)
else:
commands = [command]
if commands:
candidate.add(commands, parents=[])
def main():
argument_spec = dict(
rp_address=dict(required=True, type='str'),
group_list=dict(required=False, type='str'),
prefix_list=dict(required=False, type='str'),
route_map=dict(required=False, type='str'),
bidir=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
mutually_exclusive=[['group_list', 'route_map'],
['group_list', 'prefix_list'],
['route_map', 'prefix_list']],
supports_check_mode=True)
state = module.params['state']
args = [
'rp_address',
'group_list',
'prefix_list',
'route_map',
'bidir'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
for each in existing:
if each.get(key) or (not each.get(key) and value):
proposed[key] = value
result = {}
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
camilonova/django | tests/forms_tests/widget_tests/test_nullbooleanselect.py | 80 | 2067 | from django.forms import NullBooleanSelect
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class NullBooleanSelectTest(WidgetTest):
widget = NullBooleanSelect()
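    # The widget maps its three <option> values as: '1' -> unknown (None),
    # '2' -> yes (True), '3' -> no (False); the tests below cover each case.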
def test_render_true(self):
self.check_html(self.widget, 'is_cool', True, html=(
"""<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected>Yes</option>
<option value="3">No</option>
</select>"""
))
def test_render_false(self):
self.check_html(self.widget, 'is_cool', False, html=(
"""<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected>No</option>
</select>"""
))
def test_render_none(self):
self.check_html(self.widget, 'is_cool', None, html=(
"""<select name="is_cool">
<option value="1" selected>Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>"""
))
def test_render_value(self):
self.check_html(self.widget, 'is_cool', '2', html=(
"""<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected>Yes</option>
<option value="3">No</option>
</select>"""
))
@override_settings(USE_L10N=True)
def test_l10n(self):
"""
The NullBooleanSelect widget's options are lazily localized (#17190).
"""
widget = NullBooleanSelect()
with translation.override('de-at'):
self.check_html(widget, 'id_bool', True, html=(
"""
<select name="id_bool">
<option value="1">Unbekannt</option>
<option value="2" selected>Ja</option>
<option value="3">Nein</option>
</select>
"""
))
| bsd-3-clause |
google/syzygy | third_party/numpy/files/numpy/distutils/command/scons.py | 93 | 24248 | import os
import sys
import os.path
from os.path import join as pjoin, dirname as pdirname
from distutils.errors import DistutilsPlatformError
from distutils.errors import DistutilsExecError, DistutilsSetupError
from numpy.distutils.command.build_ext import build_ext as old_build_ext
from numpy.distutils.ccompiler import CCompiler, new_compiler
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils import log
from numpy.distutils.misc_util import is_bootstrapping, get_cmd
from numpy.distutils.misc_util import get_numpy_include_dirs as _incdir
from numpy.distutils.compat import get_exception
# A few notes:
# - numscons is not mandatory to build numpy, so we cannot import it here.
# Any numscons import has to happen once we check numscons is available and
# is required for the build (call through setupscons.py or native numscons
# build).
def get_scons_build_dir():
"""Return the top path where everything produced by scons will be put.
The path is relative to the top setup.py"""
from numscons import get_scons_build_dir
return get_scons_build_dir()
def get_scons_pkg_build_dir(pkg):
"""Return the build directory for the given package (foo.bar).
The path is relative to the top setup.py"""
from numscons.core.utils import pkg_to_path
return pjoin(get_scons_build_dir(), pkg_to_path(pkg))
def get_scons_configres_dir():
"""Return the top path where everything produced by scons will be put.
The path is relative to the top setup.py"""
from numscons import get_scons_configres_dir
return get_scons_configres_dir()
def get_scons_configres_filename():
"""Return the top path where everything produced by scons will be put.
The path is relative to the top setup.py"""
from numscons import get_scons_configres_filename
return get_scons_configres_filename()
def get_scons_local_path():
"""This returns the full path where scons.py for scons-local is located."""
from numscons import get_scons_path
return get_scons_path()
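# Illustrative: if the package build dir is 'build/scons/foo/bar' (three path
# separators), _get_top_dir() returns '../../../..', i.e. enough os.pardir
# hops to climb back to the directory holding the top setup.py.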
def _get_top_dir(pkg):
# XXX: this mess is necessary because scons is launched per package, and
# has no knowledge outside its build dir, which is package dependent. If
# one day numscons does not launch one process/package, this will be
# unnecessary.
from numscons import get_scons_build_dir
from numscons.core.utils import pkg_to_path
scdir = pjoin(get_scons_build_dir(), pkg_to_path(pkg))
n = scdir.count(os.sep)
return os.sep.join([os.pardir for i in range(n+1)])
def get_distutils_libdir(cmd, pkg):
"""Returns the path where distutils install libraries, relatively to the
scons build directory."""
return pjoin(_get_top_dir(pkg), cmd.build_lib)
def get_distutils_clibdir(cmd, pkg):
"""Returns the path where distutils put pure C libraries."""
return pjoin(_get_top_dir(pkg), cmd.build_clib)
def get_distutils_install_prefix(pkg, inplace):
"""Returns the installation path for the current package."""
from numscons.core.utils import pkg_to_path
if inplace == 1:
return pkg_to_path(pkg)
else:
install_cmd = get_cmd('install').get_finalized_command('install')
return pjoin(install_cmd.install_libbase, pkg_to_path(pkg))
def get_python_exec_invoc():
"""This returns the python executable from which this file is invocated."""
# Do we need to take into account the PYTHONPATH, in a cross platform way,
# that is the string returned can be executed directly on supported
# platforms, and the sys.path of the executed python should be the same
# than the caller ? This may not be necessary, since os.system is said to
    # take into account os.environ. This actually also works for my way of
# using "local python", using the alias facility of bash.
return sys.executable
def get_numpy_include_dirs(sconscript_path):
"""Return include dirs for numpy.
    The paths are relative to the setup.py script path."""
from numscons import get_scons_build_dir
scdir = pjoin(get_scons_build_dir(), pdirname(sconscript_path))
n = scdir.count(os.sep)
dirs = _incdir()
rdirs = []
for d in dirs:
rdirs.append(pjoin(os.sep.join([os.pardir for i in range(n+1)]), d))
return rdirs
def dirl_to_str(dirlist):
"""Given a list of directories, returns a string where the paths are
concatenated by the path separator.
example: ['foo/bar', 'bar/foo'] will return 'foo/bar:bar/foo'."""
return os.pathsep.join(dirlist)
def dist2sconscc(compiler):
"""This converts the name passed to distutils to scons name convention (C
compiler). compiler should be a CCompiler instance.
Example:
--compiler=intel -> intelc"""
compiler_type = compiler.compiler_type
if compiler_type == 'msvc':
return 'msvc'
elif compiler_type == 'intel':
return 'intelc'
else:
return compiler.compiler[0]
def dist2sconsfc(compiler):
"""This converts the name passed to distutils to scons name convention
(Fortran compiler). The argument should be a FCompiler instance.
Example:
--fcompiler=intel -> ifort on linux, ifl on windows"""
if compiler.compiler_type == 'intel':
#raise NotImplementedError('FIXME: intel fortran compiler name ?')
return 'ifort'
elif compiler.compiler_type == 'gnu':
return 'g77'
elif compiler.compiler_type == 'gnu95':
return 'gfortran'
elif compiler.compiler_type == 'sun':
return 'sunf77'
else:
# XXX: Just give up for now, and use generic fortran compiler
return 'fortran'
def dist2sconscxx(compiler):
"""This converts the name passed to distutils to scons name convention
(C++ compiler). The argument should be a Compiler instance."""
if compiler.compiler_type == 'msvc':
return compiler.compiler_type
return compiler.compiler_cxx[0]
def get_compiler_executable(compiler):
"""For any give CCompiler instance, this gives us the name of C compiler
(the actual executable).
NOTE: does NOT work with FCompiler instances."""
    # Geez, why does distutils have no common way to get the compiler name...
if compiler.compiler_type == 'msvc':
        # this is hardcoded in distutils... A bit cleaner way would be to
# initialize the compiler instance and then get compiler.cc, but this
# may be costly: we really just want a string.
# XXX: we need to initialize the compiler anyway, so do not use
# hardcoded string
#compiler.initialize()
#print compiler.cc
return 'cl.exe'
else:
return compiler.compiler[0]
def get_f77_compiler_executable(compiler):
"""For any give FCompiler instance, this gives us the name of F77 compiler
(the actual executable)."""
return compiler.compiler_f77[0]
def get_cxxcompiler_executable(compiler):
"""For any give CCompiler instance, this gives us the name of CXX compiler
(the actual executable).
NOTE: does NOT work with FCompiler instances."""
    # Geez, why does distutils have no common way to get the compiler name...
if compiler.compiler_type == 'msvc':
        # this is hardcoded in distutils... A bit cleaner way would be to
# initialize the compiler instance and then get compiler.cc, but this
# may be costly: we really just want a string.
# XXX: we need to initialize the compiler anyway, so do not use
# hardcoded string
#compiler.initialize()
#print compiler.cc
return 'cl.exe'
else:
return compiler.compiler_cxx[0]
def get_tool_path(compiler):
"""Given a distutils.ccompiler.CCompiler class, returns the path of the
toolset related to C compilation."""
fullpath_exec = find_executable(get_compiler_executable(compiler))
if fullpath_exec:
fullpath = pdirname(fullpath_exec)
else:
raise DistutilsSetupError("Could not find compiler executable info for scons")
return fullpath
def get_f77_tool_path(compiler):
"""Given a distutils.ccompiler.FCompiler class, returns the path of the
toolset related to F77 compilation."""
fullpath_exec = find_executable(get_f77_compiler_executable(compiler))
if fullpath_exec:
fullpath = pdirname(fullpath_exec)
else:
raise DistutilsSetupError("Could not find F77 compiler executable "\
"info for scons")
return fullpath
def get_cxx_tool_path(compiler):
"""Given a distutils.ccompiler.CCompiler class, returns the path of the
toolset related to C compilation."""
fullpath_exec = find_executable(get_cxxcompiler_executable(compiler))
if fullpath_exec:
fullpath = pdirname(fullpath_exec)
else:
raise DistutilsSetupError("Could not find compiler executable info for scons")
return fullpath
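# Illustrative: protect_path('/opt/My Tools/bin') -> '"/opt/My Tools/bin"'
# and protect_path('') -> '""'.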
def protect_path(path):
"""Convert path (given as a string) to something the shell will have no
    problem understanding (spaces, etc.)."""
if path:
        # XXX: to do this correctly, this is totally bogus for now (does not check for
# already quoted path, for example).
return '"' + path + '"'
else:
return '""'
def parse_package_list(pkglist):
return pkglist.split(",")
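# Illustrative: find_common(['a', 'b', 'c'], ['c', 'a']) == [0, 2] -- the
# returned indices point into seq1, in seq1 order.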
def find_common(seq1, seq2):
"""Given two list, return the index of the common items.
The index are relative to seq1.
Note: do not handle duplicate items."""
dict2 = dict([(i, None) for i in seq2])
    return [i for i in range(len(seq1)) if seq1[i] in dict2]
def select_packages(sconspkg, pkglist):
"""Given a list of packages in pkglist, return the list of packages which
match this list."""
common = find_common(sconspkg, pkglist)
if not len(common) == len(pkglist):
msg = "the package list contains a package not found in "\
"the current list. The current list is %s" % sconspkg
raise ValueError(msg)
return common
def check_numscons(minver):
"""Check that we can use numscons.
    minver is a 3-integer tuple which defines the minimum version."""
try:
import numscons
except ImportError:
e = get_exception()
raise RuntimeError("importing numscons failed (error was %s), using " \
"scons within distutils is not possible without "
"this package " % str(e))
try:
# version_info was added in 0.10.0
from numscons import version_info
# Stupid me used string instead of numbers in version_info in
# dev versions of 0.10.0
if isinstance(version_info[0], str):
raise ValueError("Numscons %s or above expected " \
"(detected 0.10.0)" % str(minver))
# Stupid me used list instead of tuple in numscons
version_info = tuple(version_info)
if version_info[:3] < minver:
raise ValueError("Numscons %s or above expected (got %s) "
% (str(minver), str(version_info[:3])))
except ImportError:
raise RuntimeError("You need numscons >= %s to build numpy "\
"with numscons (imported numscons path " \
"is %s)." % (minver, numscons.__file__))
# XXX: this is a gigantic mess. Refactor this at some point.
class scons(old_build_ext):
# XXX: add an option to the scons command for configuration (auto/force/cache).
description = "Scons builder"
library_options = [
('with-perflib=', None,
         'Specify which performance library to use for BLAS/LAPACK/etc... ' \
'Examples: mkl/atlas/sunper/accelerate'),
('with-mkl-lib=', None, 'TODO'),
('with-mkl-include=', None, 'TODO'),
('with-mkl-libraries=', None, 'TODO'),
('with-atlas-lib=', None, 'TODO'),
('with-atlas-include=', None, 'TODO'),
('with-atlas-libraries=', None, 'TODO')
]
user_options = [
        ('jobs=', 'j', "specify number of worker threads when executing " \
"scons"),
('inplace', 'i', 'If specified, build in place.'),
('import-env', 'e', 'If specified, import user environment into scons env["ENV"].'),
('bypass', 'b', 'Bypass distutils compiler detection (experimental).'),
('scons-tool-path=', None, 'specify additional path '\
'(absolute) to look for scons tools'),
        ('silent=', None, 'specify whether scons output should be less verbose '\
'(1), silent (2), super silent (3) or not (0, default)'),
('log-level=', None, 'specify log level for numscons. Any value ' \
'valid for the logging python module is valid'),
('package-list=', None,
'If specified, only run scons on the given '\
'packages (example: --package-list=scipy.cluster). If empty, '\
'no package is built'),
('fcompiler=', None, "specify the Fortran compiler type"),
('compiler=', None, "specify the C compiler type"),
('cxxcompiler=', None,
"specify the C++ compiler type (same as C by default)"),
('debug', 'g',
"compile/link with debugging information"),
] + library_options
def initialize_options(self):
old_build_ext.initialize_options(self)
self.build_clib = None
self.debug = 0
self.compiler = None
self.cxxcompiler = None
self.fcompiler = None
self.jobs = None
self.silent = 0
self.import_env = 0
self.scons_tool_path = ''
# If true, we bypass distutils to find the c compiler altogether. This
# is to be used in desperate cases (like incompatible visual studio
# version).
self._bypass_distutils_cc = False
# scons compilers
self.scons_compiler = None
self.scons_compiler_path = None
self.scons_fcompiler = None
self.scons_fcompiler_path = None
self.scons_cxxcompiler = None
self.scons_cxxcompiler_path = None
self.package_list = None
self.inplace = 0
self.bypass = 0
# Only critical things
self.log_level = 50
# library options
self.with_perflib = []
self.with_mkl_lib = []
self.with_mkl_include = []
self.with_mkl_libraries = []
self.with_atlas_lib = []
self.with_atlas_include = []
self.with_atlas_libraries = []
def _init_ccompiler(self, compiler_type):
        # XXX: The logic to bypass distutils is ... not so logical.
if compiler_type == 'msvc':
self._bypass_distutils_cc = True
try:
distutils_compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
distutils_compiler.customize(self.distribution)
# This initialization seems necessary, sometimes, for find_executable to work...
if hasattr(distutils_compiler, 'initialize'):
distutils_compiler.initialize()
self.scons_compiler = dist2sconscc(distutils_compiler)
self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler))
except DistutilsPlatformError:
e = get_exception()
if not self._bypass_distutils_cc:
raise e
else:
self.scons_compiler = compiler_type
def _init_fcompiler(self, compiler_type):
self.fcompiler = new_fcompiler(compiler = compiler_type,
verbose = self.verbose,
dry_run = self.dry_run,
force = self.force)
if self.fcompiler is not None:
self.fcompiler.customize(self.distribution)
self.scons_fcompiler = dist2sconsfc(self.fcompiler)
self.scons_fcompiler_path = protect_path(get_f77_tool_path(self.fcompiler))
def _init_cxxcompiler(self, compiler_type):
cxxcompiler = new_compiler(compiler = compiler_type,
verbose = self.verbose,
dry_run = self.dry_run,
force = self.force)
if cxxcompiler is not None:
cxxcompiler.customize(self.distribution, need_cxx = 1)
cxxcompiler.customize_cmd(self)
self.cxxcompiler = cxxcompiler.cxx_compiler()
try:
get_cxx_tool_path(self.cxxcompiler)
except DistutilsSetupError:
self.cxxcompiler = None
if self.cxxcompiler:
self.scons_cxxcompiler = dist2sconscxx(self.cxxcompiler)
self.scons_cxxcompiler_path = protect_path(get_cxx_tool_path(self.cxxcompiler))
def finalize_options(self):
old_build_ext.finalize_options(self)
self.sconscripts = []
self.pre_hooks = []
self.post_hooks = []
self.pkg_names = []
self.pkg_paths = []
if self.distribution.has_scons_scripts():
for i in self.distribution.scons_data:
self.sconscripts.append(i.scons_path)
self.pre_hooks.append(i.pre_hook)
self.post_hooks.append(i.post_hook)
self.pkg_names.append(i.parent_name)
self.pkg_paths.append(i.pkg_path)
# This crap is needed to get the build_clib
# directory
build_clib_cmd = get_cmd("build_clib").get_finalized_command("build_clib")
self.build_clib = build_clib_cmd.build_clib
if not self.cxxcompiler:
self.cxxcompiler = self.compiler
# To avoid trouble, just don't do anything if no sconscripts are used.
# This is useful when for example f2py uses numpy.distutils, because
# f2py does not pass compiler information to scons command, and the
# compilation setup below can crash in some situation.
if len(self.sconscripts) > 0:
if self.bypass:
self.scons_compiler = self.compiler
self.scons_fcompiler = self.fcompiler
self.scons_cxxcompiler = self.cxxcompiler
else:
                # Try to get the same compilers as the ones used by distutils: this is
# non trivial because distutils and scons have totally different
# conventions on this one (distutils uses PATH from user's environment,
# whereas scons uses standard locations). The way we do it is once we
# got the c compiler used, we use numpy.distutils function to get the
# full path, and add the path to the env['PATH'] variable in env
# instance (this is done in numpy.distutils.scons module).
self._init_ccompiler(self.compiler)
self._init_fcompiler(self.fcompiler)
self._init_cxxcompiler(self.cxxcompiler)
if self.package_list:
self.package_list = parse_package_list(self.package_list)
def _call_scons(self, scons_exec, sconscript, pkg_name, pkg_path, bootstrapping):
# XXX: when a scons script is missing, scons only prints warnings, and
# does not return a failure (status is 0). We have to detect this from
# distutils (this cannot work for recursive scons builds...)
# XXX: passing everything at command line may cause some trouble where
        # there is a size limitation? What is the standard solution in this
        # case?
cmd = [scons_exec, "-f", sconscript, '-I.']
if self.jobs:
cmd.append(" --jobs=%d" % int(self.jobs))
if self.inplace:
cmd.append("inplace=1")
cmd.append('scons_tool_path="%s"' % self.scons_tool_path)
cmd.append('src_dir="%s"' % pdirname(sconscript))
cmd.append('pkg_path="%s"' % pkg_path)
cmd.append('pkg_name="%s"' % pkg_name)
cmd.append('log_level=%s' % self.log_level)
#cmd.append('distutils_libdir=%s' % protect_path(pjoin(self.build_lib,
# pdirname(sconscript))))
cmd.append('distutils_libdir=%s' %
protect_path(get_distutils_libdir(self, pkg_name)))
cmd.append('distutils_clibdir=%s' %
protect_path(get_distutils_clibdir(self, pkg_name)))
prefix = get_distutils_install_prefix(pkg_name, self.inplace)
cmd.append('distutils_install_prefix=%s' % protect_path(prefix))
if not self._bypass_distutils_cc:
cmd.append('cc_opt=%s' % self.scons_compiler)
if self.scons_compiler_path:
cmd.append('cc_opt_path=%s' % self.scons_compiler_path)
else:
cmd.append('cc_opt=%s' % self.scons_compiler)
cmd.append('debug=%s' % self.debug)
if self.scons_fcompiler:
cmd.append('f77_opt=%s' % self.scons_fcompiler)
if self.scons_fcompiler_path:
cmd.append('f77_opt_path=%s' % self.scons_fcompiler_path)
if self.scons_cxxcompiler:
cmd.append('cxx_opt=%s' % self.scons_cxxcompiler)
if self.scons_cxxcompiler_path:
cmd.append('cxx_opt_path=%s' % self.scons_cxxcompiler_path)
cmd.append('include_bootstrap=%s' % dirl_to_str(get_numpy_include_dirs(sconscript)))
cmd.append('bypass=%s' % self.bypass)
cmd.append('import_env=%s' % self.import_env)
if self.silent:
if int(self.silent) == 2:
cmd.append('-Q')
elif int(self.silent) == 3:
cmd.append('-s')
cmd.append('silent=%d' % int(self.silent))
cmd.append('bootstrapping=%d' % bootstrapping)
cmdstr = ' '.join(cmd)
if int(self.silent) < 1:
log.info("Executing scons command (pkg is %s): %s ", pkg_name, cmdstr)
else:
log.info("======== Executing scons command for pkg %s =========", pkg_name)
st = os.system(cmdstr)
if st:
#print "status is %d" % st
msg = "Error while executing scons command."
msg += " See above for more information.\n"
msg += """\
If you think it is a problem in numscons, you can also try executing the scons
command with --log-level option for more detailed output of what numscons is
doing, for example --log-level=0; the lower the level, the more detailed
the output."""
raise DistutilsExecError(msg)
def run(self):
if len(self.sconscripts) < 1:
# nothing to do, just leave it here.
return
check_numscons(minver=(0, 11, 0))
if self.package_list is not None:
id = select_packages(self.pkg_names, self.package_list)
sconscripts = [self.sconscripts[i] for i in id]
pre_hooks = [self.pre_hooks[i] for i in id]
post_hooks = [self.post_hooks[i] for i in id]
pkg_names = [self.pkg_names[i] for i in id]
pkg_paths = [self.pkg_paths[i] for i in id]
else:
sconscripts = self.sconscripts
pre_hooks = self.pre_hooks
post_hooks = self.post_hooks
pkg_names = self.pkg_names
pkg_paths = self.pkg_paths
if is_bootstrapping():
bootstrapping = 1
else:
bootstrapping = 0
scons_exec = get_python_exec_invoc()
scons_exec += ' ' + protect_path(pjoin(get_scons_local_path(), 'scons.py'))
for sconscript, pre_hook, post_hook, pkg_name, pkg_path in zip(sconscripts,
pre_hooks, post_hooks,
pkg_names, pkg_paths):
if pre_hook:
pre_hook()
if sconscript:
self._call_scons(scons_exec, sconscript, pkg_name, pkg_path, bootstrapping)
if post_hook:
post_hook(**{'pkg_name': pkg_name, 'scons_cmd' : self})
| apache-2.0 |