repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
shaistaansari/django | django/conf/locale/en_AU/formats.py | 504 | 2117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Output formats (Django date-format syntax, per the link above).
DATE_FORMAT = 'j M Y'              # '25 Oct 2006'
TIME_FORMAT = 'P'                  # '2:30 p.m.'
DATETIME_FORMAT = 'j M Y, P'       # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = 'F Y'          # 'October 2006'
MONTH_DAY_FORMAT = 'j F'           # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y'        # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P'  # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 0              # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y',             # '25/10/2006', '25/10/06'
    # Disabled alternatives retained as documentation of other accepted styles:
    # '%b %d %Y', '%b %d, %Y',            # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',            # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',            # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',            # '25 October 2006', '25 October, 2006'
]
# Formats tried in order when parsing datetime input; day-first styles
# reflect the en_AU locale convention.
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d/%m/%y %H:%M:%S',     # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',  # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',        # '25/10/06 14:30'
    '%d/%m/%y',              # '25/10/06'
]
# Number formatting: '.' decimal point, ',' between groups of 3 digits.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
spaceone/pyjs | pyjswidgets/pyjamas/Canvas/LinearGradientImplDefault.py | 7 | 1221 | """
* Copyright 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http:#www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
from pyjamas.Canvas.CanvasGradientImplDefault import CanvasGradientImplDefault
"""*
* Default deferred binding of GradientFactory will create instances of this class.
* This corresponds to a LinearGradient for stroke or fill styles.
"""
class LinearGradientImplDefault(CanvasGradientImplDefault):
    """Default GradientFactory binding for a linear gradient.

    Corresponds to a LinearGradient usable as a stroke or fill style.
    """

    def __init__(self, x0, y0, x1, y1, c):
        CanvasGradientImplDefault.__init__(self)
        self.createNativeGradientObject(x0, y0, x1, y1, c)

    def createNativeGradientObject(self, x0, y0, x1, y1, c):
        """Build the browser-native linear gradient from canvas *c* and keep it."""
        context = c.getContext('2d')
        native = context.createLinearGradient(x0, y0, x1, y1)
        self.setNativeGradient(native)
| apache-2.0 |
fran-penedo/dreal3 | benchmarks/network/water/gen.py | 55 | 1248 |
# Template registries filled in by the importing benchmark module; values are
# format strings printed by generate()/print_loop() below.
# NOTE(review): key structure inferred from usage in generate()/print_loop():
# flow_var/flow_dec are keyed by mode key; state_dec/state_val take a step
# number via .format(b); cont_cond/jump_cond are nested [key][step] -- confirm
# against the benchmark modules that populate them.
flow_var = {}
flow_dec = {}
state_dec = {}
state_val = {}
cont_cond = {}
jump_cond = {}
def getHdr(n):
    """Return the next *n* sequential header indices.

    Draws from the module-wide counter stored on the function itself
    (``getHdr.counter``), advancing it by *n*.
    """
    start = getHdr.counter
    getHdr.counter = start + n
    return list(range(start + 1, start + n + 1))
getHdr.counter = 0
######################
# Formula generation #
######################
def print_loop(bound, steps, keys, holder):
    """Print the unrolled continuous and jump conditions up to *bound*.

    For each unrolling step ``c`` it prints, per key, the continuous
    condition for that step, then (unless the bound is reached) the jump
    condition linking step ``c`` to ``c+1``.  ``holder`` is the number of
    fresh header indices drawn from getHdr() per sub-step.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    dump; this assumes the bound check and jump printing sit inside the
    per-sub-step loop -- confirm against the upstream dreal3 gen.py.
    """
    c = 0
    while True:
        for j in range(steps):
            hd = getHdr(holder)
            for i in keys:
                print(cont_cond[i][j].format(c, *hd).strip())
            # stop after emitting the continuous conditions of the final step
            if c >= bound:
                return
            for i in keys:
                print(jump_cond[i][j].format(c, c + 1).strip())
            c += 1
def generate(bound, steps, keys, holder, init, goal):
    """Print a complete QF_NRA_ODE SMT2 benchmark to stdout.

    Emits the logic header, flow variable/ODE declarations, per-step state
    declarations and bounds for steps 0..bound, the initial condition, the
    unrolled transition relation (via print_loop), the goal at the final
    step, and the closing (check-sat)/(exit) commands.
    """
    print("(set-logic QF_NRA_ODE)")
    for i in keys:
        print(flow_var[i].strip())
    for i in keys:
        print(flow_dec[i].strip())
    # declare state variables and their value constraints for every step
    for b in range(bound + 1):
        for i in keys:
            print(state_dec[i].format(b).strip())
    for b in range(bound + 1):
        for i in keys:
            print(state_val[i].format(b).strip())
    print(init.format(0).strip())
    print_loop(bound, steps, keys, holder)
    print(goal.format(bound).strip())
    print("(check-sat)\n(exit)")
| gpl-3.0 |
toshywoshy/ansible | test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py | 83 | 1976 | # Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: test
plugin_type: inventory
short_description: test inventory source
extends_documentation_fragment:
- inventory_cache
'''
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
class InventoryModule(BaseInventoryPlugin, Cacheable):
    """Test inventory plugin exercising the old-style cache plugin API."""

    NAME = 'test'

    def populate(self, hosts):
        # Add every host to the 'all' group and copy its hostvars into the
        # inventory.
        for host in list(hosts.keys()):
            self.inventory.add_host(host, group='all')
            for hostvar, hostval in hosts[host].items():
                self.inventory.set_variable(host, hostvar, hostval)

    def get_hosts(self):
        # Static fixture data standing in for a real inventory source.
        return {'host1': {'one': 'two'}, 'host2': {'three': 'four'}}

    def parse(self, inventory, loader, path, cache=True):
        super(InventoryModule, self).parse(inventory, loader, path)

        self.load_cache_plugin()
        cache_key = self.get_cache_key(path)

        # cache may be True or False at this point to indicate if the inventory is being refreshed
        # get the user's cache option
        cache_setting = self.get_option('cache')

        attempt_to_read_cache = cache_setting and cache
        cache_needs_update = cache_setting and not cache

        # attempt to read the cache if inventory isn't being refreshed and the user has caching enabled
        if attempt_to_read_cache:
            try:
                results = self._cache[cache_key]
            except KeyError:
                # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
                cache_needs_update = True
        if cache_needs_update:
            results = self.get_hosts()

            # set the cache
            self._cache[cache_key] = results

        # NOTE(review): if the user's cache option is disabled, neither branch
        # above assigns `results`, so this would raise NameError -- the test
        # fixture appears to assume caching is always enabled; confirm.
        self.populate(results)
| gpl-3.0 |
phammin1/QaManagement | QaManagement/env/Lib/site-packages/django/db/migrations/recorder.py | 478 | 2868 | from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
    """
    Deals with storing migration records in the database.

    Because this table is actually itself used for dealing with model
    creation, it's the one thing we can't do normally via migrations.
    We manually handle table creation/schema updating (using schema backend)
    and then have a floating model to do queries with.

    If a migration is unapplied its row is removed from the table. Having
    a row in the table always means a migration is applied.
    """

    # Floating model for the django_migrations table; bound to its own
    # Apps() registry (see Meta.apps) rather than the global app registry.
    @python_2_unicode_compatible
    class Migration(models.Model):
        app = models.CharField(max_length=255)
        name = models.CharField(max_length=255)
        applied = models.DateTimeField(default=now)

        class Meta:
            apps = Apps()
            app_label = "migrations"
            db_table = "django_migrations"

        def __str__(self):
            return "Migration %s for %s" % (self.name, self.app)

    def __init__(self, connection):
        # `connection` is the database connection wrapper whose alias is
        # used for every query this recorder issues.
        self.connection = connection

    @property
    def migration_qs(self):
        # Queryset over Migration rows, routed to this recorder's connection.
        return self.Migration.objects.using(self.connection.alias)

    def ensure_schema(self):
        """
        Ensures the table exists and has the correct schema.

        Raises MigrationSchemaMissing if the table cannot be created.
        """
        # If the table's there, that's fine - we've never changed its schema
        # in the codebase.
        if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
            return
        # Make the table
        try:
            with self.connection.schema_editor() as editor:
                editor.create_model(self.Migration)
        except DatabaseError as exc:
            raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)

    def applied_migrations(self):
        """
        Returns a set of (app, name) of applied migrations.
        """
        self.ensure_schema()
        return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))

    def record_applied(self, app, name):
        """
        Records that a migration was applied.
        """
        self.ensure_schema()
        self.migration_qs.create(app=app, name=name)

    def record_unapplied(self, app, name):
        """
        Records that a migration was unapplied.
        """
        self.ensure_schema()
        self.migration_qs.filter(app=app, name=name).delete()

    def flush(self):
        """
        Deletes all migration records. Useful if you're testing migrations.
        """
        self.migration_qs.all().delete()
| mit |
samfpetersen/gnuradio | gr-vocoder/examples/gsm_audio_loopback.py | 58 | 1485 | #!/usr/bin/env python
#
# Copyright 2005,2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
def build_graph():
    """Assemble the GSM full-rate audio loopback flowgraph.

    Audio in at 8 kHz is scaled to short range, GSM-FR encoded then
    immediately decoded, scaled back to float range and played out.
    Returns the (not yet started) top block.
    """
    top = gr.top_block()

    mic = audio.source(8000)
    to_short_range = blocks.multiply_const_ff(32767)
    float_to_short = blocks.float_to_short()
    encoder = vocoder.gsm_fr_encode_sp()
    decoder = vocoder.gsm_fr_decode_ps()
    short_to_float = blocks.short_to_float()
    to_unit_range = blocks.multiply_const_ff(1.0 / 32767.)
    speaker = audio.sink(8000)

    top.connect(mic, to_short_range, float_to_short, encoder,
                decoder, short_to_float, to_unit_range, speaker)
    return top
if __name__ == '__main__':
    # Run the loopback until the user presses Enter, then shut down cleanly.
    tb = build_graph()
    tb.start()
    # raw_input is Python 2 only; this script predates Python 3 support.
    raw_input ('Press Enter to exit: ')
    tb.stop()
    tb.wait()
| gpl-3.0 |
anant-dev/django | django/contrib/staticfiles/utils.py | 335 | 1976 | import fnmatch
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
    """
    Return True if ``path`` matches at least one shell-style pattern in
    ``patterns`` (case-sensitive, via ``fnmatch.fnmatchcase``), otherwise
    False.  A missing/empty ``patterns`` matches nothing.
    """
    return any(fnmatch.fnmatchcase(path, pattern)
               for pattern in (patterns or []))
def get_files(storage, ignore_patterns=None, location=''):
    """
    Recursively walk the storage directories yielding the paths
    of all files that should be copied (those not matching any
    ignore pattern).
    """
    patterns = ignore_patterns or []
    directories, filenames = storage.listdir(location)

    for filename in filenames:
        if matches_patterns(filename, patterns):
            continue
        yield os.path.join(location, filename) if location else filename

    for directory in directories:
        if matches_patterns(directory, patterns):
            continue
        subdir = os.path.join(location, directory) if location else directory
        for path in get_files(storage, patterns, subdir):
            yield path
def check_settings(base_url=None):
    """
    Checks if the staticfiles settings have sane values.

    Raises ImproperlyConfigured when STATIC_URL is unset, or when the
    media and static URL/root settings collide.
    """
    url = settings.STATIC_URL if base_url is None else base_url
    if not url:
        raise ImproperlyConfigured(
            "You're using the staticfiles app "
            "without having set the required STATIC_URL setting.")
    if settings.MEDIA_URL == url:
        raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
                                   "settings must have different values")
    roots_collide = (settings.MEDIA_ROOT and settings.STATIC_ROOT and
                     settings.MEDIA_ROOT == settings.STATIC_ROOT)
    if roots_collide:
        raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
                                   "settings must have different values")
| bsd-3-clause |
julien78910/CouchPotatoServer | libs/guessit/transfo/guess_episodes_rexps.py | 94 | 2416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import Guess
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import episode_rexps
import re
import logging
log = logging.getLogger(__name__)
def number_list(s):
    """Extract all integers embedded in string *s* as a list.

    If exactly two numbers are found, they are treated as an inclusive
    episode interval and every number in between is returned.

    Fix: the interval branch previously returned ``range(...)``, which on
    Python 3 is a lazy range object rather than a list, making the return
    type inconsistent between branches (and not equal to a list).  Both
    branches now return a plain list.
    """
    numbers = [int(n) for n in re.sub('[^0-9]+', ' ', s).split()]

    if len(numbers) == 2:
        # it is an episode interval, return all numbers in between
        return list(range(numbers[0], numbers[1] + 1))

    return numbers
def guess_episodes_rexps(string):
    """Match *string* against each known episode regexp and build a Guess.

    Returns ``(guess, span)`` for the first pattern that matches, where
    ``span`` is the matched character range adjusted by the pattern's
    ``span_adjust`` offsets; returns ``(None, None)`` if nothing matches.
    """
    for rexp, confidence, span_adjust in episode_rexps:
        match = re.search(rexp, string, re.IGNORECASE)
        if match:
            span = (match.start() + span_adjust[0],
                    match.end() + span_adjust[1])
            guess = Guess(match.groupdict(), confidence=confidence, raw=string[span[0]:span[1]])

            # decide whether we have only a single episode number or an
            # episode list
            if guess.get('episodeNumber'):
                eplist = number_list(guess['episodeNumber'])
                # first number is the episode; extra numbers become a list
                guess.set('episodeNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]])

                if len(eplist) > 1:
                    guess.set('episodeList', eplist, confidence=confidence, raw=string[span[0]:span[1]])

            if guess.get('bonusNumber'):
                eplist = number_list(guess['bonusNumber'])
                guess.set('bonusNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]])

            return guess, span

    return None, None
def process(mtree):
    """Apply the episode-regexp guesser to every node of match tree *mtree*."""
    SingleNodeGuesser(guess_episodes_rexps, None, log).process(mtree)
| gpl-3.0 |
gsehub/edx-platform | lms/djangoapps/shoppingcart/processors/helpers.py | 24 | 1058 | """
Helper methods for credit card processing modules.
These methods should be shared among all processor implementations,
but should NOT be imported by modules outside this package.
"""
from django.conf import settings
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
def get_processor_config():
    """
    Return a dictionary of configuration settings for the active credit card processor.
    If configuration overrides are available, return those instead.

    Returns:
        dict
    """
    # Base configuration for the processor named by CC_PROCESSOR_NAME.
    processor_config = settings.CC_PROCESSOR.get(
        settings.CC_PROCESSOR_NAME, {}
    )

    # If a site-specific override key exists, use the matching cybersource
    # configuration nested under the processor's 'microsites' sub-key.
    override_key = configuration_helpers.get_value('cybersource_config_key')
    if override_key:
        processor_config = processor_config['microsites'][override_key]

    return processor_config
| agpl-3.0 |
hotsyk/mailpost | mailpost/fnmatch.py | 1 | 3741 | """
A package that maps incoming email to HTTP requests
Mailpost version 0.1
(C) 2010 oDesk www.oDesk.com
"""
"""
fnmatch.py - fork of original fnmatch
=====================================
.. _purpose:
Purpose of the fork
-----------------
The original fnmatch has two shortcomings for our purposes: it mishandles
escaped strings, and it only performs complete matches rather than matching
a pattern within a string. To support the same rules in glob syntax as we
support in regexes, we decided to fork and patch it.
.. _description:
Original description of fnmatch
--------------------------------
Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]

# Cache of compiled regexes keyed by (normalized) glob pattern.  Unbounded,
# mirroring the original stdlib fnmatch behavior this module was forked from.
_cache = {}
def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """
    import os
    return fnmatchcase(os.path.normcase(name), os.path.normcase(pat))
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT"""
    import os
    import posixpath

    pat = os.path.normcase(pat)
    if pat not in _cache:
        _cache[pat] = re.compile(translate(pat))
    match = _cache[pat].match

    if os.path is posixpath:
        # normcase on posix is NOP. Optimize it away from the loop.
        return [name for name in names if match(name)]
    return [name for name in names if match(os.path.normcase(name))]
def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """
    if pat not in _cache:
        _cache[pat] = re.compile(translate(pat))
    return _cache[pat].match(name) is not None
def translate(pat):
    """Translate a shell PATTERN to a regular expression string.

    Unlike the stdlib original, a backslash quotes the following
    meta-character (that is the purpose of this fork).

    Fix: the original appended ``'\\Z(?ms)'`` to the result.  Global inline
    flags that are not at the start of a pattern raise ``re.error`` on
    Python 3.11+, so the ``(?ms)`` flags are now emitted as a prefix
    instead; the compiled regex behaves identically.
    """
    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i + 1
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        # fix to work with escaped string
        elif c == '\\' and i < n:
            c = pat[i]
            i = i + 1
            res = res + re.escape(c)
        elif c == '[':
            j = i
            if j < n and pat[j] == '!':
                j = j + 1
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and pat[j] != ']':
                j = j + 1
            if j >= n:
                # unterminated set: treat the '[' literally
                res = res + '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    # MULTILINE/DOTALL flags as a prefix (valid on all Python versions),
    # raw string for \Z to avoid the invalid-escape deprecation warning.
    return '(?ms)' + res + r'\Z'
| bsd-3-clause |
gltn/stdm | stdm/third_party/sqlalchemy/orm/persistence.py | 4 | 65927 | # orm/persistence.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`_orm.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
from itertools import chain
from itertools import groupby
import operator
from . import attributes
from . import evaluator
from . import exc as orm_exc
from . import loading
from . import sync
from .base import _entity_descriptor
from .base import state_str
from .. import exc as sa_exc
from .. import sql
from .. import util
from ..sql import expression
from ..sql.base import _from_objects
def _bulk_insert(
    mapper,
    mappings,
    session_transaction,
    isstates,
    return_defaults,
    render_nulls,
):
    """Emit INSERT statements for a bulk_save/bulk_insert operation.

    ``mappings`` is either a sequence of InstanceState objects
    (``isstates=True``) or plain dictionaries of values.  When
    ``return_defaults`` is set with states, each state's identity key is
    populated from the inserted primary key values at the end.
    """
    base_mapper = mapper.base_mapper

    cached_connections = _cached_connection_dict(base_mapper)

    if session_transaction.session.connection_callable:
        raise NotImplementedError(
            "connection_callable / per-instance sharding "
            "not supported in bulk_insert()"
        )

    # normalize the input to a list of plain dictionaries; keep (state, dict)
    # pairs around only if we need to write identity keys back afterwards
    if isstates:
        if return_defaults:
            states = [(state, state.dict) for state in mappings]
            mappings = [dict_ for (state, dict_) in states]
        else:
            mappings = [state.dict for state in mappings]
    else:
        mappings = list(mappings)

    connection = session_transaction.connection(base_mapper)
    for table, super_mapper in base_mapper._sorted_tables.items():
        if not mapper.isa(super_mapper):
            continue

        # generator of INSERT records in the shape _emit_insert_statements
        # expects; the per-record state slot is None in bulk mode
        records = (
            (
                None,
                state_dict,
                params,
                mapper,
                connection,
                value_params,
                has_all_pks,
                has_all_defaults,
            )
            for (
                state,
                state_dict,
                params,
                mp,
                conn,
                value_params,
                has_all_pks,
                has_all_defaults,
            ) in _collect_insert_commands(
                table,
                ((None, mapping, mapper, connection) for mapping in mappings),
                bulk=True,
                return_defaults=return_defaults,
                render_nulls=render_nulls,
            )
        )
        _emit_insert_statements(
            base_mapper,
            None,
            cached_connections,
            super_mapper,
            table,
            records,
            bookkeeping=return_defaults,
        )

    # write the generated identity keys back onto the states
    if return_defaults and isstates:
        identity_cls = mapper._identity_class
        identity_props = [p.key for p in mapper._identity_key_props]
        for state, dict_ in states:
            state.key = (
                identity_cls,
                tuple([dict_[key] for key in identity_props]),
            )
def _bulk_update(
    mapper, mappings, session_transaction, isstates, update_changed_only
):
    """Emit UPDATE statements for a bulk_save/bulk_update operation.

    ``mappings`` is either a sequence of InstanceState objects
    (``isstates=True``) or plain dictionaries; with
    ``update_changed_only`` only attributes present in each state's
    committed_state (plus primary key / version id keys) are included.
    """
    base_mapper = mapper.base_mapper

    cached_connections = _cached_connection_dict(base_mapper)

    # keys that must always survive the "changed only" filter: primary key
    # columns plus the version id attribute, if one is mapped
    search_keys = mapper._primary_key_propkeys
    if mapper._version_id_prop:
        search_keys = {mapper._version_id_prop.key}.union(search_keys)

    def _changed_dict(mapper, state):
        # reduce a state's dict to changed attributes plus the search keys
        return dict(
            (k, v)
            for k, v in state.dict.items()
            if k in state.committed_state or k in search_keys
        )

    if isstates:
        if update_changed_only:
            mappings = [_changed_dict(mapper, state) for state in mappings]
        else:
            mappings = [state.dict for state in mappings]
    else:
        mappings = list(mappings)

    if session_transaction.session.connection_callable:
        raise NotImplementedError(
            "connection_callable / per-instance sharding "
            "not supported in bulk_update()"
        )

    connection = session_transaction.connection(base_mapper)

    for table, super_mapper in base_mapper._sorted_tables.items():
        if not mapper.isa(super_mapper):
            continue

        records = _collect_update_commands(
            None,
            table,
            (
                (
                    None,
                    mapping,
                    mapper,
                    connection,
                    (
                        mapping[mapper._version_id_prop.key]
                        if mapper._version_id_prop
                        else None
                    ),
                )
                for mapping in mappings
            ),
            bulk=True,
        )

        _emit_update_statements(
            base_mapper,
            None,
            cached_connections,
            super_mapper,
            table,
            records,
            bookkeeping=False,
        )
def save_obj(base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.

    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed.  The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.

    """

    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(base_mapper, states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return

    states_to_update = []
    states_to_insert = []
    cached_connections = _cached_connection_dict(base_mapper)

    # partition the states: anything with an identity (or a detected
    # "row switch") becomes an UPDATE, everything else an INSERT
    for (
        state,
        dict_,
        mapper,
        connection,
        has_identity,
        row_switch,
        update_version_id,
    ) in _organize_states_for_save(base_mapper, states, uowtransaction):
        if has_identity or row_switch:
            states_to_update.append(
                (state, dict_, mapper, connection, update_version_id)
            )
        else:
            states_to_insert.append((state, dict_, mapper, connection))

    # emit statements table-by-table in dependency order
    for table, mapper in base_mapper._sorted_tables.items():
        if table not in mapper._pks_by_table:
            continue
        insert = _collect_insert_commands(table, states_to_insert)

        update = _collect_update_commands(
            uowtransaction, table, states_to_update
        )

        _emit_update_statements(
            base_mapper,
            uowtransaction,
            cached_connections,
            mapper,
            table,
            update,
        )

        _emit_insert_statements(
            base_mapper,
            uowtransaction,
            cached_connections,
            mapper,
            table,
            insert,
        )

    # run post-flush bookkeeping (events, state sync) for all states,
    # flagging each record as insert (False) or update (True)
    _finalize_insert_update_commands(
        base_mapper,
        uowtransaction,
        chain(
            (
                (state, state_dict, mapper, connection, False)
                for (state, state_dict, mapper, connection) in states_to_insert
            ),
            (
                (state, state_dict, mapper, connection, True)
                for (
                    state,
                    state_dict,
                    mapper,
                    connection,
                    update_version_id,
                ) in states_to_update
            ),
        ),
    )
def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship() which
    specifies post_update.

    Only the columns listed in ``post_update_cols`` are written; the
    current version id value is read for each state when versioning is in
    effect so the UPDATE can assert it.
    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_update = list(
        _organize_states_for_post_update(base_mapper, states, uowtransaction)
    )

    for table, mapper in base_mapper._sorted_tables.items():
        if table not in mapper._pks_by_table:
            continue

        update = (
            (
                state,
                state_dict,
                sub_mapper,
                connection,
                mapper._get_committed_state_attr_by_column(
                    state, state_dict, mapper.version_id_col
                )
                if mapper.version_id_col is not None
                else None,
            )
            for state, state_dict, sub_mapper, connection in states_to_update
            if table in sub_mapper._pks_by_table
        )

        update = _collect_post_update_commands(
            base_mapper, uowtransaction, table, update, post_update_cols
        )

        _emit_post_update_statements(
            base_mapper,
            uowtransaction,
            cached_connections,
            mapper,
            table,
            update,
        )
def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.

    This is called within the context of a UOWTransaction during a
    flush operation.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_delete = list(
        _organize_states_for_delete(base_mapper, states, uowtransaction)
    )

    table_to_mapper = base_mapper._sorted_tables

    # delete child tables before parent tables (reverse dependency order)
    for table in reversed(list(table_to_mapper.keys())):
        mapper = table_to_mapper[table]
        if table not in mapper._pks_by_table:
            continue
        elif mapper.inherits and mapper.passive_deletes:
            # let the database cascade handle inherited rows
            continue

        delete = _collect_delete_commands(
            base_mapper, uowtransaction, table, states_to_delete
        )

        _emit_delete_statements(
            base_mapper,
            uowtransaction,
            cached_connections,
            mapper,
            table,
            delete,
        )

    # fire after_delete events once all tables have been processed
    for (
        state,
        state_dict,
        mapper,
        connection,
        update_version_id,
    ) in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.

    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.

    Yields tuples of
    (state, dict, mapper, connection, has_identity, row_switch,
    update_version_id).
    """

    for state, dict_, mapper, connection in _connections_for_states(
        base_mapper, uowtransaction, states
    ):

        has_identity = bool(state.key)

        instance_key = state.key or mapper._identity_key_from_state(state)

        row_switch = update_version_id = None

        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)

        if mapper._validate_polymorphic_identity:
            mapper._validate_polymorphic_identity(mapper, state, dict_)

        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if (
            not has_identity
            and instance_key in uowtransaction.session.identity_map
        ):
            instance = uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)

            if not uowtransaction.was_already_deleted(existing):
                if not uowtransaction.is_deleted(existing):
                    # a live persistent duplicate is a hard error
                    raise orm_exc.FlushError(
                        "New instance %s with identity key %s conflicts "
                        "with persistent instance %s"
                        % (state_str(state), instance_key, state_str(existing))
                    )

                base_mapper._log_debug(
                    "detected row switch for identity %s. "
                    "will update %s, remove %s from "
                    "transaction",
                    instance_key,
                    state_str(state),
                    state_str(existing),
                )

                # remove the "delete" flag from the existing element
                uowtransaction.remove_state_actions(existing)
                row_switch = existing

        # capture the committed version id (from the row being switched
        # out, if any) so the UPDATE can assert it
        if (has_identity or row_switch) and mapper.version_id_col is not None:
            update_version_id = mapper._get_committed_state_attr_by_column(
                row_switch if row_switch else state,
                row_switch.dict if row_switch else dict_,
                mapper.version_id_col,
            )

        yield (
            state,
            dict_,
            mapper,
            connection,
            has_identity,
            row_switch,
            update_version_id,
        )
def _organize_states_for_post_update(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for UPDATE
    corresponding to post_update.

    This includes obtaining key information for each state
    including its dictionary, mapper, the connection to use for
    the execution per state.

    Returns an iterable of (state, dict, mapper, connection) tuples.
    """
    return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.

    This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state.

    Yields (state, dict, mapper, connection, update_version_id) tuples.
    """
    for state, dict_, mapper, connection in _connections_for_states(
        base_mapper, uowtransaction, states
    ):

        mapper.dispatch.before_delete(mapper, connection, state)

        # committed version id, used so the DELETE can assert it when
        # versioning is in effect
        if mapper.version_id_col is not None:
            update_version_id = mapper._get_committed_state_attr_by_column(
                state, dict_, mapper.version_id_col
            )
        else:
            update_version_id = None

        yield (state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
    table,
    states_to_insert,
    bulk=False,
    return_defaults=False,
    render_nulls=False,
):
    """Identify sets of values to use in INSERT statements for a
    list of states.

    Yields, per state, the parameter dictionary (``params``), SQL-expression
    parameters that cannot be bound directly (``value_params``), and flags
    indicating whether all primary keys / server defaults are present.
    """
    for state, state_dict, mapper, connection in states_to_insert:
        if table not in mapper._pks_by_table:
            continue

        params = {}
        value_params = {}

        propkey_to_col = mapper._propkey_to_col[table]

        eval_none = mapper._insert_cols_evaluating_none[table]

        for propkey in set(propkey_to_col).intersection(state_dict):
            value = state_dict[propkey]
            col = propkey_to_col[propkey]

            if value is None and col not in eval_none and not render_nulls:
                # omit Nones so column defaults can fire
                continue
            elif not bulk and (
                hasattr(value, "__clause_element__")
                or isinstance(value, sql.ClauseElement)
            ):
                # SQL expressions are rendered inline, not bound
                value_params[col] = (
                    value.__clause_element__()
                    if hasattr(value, "__clause_element__")
                    else value
                )
            else:
                params[col.key] = value

        if not bulk:
            # for all the columns that have no default and we don't have
            # a value and where "None" is not a special value, add
            # explicit None to the INSERT. This is a legacy behavior
            # which might be worth removing, as it should not be necessary
            # and also produces confusion, given that "missing" and None
            # now have distinct meanings
            for colkey in (
                mapper._insert_cols_as_none[table]
                .difference(params)
                .difference([c.key for c in value_params])
            ):
                params[colkey] = None

        if not bulk or return_defaults:
            # params are in terms of Column key objects, so
            # compare to pk_keys_by_table
            has_all_pks = mapper._pk_keys_by_table[table].issubset(params)

            if mapper.base_mapper.eager_defaults:
                has_all_defaults = mapper._server_default_cols[table].issubset(
                    params
                )
            else:
                has_all_defaults = True
        else:
            has_all_defaults = has_all_pks = True

        # generate a new version id value for versioned mappers
        if (
            mapper.version_id_generator is not False
            and mapper.version_id_col is not None
            and mapper.version_id_col in mapper._cols_by_table[table]
        ):
            params[mapper.version_id_col.key] = mapper.version_id_generator(
                None
            )

        yield (
            state,
            state_dict,
            params,
            mapper,
            connection,
            value_params,
            has_all_pks,
            has_all_defaults,
        )
def _collect_update_commands(
    uowtransaction, table, states_to_update, bulk=False
):
    """Identify sets of values to use in UPDATE statements for a
    list of states.
    This function works intricately with the history system
    to determine exactly what values should be updated
    as well as how the row should be matched within an UPDATE
    statement.  Includes some tricky scenarios where the primary
    key of an object might have been changed.

    Yields tuples of
    ``(state, state_dict, params, mapper, connection, value_params,
    has_all_defaults, has_all_pks)``.  ``params`` contains both the
    SET values (keyed by column key) and the WHERE-clause values
    (keyed by ``col._label``); states with no net change are skipped.
    """
    for (
        state,
        state_dict,
        mapper,
        connection,
        update_version_id,
    ) in states_to_update:
        # skip states whose mapper does not persist into this table
        if table not in mapper._pks_by_table:
            continue
        pks = mapper._pks_by_table[table]
        value_params = {}
        propkey_to_col = mapper._propkey_to_col[table]
        if bulk:
            # keys here are mapped attribute keys, so
            # look at mapper attribute keys for pk
            params = dict(
                (propkey_to_col[propkey].key, state_dict[propkey])
                for propkey in set(propkey_to_col)
                .intersection(state_dict)
                .difference(mapper._pk_attr_keys_by_table[table])
            )
            has_all_defaults = True
        else:
            params = {}
            # only attributes with committed history (i.e. modified)
            # are candidates for the SET clause
            for propkey in set(propkey_to_col).intersection(
                state.committed_state
            ):
                value = state_dict[propkey]
                col = propkey_to_col[propkey]
                if hasattr(value, "__clause_element__") or isinstance(
                    value, sql.ClauseElement
                ):
                    # SQL expression value; rendered inline, not bound
                    value_params[col] = (
                        value.__clause_element__()
                        if hasattr(value, "__clause_element__")
                        else value
                    )
                # guard against values that generate non-__nonzero__
                # objects for __eq__()
                elif (
                    state.manager[propkey].impl.is_equal(
                        value, state.committed_state[propkey]
                    )
                    is not True
                ):
                    params[col.key] = value
            if mapper.base_mapper.eager_defaults:
                has_all_defaults = (
                    mapper._server_onupdate_default_cols[table]
                ).issubset(params)
            else:
                has_all_defaults = True
        if (
            update_version_id is not None
            and mapper.version_id_col in mapper._cols_by_table[table]
        ):
            if not bulk and not (params or value_params):
                # HACK: check for history in other tables, in case the
                # history is only in a different table than the one
                # where the version_id_col is.  This logic was lost
                # from 0.9 -> 1.0.0 and restored in 1.0.6.
                for prop in mapper._columntoproperty.values():
                    history = state.manager[prop.key].impl.get_history(
                        state, state_dict, attributes.PASSIVE_NO_INITIALIZE
                    )
                    if history.added:
                        break
                else:
                    # no net change, break
                    continue
            col = mapper.version_id_col
            no_params = not params and not value_params
            # version id for the WHERE clause; keyed by _label to
            # distinguish from the SET value keyed by col.key
            params[col._label] = update_version_id
            if (
                bulk or col.key not in params
            ) and mapper.version_id_generator is not False:
                val = mapper.version_id_generator(update_version_id)
                params[col.key] = val
            elif mapper.version_id_generator is False and no_params:
                # no version id generator, no values set on the table,
                # and version id wasn't manually incremented.
                # set version id to itself so we get an UPDATE
                # statement
                params[col.key] = update_version_id
        elif not (params or value_params):
            # nothing to update for this state/table
            continue
        has_all_pks = True
        expect_pk_cascaded = False
        if bulk:
            # keys here are mapped attribute keys, so
            # look at mapper attribute keys for pk
            pk_params = dict(
                (propkey_to_col[propkey]._label, state_dict.get(propkey))
                for propkey in set(propkey_to_col).intersection(
                    mapper._pk_attr_keys_by_table[table]
                )
            )
        else:
            pk_params = {}
            for col in pks:
                propkey = mapper._columntoproperty[col].key
                history = state.manager[propkey].impl.get_history(
                    state, state_dict, attributes.PASSIVE_OFF
                )
                if history.added:
                    if (
                        not history.deleted
                        or ("pk_cascaded", state, col)
                        in uowtransaction.attributes
                    ):
                        # the PK value was set by a CASCADE (or has no
                        # old value); match on the new value and don't
                        # include it in the SET clause
                        expect_pk_cascaded = True
                        pk_params[col._label] = history.added[0]
                        params.pop(col.key, None)
                    else:
                        # else, use the old value to locate the row
                        pk_params[col._label] = history.deleted[0]
                        if col in value_params:
                            has_all_pks = False
                else:
                    pk_params[col._label] = history.unchanged[0]
                    if pk_params[col._label] is None:
                        raise orm_exc.FlushError(
                            "Can't update table %s using NULL for primary "
                            "key value on column %s" % (table, col)
                        )
        if params or value_params:
            params.update(pk_params)
            yield (
                state,
                state_dict,
                params,
                mapper,
                connection,
                value_params,
                has_all_defaults,
                has_all_pks,
            )
        elif expect_pk_cascaded:
            # no UPDATE occurs on this table, but we expect that CASCADE rules
            # have changed the primary key of the row; propagate this event to
            # other columns that expect to have been modified. this normally
            # occurs after the UPDATE is emitted however we invoke it here
            # explicitly in the absence of our invoking an UPDATE
            for m, equated_pairs in mapper._table_to_equated[table]:
                sync.populate(
                    state,
                    m,
                    state,
                    m,
                    equated_pairs,
                    uowtransaction,
                    mapper.passive_updates,
                )
def _collect_post_update_commands(
    base_mapper, uowtransaction, table, states_to_update, post_update_cols
):
    """Identify sets of values to use in UPDATE statements for a
    list of states within a post_update operation.

    Yields ``(state, state_dict, mapper, connection, params)`` for
    each state that has at least one changed value among
    *post_update_cols* (or columns with an ``onupdate``); ``params``
    holds PK values keyed by ``col._label`` and SET values keyed by
    ``col.key``.
    """
    for (
        state,
        state_dict,
        mapper,
        connection,
        update_version_id,
    ) in states_to_update:
        # assert table in mapper._pks_by_table
        pks = mapper._pks_by_table[table]
        params = {}
        hasdata = False
        for col in mapper._cols_by_table[table]:
            if col in pks:
                # PK value for the WHERE clause, keyed by _label
                params[col._label] = mapper._get_state_attr_by_column(
                    state, state_dict, col, passive=attributes.PASSIVE_OFF
                )
            elif col in post_update_cols or col.onupdate is not None:
                prop = mapper._columntoproperty[col]
                history = state.manager[prop.key].impl.get_history(
                    state, state_dict, attributes.PASSIVE_NO_INITIALIZE
                )
                if history.added:
                    value = history.added[0]
                    params[col.key] = value
                    hasdata = True
        if hasdata:
            if (
                update_version_id is not None
                and mapper.version_id_col in mapper._cols_by_table[table]
            ):
                col = mapper.version_id_col
                params[col._label] = update_version_id
                # only bump the version id for a persistent object
                # (state.key set) that hasn't already set one
                if (
                    bool(state.key)
                    and col.key not in params
                    and mapper.version_id_generator is not False
                ):
                    val = mapper.version_id_generator(update_version_id)
                    params[col.key] = val
            yield state, state_dict, mapper, connection, params
def _collect_delete_commands(
    base_mapper, uowtransaction, table, states_to_delete
):
    """Identify values to use in DELETE statements for a list of
    states to be deleted.

    Yields ``(params, connection)`` per state, where ``params``
    contains the committed primary key values (and, when versioning
    applies, the expected version id) used to match the row.
    """
    for (
        state,
        state_dict,
        mapper,
        connection,
        update_version_id,
    ) in states_to_delete:
        # skip states whose mapper does not persist into this table
        if table not in mapper._pks_by_table:
            continue
        params = {}
        for col in mapper._pks_by_table[table]:
            # use the committed (pre-flush) PK value to locate the row
            params[
                col.key
            ] = value = mapper._get_committed_state_attr_by_column(
                state, state_dict, col
            )
            if value is None:
                raise orm_exc.FlushError(
                    "Can't delete from table %s "
                    "using NULL for primary "
                    "key value on column %s" % (table, col)
                )
        if (
            update_version_id is not None
            and mapper.version_id_col in mapper._cols_by_table[table]
        ):
            # versioned DELETE also matches on the expected version id
            params[mapper.version_id_col.key] = update_version_id
        yield params, connection
def _emit_update_statements(
    base_mapper,
    uowtransaction,
    cached_connections,
    mapper,
    table,
    update,
    bookkeeping=True,
):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands().

    Records are grouped by (connection, parameter keys, presence of
    SQL-expression values, has_all_defaults, has_all_pks) so that
    compatible rows can be batched via executemany(); rowcounts are
    verified afterwards where the dialect supports it.
    """
    needs_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )
    def update_stmt():
        # WHERE clause matches PK (and version id, if versioned) using
        # bindparams named by col._label, matching _collect_update_commands
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col._label, type_=col.type)
            )
        if needs_version_id:
            clause.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type,
                )
            )
        stmt = table.update(clause)
        return stmt
    cached_stmt = base_mapper._memo(("update", table), update_stmt)
    for (
        (connection, paramkeys, hasvalue, has_all_defaults, has_all_pks),
        records,
    ) in groupby(
        update,
        lambda rec: (
            rec[4],  # connection
            set(rec[2]),  # set of parameter keys
            bool(rec[5]),  # whether or not we have "value" parameters
            rec[6],  # has_all_defaults
            rec[7],  # has all pks
        ),
    ):
        rows = 0
        records = list(records)
        statement = cached_stmt
        return_defaults = False
        if not has_all_pks:
            # some PK values are SQL expressions; fetch them back
            statement = statement.return_defaults()
            return_defaults = True
        elif (
            bookkeeping
            and not has_all_defaults
            and mapper.base_mapper.eager_defaults
        ):
            statement = statement.return_defaults()
            return_defaults = True
        elif mapper.version_id_col is not None:
            statement = statement.return_defaults(mapper.version_id_col)
            return_defaults = True
        # RETURNING changes which rowcount capability applies
        assert_singlerow = (
            connection.dialect.supports_sane_rowcount
            if not return_defaults
            else connection.dialect.supports_sane_rowcount_returning
        )
        assert_multirow = (
            assert_singlerow
            and connection.dialect.supports_sane_multi_rowcount
        )
        allow_multirow = has_all_defaults and not needs_version_id
        if hasvalue:
            # SQL-expression values differ per row: execute one at a time
            for (
                state,
                state_dict,
                params,
                mapper,
                connection,
                value_params,
                has_all_defaults,
                has_all_pks,
            ) in records:
                c = connection.execute(statement.values(value_params), params)
                if bookkeeping:
                    _postfetch(
                        mapper,
                        uowtransaction,
                        table,
                        state,
                        state_dict,
                        c,
                        c.context.compiled_parameters[0],
                        value_params,
                        True,
                    )
                rows += c.rowcount
                check_rowcount = assert_singlerow
        else:
            if not allow_multirow:
                check_rowcount = assert_singlerow
                # execute individually so versioned rows can be verified
                for (
                    state,
                    state_dict,
                    params,
                    mapper,
                    connection,
                    value_params,
                    has_all_defaults,
                    has_all_pks,
                ) in records:
                    c = cached_connections[connection].execute(
                        statement, params
                    )
                    # TODO: why with bookkeeping=False?
                    if bookkeeping:
                        _postfetch(
                            mapper,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            c.context.compiled_parameters[0],
                            value_params,
                            True,
                        )
                    rows += c.rowcount
            else:
                # batch all records into a single executemany()
                multiparams = [rec[2] for rec in records]
                check_rowcount = assert_multirow or (
                    assert_singlerow and len(multiparams) == 1
                )
                c = cached_connections[connection].execute(
                    statement, multiparams
                )
                rows += c.rowcount
                for (
                    state,
                    state_dict,
                    params,
                    mapper,
                    connection,
                    value_params,
                    has_all_defaults,
                    has_all_pks,
                ) in records:
                    if bookkeeping:
                        _postfetch(
                            mapper,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            c.context.compiled_parameters[0],
                            value_params,
                            True,
                        )
        if check_rowcount:
            if rows != len(records):
                # a stale/concurrently-modified row was not matched
                raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched."
                    % (table.description, len(records), rows)
                )
        elif needs_version_id:
            util.warn(
                "Dialect %s does not support updated rowcount "
                "- versioning cannot be verified."
                % c.dialect.dialect_description
            )
def _emit_insert_statements(
    base_mapper,
    uowtransaction,
    cached_connections,
    mapper,
    table,
    insert,
    bookkeeping=True,
):
    """Emit INSERT statements corresponding to value lists collected
    by _collect_insert_commands().

    Records that have all PKs, no SQL-expression values and no
    server-generated defaults to fetch are batched via executemany();
    all others are executed one at a time so generated primary keys
    and defaults can be retrieved per row.
    """
    cached_stmt = base_mapper._memo(("insert", table), table.insert)
    for (
        (connection, pkeys, hasvalue, has_all_pks, has_all_defaults),
        records,
    ) in groupby(
        insert,
        lambda rec: (
            rec[4],  # connection
            set(rec[2]),  # parameter keys
            bool(rec[5]),  # whether we have "value" parameters
            rec[6],
            rec[7],
        ),
    ):
        statement = cached_stmt
        # NOTE: the grouping below is `not bookkeeping OR
        # ((...) and has_all_pks and not hasvalue)` -- with
        # bookkeeping off, the executemany path is always taken
        if (
            not bookkeeping
            or (
                has_all_defaults
                or not base_mapper.eager_defaults
                or not connection.dialect.implicit_returning
            )
            and has_all_pks
            and not hasvalue
        ):
            # batched executemany() path
            records = list(records)
            multiparams = [rec[2] for rec in records]
            c = cached_connections[connection].execute(statement, multiparams)
            if bookkeeping:
                for (
                    (
                        state,
                        state_dict,
                        params,
                        mapper_rec,
                        conn,
                        value_params,
                        has_all_pks,
                        has_all_defaults,
                    ),
                    last_inserted_params,
                ) in zip(records, c.context.compiled_parameters):
                    if state:
                        _postfetch(
                            mapper_rec,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            c,
                            last_inserted_params,
                            value_params,
                            False,
                        )
                    else:
                        # bulk-save dictionaries have no InstanceState
                        _postfetch_bulk_save(mapper_rec, state_dict, table)
        else:
            # per-row path: generated PKs / defaults must be fetched
            if not has_all_defaults and base_mapper.eager_defaults:
                statement = statement.return_defaults()
            elif mapper.version_id_col is not None:
                statement = statement.return_defaults(mapper.version_id_col)
            for (
                state,
                state_dict,
                params,
                mapper_rec,
                connection,
                value_params,
                has_all_pks,
                has_all_defaults,
            ) in records:
                if value_params:
                    result = connection.execute(
                        statement.values(value_params), params
                    )
                else:
                    result = cached_connections[connection].execute(
                        statement, params
                    )
                primary_key = result.context.inserted_primary_key
                if primary_key is not None:
                    # set primary key attributes
                    for pk, col in zip(
                        primary_key, mapper._pks_by_table[table]
                    ):
                        prop = mapper_rec._columntoproperty[col]
                        if pk is not None and (
                            col in value_params
                            or state_dict.get(prop.key) is None
                        ):
                            state_dict[prop.key] = pk
                if bookkeeping:
                    if state:
                        _postfetch(
                            mapper_rec,
                            uowtransaction,
                            table,
                            state,
                            state_dict,
                            result,
                            result.context.compiled_parameters[0],
                            value_params,
                            False,
                        )
                    else:
                        _postfetch_bulk_save(mapper_rec, state_dict, table)
def _emit_post_update_statements(
    base_mapper, uowtransaction, cached_connections, mapper, table, update
):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands().

    Rowcounts are verified against the number of records when the
    dialect supports it, raising StaleDataError on mismatch.
    """
    needs_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )
    def update_stmt():
        # WHERE clause matches PK (and version id, if versioned) using
        # bindparams named by col._label
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col._label, type_=col.type)
            )
        if needs_version_id:
            clause.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type,
                )
            )
        stmt = table.update(clause)
        if mapper.version_id_col is not None:
            stmt = stmt.return_defaults(mapper.version_id_col)
        return stmt
    statement = base_mapper._memo(("post_update", table), update_stmt)
    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, records in groupby(
        update,
        lambda rec: (rec[3], set(rec[4])),  # connection  # parameter keys
    ):
        rows = 0
        records = list(records)
        connection = key[0]
        # RETURNING (used for version id) changes which rowcount
        # capability applies
        assert_singlerow = (
            connection.dialect.supports_sane_rowcount
            if mapper.version_id_col is None
            else connection.dialect.supports_sane_rowcount_returning
        )
        assert_multirow = (
            assert_singlerow
            and connection.dialect.supports_sane_multi_rowcount
        )
        allow_multirow = not needs_version_id or assert_multirow
        if not allow_multirow:
            check_rowcount = assert_singlerow
            # execute individually so versioned rows can be verified
            for state, state_dict, mapper_rec, connection, params in records:
                c = cached_connections[connection].execute(statement, params)
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
                rows += c.rowcount
        else:
            # batch all records into a single executemany()
            multiparams = [
                params
                for state, state_dict, mapper_rec, conn, params in records
            ]
            check_rowcount = assert_multirow or (
                assert_singlerow and len(multiparams) == 1
            )
            c = cached_connections[connection].execute(statement, multiparams)
            rows += c.rowcount
            for state, state_dict, mapper_rec, connection, params in records:
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
        if check_rowcount:
            if rows != len(records):
                # a stale/concurrently-modified row was not matched
                raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched."
                    % (table.description, len(records), rows)
                )
        elif needs_version_id:
            util.warn(
                "Dialect %s does not support updated rowcount "
                "- versioning cannot be verified."
                % c.dialect.dialect_description
            )
def _emit_delete_statements(
    base_mapper, uowtransaction, cached_connections, mapper, table, delete
):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands().

    Deletes are batched per connection via executemany() unless the
    table is versioned and the dialect cannot report multi-row
    rowcounts, in which case rows are deleted one at a time so stale
    version ids can be detected.
    """
    need_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )
    def delete_stmt():
        # WHERE clause matches PK (and version id, if versioned);
        # bindparams are keyed by col.key, matching
        # _collect_delete_commands
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col.key, type_=col.type)
            )
        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col.key, type_=mapper.version_id_col.type
                )
            )
        return table.delete(clause)
    statement = base_mapper._memo(("delete", table), delete_stmt)
    for connection, recs in groupby(delete, lambda rec: rec[1]):  # connection
        del_objects = [params for params, connection in recs]
        connection = cached_connections[connection]
        expected = len(del_objects)
        # -1 means "rowcount unavailable"; skips the verification below
        rows_matched = -1
        only_warn = False
        if (
            need_version_id
            and not connection.dialect.supports_sane_multi_rowcount
        ):
            if connection.dialect.supports_sane_rowcount:
                rows_matched = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows_matched += c.rowcount
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified."
                    % connection.dialect.dialect_description
                )
                connection.execute(statement, del_objects)
        else:
            c = connection.execute(statement, del_objects)
            if not need_version_id:
                # without versioning, mismatch is only a warning
                only_warn = True
            rows_matched = c.rowcount
        if (
            base_mapper.confirm_deleted_rows
            and rows_matched > -1
            and expected != rows_matched
            and (
                connection.dialect.supports_sane_multi_rowcount
                or len(del_objects) == 1
            )
        ):
            # TODO: why does this "only warn" if versioning is turned off,
            # whereas the UPDATE raises?
            if only_warn:
                util.warn(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched.  Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning."
                    % (table.description, expected, rows_matched)
                )
            else:
                raise orm_exc.StaleDataError(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched.  Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning."
                    % (table.description, expected, rows_matched)
                )
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.

    Also expires stale read-only attributes, eagerly loads server
    defaults when ``eager_defaults`` is set, and validates that a
    non-NULL version id is present when the application manages
    version ids itself (``version_id_generator=False``).
    """
    for state, state_dict, mapper, connection, has_identity in states:
        if mapper._readonly_props:
            # expire read-only attributes that may have been changed
            # server-side by this flush
            readonly = state.unmodified_intersection(
                [
                    p.key
                    for p in mapper._readonly_props
                    if (
                        p.expire_on_flush
                        and (not p.deferred or p.key in state.dict)
                    )
                    or (
                        not p.expire_on_flush
                        and not p.deferred
                        and p.key not in state.dict
                    )
                ]
            )
            if readonly:
                state._expire_attributes(state.dict, readonly)
        # if eager_defaults option is enabled, load
        # all expired cols.  Else if we have a version_id_col, make sure
        # it isn't expired.
        toload_now = []
        if base_mapper.eager_defaults:
            toload_now.extend(
                state._unloaded_non_object.intersection(
                    mapper._server_default_plus_onupdate_propkeys
                )
            )
        if (
            mapper.version_id_col is not None
            and mapper.version_id_generator is False
        ):
            if mapper._version_id_prop.key in state.unloaded:
                toload_now.extend([mapper._version_id_prop.key])
        if toload_now:
            # refresh the needed attributes with a SELECT
            state.key = base_mapper._identity_key_from_state(state)
            loading.load_on_ident(
                uowtransaction.session.query(mapper),
                state.key,
                refresh_state=state,
                only_load_props=toload_now,
            )
        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)
        if (
            mapper.version_id_generator is False
            and mapper.version_id_col is not None
        ):
            # application-managed version ids must never be NULL
            if state_dict[mapper._version_id_prop.key] is None:
                raise orm_exc.FlushError(
                    "Instance does not contain a non-NULL version value"
                )
def _postfetch_post_update(
    mapper, uowtransaction, table, state, dict_, result, params
):
    """Synchronize in-memory state after a post_update UPDATE:
    copy prefetched (client-side default) values into the instance
    dict and expire attributes for server-side (postfetch) columns.
    """
    # nothing to refresh if the object is being deleted anyway
    if uowtransaction.is_deleted(state):
        return
    prefetch_cols = result.context.compiled.prefetch
    postfetch_cols = result.context.compiled.postfetch
    if (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    ):
        # version id was computed client-side; treat it as prefetched
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
    refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
    if refresh_flush:
        load_evt_attrs = []
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            dict_[mapper._columntoproperty[c].key] = params[c.key]
            if refresh_flush:
                load_evt_attrs.append(mapper._columntoproperty[c].key)
    if refresh_flush and load_evt_attrs:
        mapper.class_manager.dispatch.refresh_flush(
            state, uowtransaction, load_evt_attrs
        )
    if postfetch_cols:
        # server-generated values: expire so they're loaded on access
        state._expire_attributes(
            state.dict,
            [
                mapper._columntoproperty[c].key
                for c in postfetch_cols
                if c in mapper._columntoproperty
            ],
        )
def _postfetch(
    mapper,
    uowtransaction,
    table,
    state,
    dict_,
    result,
    params,
    value_params,
    isupdate,
):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state.

    RETURNING and prefetched values are copied directly into the
    instance dict; postfetch (server-generated, non-returned) columns
    are expired.  Finally, newly available column values are
    propagated to dependent tables via sync.populate().
    """
    prefetch_cols = result.context.compiled.prefetch
    postfetch_cols = result.context.compiled.postfetch
    returning_cols = result.context.compiled.returning
    if (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    ):
        # version id was computed client-side; treat it as prefetched
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
    refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
    if refresh_flush:
        load_evt_attrs = []
    if returning_cols:
        row = result.context.returned_defaults
        if row is not None:
            for col in returning_cols:
                # pk cols returned from insert are handled
                # distinctly, don't step on the values here
                if col.primary_key and result.context.isinsert:
                    continue
                # note that columns can be in the "return defaults" that are
                # not mapped to this mapper, typically because they are
                # "excluded", which can be specified directly or also occurs
                # when using declarative w/ single table inheritance
                prop = mapper._columntoproperty.get(col)
                if prop:
                    dict_[prop.key] = row[col]
                    if refresh_flush:
                        load_evt_attrs.append(prop.key)
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            dict_[mapper._columntoproperty[c].key] = params[c.key]
            if refresh_flush:
                load_evt_attrs.append(mapper._columntoproperty[c].key)
    if refresh_flush and load_evt_attrs:
        mapper.class_manager.dispatch.refresh_flush(
            state, uowtransaction, load_evt_attrs
        )
    if isupdate and value_params:
        # explicitly suit the use case specified by
        # [ticket:3801], PK SQL expressions for UPDATE on non-RETURNING
        # database which are set to themselves in order to do a version bump.
        postfetch_cols.extend(
            [
                col
                for col in value_params
                if col.primary_key and col not in returning_cols
            ]
        )
    if postfetch_cols:
        # server-generated values: expire so they're loaded on access
        state._expire_attributes(
            state.dict,
            [
                mapper._columntoproperty[c].key
                for c in postfetch_cols
                if c in mapper._columntoproperty
            ],
        )
    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often.  would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(
            state,
            m,
            state,
            m,
            equated_pairs,
            uowtransaction,
            mapper.passive_updates,
        )
def _postfetch_bulk_save(mapper, dict_, table):
    """Propagate inherited-key values within *dict_* after a bulk save,
    where no InstanceState is available for full postfetch handling."""
    for parent_mapper, equated_pairs in mapper._table_to_equated[table]:
        sync.bulk_populate_inherit_keys(dict_, parent_mapper, equated_pairs)
def _connections_for_states(base_mapper, uowtransaction, states):
    """Return an iterator of (state, state.dict, mapper, connection).
    The states are sorted according to _sort_states, then paired
    with the connection they should be using for the given
    unit of work transaction.
    """
    # if the session has a connection callable (e.g. horizontal
    # sharding), resolve the connection per state; otherwise a single
    # connection serves every state
    session = uowtransaction.session
    if session.connection_callable:
        connection_callable = session.connection_callable
    else:
        connection_callable = None
        connection = uowtransaction.transaction.connection(base_mapper)
    for state in _sort_states(base_mapper, states):
        if connection_callable:
            connection = connection_callable(base_mapper, state.obj())
        yield state, state.dict, state.manager.mapper, connection
def _cached_connection_dict(base_mapper):
    """Return a PopulateDict mapping each connection to a version of
    itself configured with the mapper's compiled-statement cache."""
    def _with_compiled_cache(conn):
        return conn.execution_options(
            compiled_cache=base_mapper._compiled_cache
        )
    return util.PopulateDict(_with_compiled_cache)
def _sort_states(mapper, states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
try:
persistent_sorted = sorted(
persistent, key=mapper._persistent_sortkey_fn
)
except TypeError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Could not sort objects by primary key; primary key "
"values must be sortable in Python (was: %s)" % err
),
replace_context=err,
)
return (
sorted(pending, key=operator.attrgetter("insert_order"))
+ persistent_sorted
)
class BulkUD(object):
    """Handle bulk update and deletes via a :class:`_query.Query`.

    Subclasses implement the specific statement type (UPDATE/DELETE)
    combined with a session-synchronization strategy ('evaluate',
    'fetch', or False); see :meth:`._factory`.
    """
    def __init__(self, query):
        self.query = query.enable_eagerloads(False)
        self.mapper = self.query._bind_mapper()
        self._validate_query_state()
    def _validate_query_state(self):
        # bulk UPDATE/DELETE cannot express these query modifiers;
        # reject them up front
        for attr, methname, notset, op in (
            ("_limit", "limit()", None, operator.is_),
            ("_offset", "offset()", None, operator.is_),
            ("_order_by", "order_by()", False, operator.is_),
            ("_group_by", "group_by()", False, operator.is_),
            ("_distinct", "distinct()", False, operator.is_),
            (
                "_from_obj",
                "join(), outerjoin(), select_from(), or from_self()",
                (),
                operator.eq,
            ),
        ):
            if not op(getattr(self.query, attr), notset):
                raise sa_exc.InvalidRequestError(
                    "Can't call Query.update() or Query.delete() "
                    "when %s has been called" % (methname,)
                )
    @property
    def session(self):
        return self.query.session
    @classmethod
    def _factory(cls, lookup, synchronize_session, *arg):
        """Instantiate the subclass from *lookup* that matches the
        given ``synchronize_session`` strategy."""
        try:
            klass = lookup[synchronize_session]
        except KeyError as err:
            util.raise_(
                sa_exc.ArgumentError(
                    "Valid strategies for session synchronization "
                    "are %s" % (", ".join(sorted(repr(x) for x in lookup)))
                ),
                replace_context=err,
            )
        else:
            return klass(*arg)
    def exec_(self):
        # template method: fixed sequence of hooks implemented by
        # subclasses / mixins
        self._do_before_compile()
        self._do_pre()
        self._do_pre_synchronize()
        self._do_exec()
        self._do_post_synchronize()
        self._do_post()
    def _execute_stmt(self, stmt):
        self.result = self.query._execute_crud(stmt, self.mapper)
        self.rowcount = self.result.rowcount
    def _do_before_compile(self):
        # implemented by BulkUpdate / BulkDelete
        raise NotImplementedError()
    @util.dependencies("sqlalchemy.orm.query")
    def _do_pre(self, querylib):
        """Determine the single target table / entity and autoflush."""
        query = self.query
        self.context = querylib.QueryContext(query)
        if isinstance(query._entities[0], querylib._ColumnEntity):
            # check for special case of query(table)
            tables = set()
            for ent in query._entities:
                if not isinstance(ent, querylib._ColumnEntity):
                    tables.clear()
                    break
                else:
                    tables.update(_from_objects(ent.column))
            if len(tables) != 1:
                raise sa_exc.InvalidRequestError(
                    "This operation requires only one Table or "
                    "entity be specified as the target."
                )
            else:
                self.primary_table = tables.pop()
        else:
            self.primary_table = query._only_entity_zero(
                "This operation requires only one Table or "
                "entity be specified as the target."
            ).mapper.local_table
        session = query.session
        if query._autoflush:
            session._autoflush()
    def _do_pre_synchronize(self):
        # overridden by the 'evaluate' / 'fetch' strategy mixins
        pass
    def _do_post_synchronize(self):
        # overridden by the 'evaluate' / 'fetch' strategy mixins
        pass
class BulkEvaluate(BulkUD):
    """BulkUD which does the 'evaluate' method of session state resolution.

    The query's WHERE criteria is compiled into a Python-evaluable
    predicate and applied to objects already present in the session's
    identity map; no extra SELECT is issued.
    """
    def _additional_evaluators(self, evaluator_compiler):
        # hook for subclasses (e.g. BulkUpdateEvaluate) to compile
        # additional expressions
        pass
    def _do_pre_synchronize(self):
        query = self.query
        target_cls = query._mapper_zero().class_
        try:
            evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
            if query.whereclause is not None:
                eval_condition = evaluator_compiler.process(query.whereclause)
            else:
                # no criteria: every object of the target class matches
                def eval_condition(obj):
                    return True
            self._additional_evaluators(evaluator_compiler)
        except evaluator.UnevaluatableError as err:
            # criteria can't be expressed in Python; user must pick
            # another synchronize_session strategy
            util.raise_(
                sa_exc.InvalidRequestError(
                    'Could not evaluate current criteria in Python: "%s". '
                    "Specify 'fetch' or False for the "
                    "synchronize_session parameter." % err
                ),
                from_=err,
            )
        # TODO: detect when the where clause is a trivial primary key match
        self.matched_objects = [
            obj
            for (
                cls,
                pk,
                identity_token,
            ), obj in query.session.identity_map.items()
            if issubclass(cls, target_cls) and eval_condition(obj)
        ]
class BulkFetch(BulkUD):
    """BulkUD which does the 'fetch' method of session state resolution."""
    def _do_pre_synchronize(self):
        # SELECT the primary keys of all rows matched by the query's
        # criteria before the bulk statement runs, so the session can
        # be synchronized afterwards
        query = self.query
        session = query.session
        compile_context = query._compile_context()
        pk_select = compile_context.statement.with_only_columns(
            self.primary_table.primary_key
        )
        self.matched_rows = session.execute(
            pk_select, mapper=self.mapper, params=query._params
        ).fetchall()
class BulkUpdate(BulkUD):
    """BulkUD which handles UPDATEs."""
    def __init__(self, query, values, update_kwargs):
        super(BulkUpdate, self).__init__(query)
        self.values = values
        self.update_kwargs = update_kwargs
    @classmethod
    def factory(cls, query, synchronize_session, values, update_kwargs):
        """Return the BulkUpdate variant matching *synchronize_session*."""
        return BulkUD._factory(
            {
                "evaluate": BulkUpdateEvaluate,
                "fetch": BulkUpdateFetch,
                False: BulkUpdate,
            },
            synchronize_session,
            query,
            values,
            update_kwargs,
        )
    def _do_before_compile(self):
        # allow before_compile_update event hooks to replace the query
        if self.query.dispatch.before_compile_update:
            for fn in self.query.dispatch.before_compile_update:
                new_query = fn(self.query, self)
                if new_query is not None:
                    self.query = new_query
    @property
    def _resolved_values(self):
        """The given ``values`` normalized to a list of
        (column-expression, value) tuples."""
        values = []
        for k, v in (
            self.values.items()
            if hasattr(self.values, "items")
            else self.values
        ):
            if self.mapper:
                if isinstance(k, util.string_types):
                    # attribute name -> mapped attribute
                    desc = _entity_descriptor(self.mapper, k)
                    values.extend(desc._bulk_update_tuples(v))
                elif isinstance(k, attributes.QueryableAttribute):
                    values.extend(k._bulk_update_tuples(v))
                else:
                    values.append((k, v))
            else:
                values.append((k, v))
        return values
    @property
    def _resolved_values_keys_as_propnames(self):
        """Like :attr:`._resolved_values`, but with keys converted to
        mapped property names; raises for unmapped expressions."""
        values = []
        for k, v in self._resolved_values:
            if isinstance(k, attributes.QueryableAttribute):
                values.append((k.key, v))
                continue
            elif hasattr(k, "__clause_element__"):
                k = k.__clause_element__()
            if self.mapper and isinstance(k, expression.ColumnElement):
                try:
                    attr = self.mapper._columntoproperty[k]
                except orm_exc.UnmappedColumnError:
                    # column not mapped by this mapper; skip
                    pass
                else:
                    values.append((attr.key, v))
            else:
                raise sa_exc.InvalidRequestError(
                    "Invalid expression type: %r" % k
                )
        return values
    def _do_exec(self):
        values = self._resolved_values
        # a dict loses ordering; only keep the list form when
        # preserve_parameter_order was requested
        if not self.update_kwargs.get("preserve_parameter_order", False):
            values = dict(values)
        update_stmt = sql.update(
            self.primary_table,
            self.context.whereclause,
            values,
            **self.update_kwargs
        )
        self._execute_stmt(update_stmt)
    def _do_post(self):
        session = self.query.session
        session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
    """BulkUD which handles DELETEs."""
    def __init__(self, query):
        super(BulkDelete, self).__init__(query)
    @classmethod
    def factory(cls, query, synchronize_session):
        """Return the BulkDelete variant matching *synchronize_session*."""
        strategies = {
            "evaluate": BulkDeleteEvaluate,
            "fetch": BulkDeleteFetch,
            False: BulkDelete,
        }
        return BulkUD._factory(strategies, synchronize_session, query)
    def _do_before_compile(self):
        # allow before_compile_delete event hooks to replace the query
        hooks = self.query.dispatch.before_compile_delete
        if hooks:
            for hook in hooks:
                replacement = hook(self.query, self)
                if replacement is not None:
                    self.query = replacement
    def _do_exec(self):
        stmt = sql.delete(self.primary_table, self.context.whereclause)
        self._execute_stmt(stmt)
    def _do_post(self):
        self.query.session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
    """BulkUD which handles UPDATEs using the "evaluate"
    method of session resolution.

    The UPDATE's SET values are also compiled into Python evaluators
    so matched in-memory objects can be updated without a reload.
    """
    def _additional_evaluators(self, evaluator_compiler):
        # compile one evaluator per updated attribute
        self.value_evaluators = {}
        values = self._resolved_values_keys_as_propnames
        for key, value in values:
            self.value_evaluators[key] = evaluator_compiler.process(
                expression._literal_as_binds(value)
            )
    def _do_post_synchronize(self):
        session = self.query.session
        states = set()
        evaluated_keys = list(self.value_evaluators.keys())
        for obj in self.matched_objects:
            state, dict_ = (
                attributes.instance_state(obj),
                attributes.instance_dict(obj),
            )
            # only evaluate unmodified attributes
            to_evaluate = state.unmodified.intersection(evaluated_keys)
            for key in to_evaluate:
                dict_[key] = self.value_evaluators[key](obj)
            state.manager.dispatch.refresh(state, None, to_evaluate)
            # the evaluated values now match the database; commit them
            state._commit(dict_, list(to_evaluate))
            # expire attributes with pending changes
            # (there was no autoflush, so they are overwritten)
            state._expire_attributes(
                dict_, set(evaluated_keys).difference(to_evaluate)
            )
            states.add(state)
        session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
    """BulkUD which handles DELETEs using the "evaluate"
    method of session resolution."""
    def _do_post_synchronize(self):
        # mark every matched in-memory object as deleted in the session
        states = [
            attributes.instance_state(obj) for obj in self.matched_objects
        ]
        self.query.session._remove_newly_deleted(states)
class BulkUpdateFetch(BulkFetch, BulkUpdate):
    """BulkUD which handles UPDATEs using the "fetch"
    method of session resolution."""
    def _do_post_synchronize(self):
        # expire the updated attributes on every matched object that
        # is present in the session's identity map
        session = self.query.session
        target_mapper = self.query._mapper_zero()
        identity_map = session.identity_map
        states = set()
        for primary_key in self.matched_rows:
            identity_key = target_mapper.identity_key_from_primary_key(
                list(primary_key)
            )
            if identity_key in identity_map:
                states.add(
                    attributes.instance_state(identity_map[identity_key])
                )
        updated_attrs = set(
            k for k, v in self._resolved_values_keys_as_propnames
        )
        for state in states:
            to_expire = updated_attrs.intersection(state.dict)
            if to_expire:
                session._expire_state(state, to_expire)
        session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
    """BulkUD which handles DELETEs using the "fetch"
    method of session resolution."""

    def _do_post_synchronize(self):
        # For each pre-fetched primary key, mark the corresponding
        # in-session object (if any) as deleted.
        session = self.query.session
        target_mapper = self.query._mapper_zero()
        for primary_key in self.matched_rows:
            # TODO: inline this and call remove_newly_deleted
            # once
            identity_key = target_mapper.identity_key_from_primary_key(
                list(primary_key)
            )
            if identity_key in session.identity_map:
                session._remove_newly_deleted(
                    [
                        attributes.instance_state(
                            session.identity_map[identity_key]
                        )
                    ]
                )
| gpl-2.0 |
infinidb/autooam | autooam/test/test_clustercmd.py | 1 | 4112 | # Copyright (C) 2014 InfiniDB, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2 of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
'''
Created on Feb 22, 2013
@author: rtw
'''
import unittest
import os
import emtools.common.utils as utils
class Test(unittest.TestCase):
    """End-to-end checks for launchcluster.py / clustercmd.py against
    clusters of different shapes (multi-node, single-node, and a cluster
    defined by a JSON config file)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _check(self, cmdline):
        """Run a shell command via utils.syscall_log and assert a zero exit
        status, reporting the command's captured output on failure."""
        (ret, output, outerr) = utils.syscall_log(cmdline)
        self.assertEqual(ret, 0, output)

    def _clustercmd(self, args):
        """Run bin/clustercmd.py from AUTOOAM_HOME with the given args."""
        self._check('%s/bin/clustercmd.py %s' % (os.environ['AUTOOAM_HOME'], args))

    def testBasic1(self):
        '''Test launchcluster and various clustercmd's against a multi node cluster.'''
        self._check('%s/bin/launchcluster.py -u unittest multi_1um_2pm cal-precise64 3.5.1-5'
                    % os.environ['AUTOOAM_HOME'])
        # Cycle the cluster through its lifecycle commands, then run a
        # basic test suite and tear the cluster down.
        for subcmd in ('poweroff', 'poweron', 'pause', 'resume', 'show'):
            self._clustercmd('-u %s unittest' % subcmd)
        self._clustercmd('-u run unittest basic001')
        self._clustercmd('-u destroy unittest')

    def testBasic2(self):
        '''Test launchcluster -f operation.'''
        home = os.environ['AUTOOAM_HOME']
        self._check('%s/bin/launchcluster.py -u -f unittest-b2 %s/examples/single.json cal-precise64 3.5.1-5'
                    % (home, home))
        self._clustercmd('-u destroy unittest-b2')

    def testBasic3(self):
        '''Test launchcluster and various clustercmd's against a single node cluster.'''
        self._check('%s/bin/launchcluster.py -u unittest-b3 singlenode cal-precise64 3.5.1-5'
                    % os.environ['AUTOOAM_HOME'])
        for subcmd in ('poweroff', 'poweron', 'show'):
            self._clustercmd('-u %s unittest-b3' % subcmd)
        self._clustercmd('-u run unittest-b3 basic001')
        self._clustercmd('-u destroy unittest-b3')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 |
DirtyUnicorns/android_external_chromium_org | build/android/pylib/host_driven/setup.py | 48 | 6473 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for instrumentation host-driven tests."""
import logging
import os
import sys
import types
from pylib.host_driven import test_case
from pylib.host_driven import test_info_collection
from pylib.host_driven import test_runner
def _GetPythonFiles(root, files):
"""Returns all files from |files| that end in 'Test.py'.
Args:
root: A directory name with python files.
files: A list of file names.
Returns:
A list with all python files that match the testing naming scheme.
"""
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
def _InferImportNameFromFile(python_file):
"""Given a file, infer the import name for that file.
Example: /usr/foo/bar/baz.py -> baz.
Args:
python_file: Path to the Python file, ostensibly to import later.
Returns:
The module name for the given file.
"""
return os.path.splitext(os.path.basename(python_file))[0]
def _GetTestModules(host_driven_test_root, is_official_build):
  """Retrieve a list of python modules that match the testing naming scheme.

  Walks the location of host-driven tests, imports them, and provides the list
  of imported modules to the caller.

  Args:
    host_driven_test_root: The path to walk, looking for the
        pythonDrivenTests or host_driven_tests directory
    is_official_build: Whether to run only those tests marked 'official'

  Returns:
    A list of python modules under |host_driven_test_root| which match the
    testing naming scheme. Each module should define one or more classes that
    derive from HostDrivenTestCase.
  """
  # By default run all host-driven tests under pythonDrivenTests or
  # host_driven_tests.
  host_driven_test_file_list = []
  for root, _, files in os.walk(host_driven_test_root):
    # The 'official' subdirectories are only included for official builds.
    if (root.endswith('host_driven_tests') or
        root.endswith('pythonDrivenTests') or
        (is_official_build and (root.endswith('pythonDrivenTests/official') or
                                root.endswith('host_driven_tests/official')))):
      host_driven_test_file_list += _GetPythonFiles(root, files)
  # Sort so that modules are imported in a deterministic order.
  host_driven_test_file_list.sort()

  test_module_list = [_GetModuleFromFile(test_file)
                      for test_file in host_driven_test_file_list]
  return test_module_list
def _GetModuleFromFile(python_file):
  """Gets the python module associated with a file by importing it.

  Args:
    python_file: File to import.

  Returns:
    The module object.
  """
  containing_dir = os.path.dirname(python_file)
  # Avoid growing sys.path with duplicate entries when many test files
  # live in the same directory (the original appended unconditionally).
  if containing_dir not in sys.path:
    sys.path.append(containing_dir)
  import_name = _InferImportNameFromFile(python_file)
  return __import__(import_name)
def _GetTestsFromClass(test_case_class, **kwargs):
  """Returns one test object for each test method in |test_case_class|.

  Test methods are methods on the class which begin with 'test'.

  Args:
    test_case_class: Class derived from HostDrivenTestCase which contains zero
        or more test methods.
    kwargs: Keyword args to pass into the constructor of test cases.

  Returns:
    A list of test case objects, each initialized for a particular test method.
  """
  tests = []
  for attr_name in dir(test_case_class):
    if _IsTestMethod(attr_name, test_case_class):
      tests.append(test_case_class(attr_name, **kwargs))
  return tests
def _GetTestsFromModule(test_module, **kwargs):
  """Gets a list of test objects from |test_module|.

  Args:
    test_module: Module from which to get the set of test methods.
    kwargs: Keyword args to pass into the constructor of test cases.

  Returns:
    A list of test case objects each initialized for a particular test method
    defined in |test_module|.
  """
  tests = []
  for name in dir(test_module):
    candidate = getattr(test_module, name)
    if not _IsTestCaseClass(candidate):
      continue
    tests.extend(_GetTestsFromClass(candidate, **kwargs))
  return tests
def _IsTestCaseClass(test_class):
  # A valid host-driven test case is a new-style class (its type is
  # types.TypeType under Python 2) deriving from HostDrivenTestCase,
  # excluding the HostDrivenTestCase base class itself.
  return (type(test_class) is types.TypeType and
          issubclass(test_class, test_case.HostDrivenTestCase) and
          test_class is not test_case.HostDrivenTestCase)
def _IsTestMethod(attrname, test_case_class):
"""Checks whether this is a valid test method.
Args:
attrname: The method name.
test_case_class: The test case class.
Returns:
True if test_case_class.'attrname' is callable and it starts with 'test';
False otherwise.
"""
attr = getattr(test_case_class, attrname)
return callable(attr) and attrname.startswith('test')
def _GetAllTests(test_root, is_official_build, **kwargs):
  """Retrieve a list of host-driven tests defined under |test_root|.

  Args:
    test_root: Path which contains host-driven test files.
    is_official_build: Whether this is an official build.
    kwargs: Keyword args to pass into the constructor of test cases.

  Returns:
    List of test case objects, one for each available test method.
  """
  if not test_root:
    return []
  all_tests = []
  for module in _GetTestModules(test_root, is_official_build):
    all_tests.extend(_GetTestsFromModule(module, **kwargs))
  return all_tests
def InstrumentationSetup(host_driven_test_root, official_build,
                         instrumentation_options):
  """Creates a list of host-driven instrumentation tests and a runner factory.

  Args:
    host_driven_test_root: Directory where the host-driven tests are.
    official_build: True if this is an official build.
    instrumentation_options: An InstrumentationOptions object.

  Returns:
    A tuple of (TestRunnerFactory, tests).
  """
  test_collection = test_info_collection.TestInfoCollection()
  all_tests = _GetAllTests(
      host_driven_test_root, official_build,
      instrumentation_options=instrumentation_options)
  test_collection.AddTests(all_tests)

  # Filter down to the tests selected via annotations / name filter.
  available_tests = test_collection.GetAvailableTests(
      instrumentation_options.annotations,
      instrumentation_options.exclude_annotations,
      instrumentation_options.test_filter)
  logging.debug('All available tests: ' + str(
      [t.tagged_name for t in available_tests]))

  def TestRunnerFactory(device, shard_index):
    # Closure over instrumentation_options; invoked once per shard/device.
    return test_runner.HostDrivenTestRunner(
        device, shard_index,
        instrumentation_options.tool,
        instrumentation_options.push_deps,
        instrumentation_options.cleanup_test_files)

  return (TestRunnerFactory, available_tests)
| bsd-3-clause |
tvon/hyde | hydeengine/__init__.py | 1 | 15441 | """
Entry Points for Hyde Engine
"""
import imp
import os
import sys
import shutil
import thread
import threading
from collections import defaultdict
from datetime import datetime
from Queue import Queue, Empty
from threading import Thread, Event
from django.conf import settings
from django.core.management import setup_environ
from django.template import add_to_builtins
from django.template.loader import render_to_string
from file_system import File, Folder
from path_util import PathUtil
from processor import Processor
from siteinfo import SiteInfo
class _HydeDefaults:
    """Default values merged into the Django settings for options that a
    hyde site's settings.py may leave undefined (see setup_env)."""
    # When True, pages are served without their .html extension
    # (see Server.serve's clean-url handling).
    GENERATE_CLEAN_URLS = False
    GENERATE_ABSOLUTE_FS_URLS = False
    # File names (without extension) recognized as folder listing pages.
    LISTING_PAGE_NAMES = ['index', 'default', 'listing']
    APPEND_SLASH = False
    # Processor pipelines; empty by default.
    MEDIA_PROCESSORS = {}
    CONTENT_PROCESSORS = {}
    SITE_PRE_PROCESSORS = {}
    SITE_POST_PROCESSORS = {}
    # Template rendering context; populated further at generation time.
    CONTEXT = {'blog': {}}
def setup_env(site_path):
    """
    Initializes the Django Environment. NOOP if the environment is
    initialized already.

    Loads <site_path>/settings.py, overlays it on Django's global
    settings plus the _HydeDefaults fallbacks, and calls
    settings.configure().  Raises ValueError if the path does not hold
    a loadable hyde site.  (Python 2 syntax throughout.)
    """
    # Don't do it twice
    if hasattr(settings, "CONTEXT"):
        return
    try:
        hyde_site_settings = imp.load_source("hyde_site_settings",
                            os.path.join(site_path,"settings.py"))
    except SyntaxError, err:
        # Unparseable settings file: report and abort the process.
        print "The given site_path [%s] contains a settings file " \
              "that could not be loaded due syntax errors." % site_path
        print err
        exit()
    except Exception, err:
        print "Cannot Import Site Settings"
        print err
        raise ValueError(
            "The given site_path [%s] does not contain a hyde site. "
            "Give a valid path or run -init to create a new site."
            % site_path
        )
    try:
        # Site settings win over Django globals; _HydeDefaults fills any
        # hyde-specific option the site omitted.
        from django.conf import global_settings
        defaults = global_settings.__dict__
        defaults.update(hyde_site_settings.__dict__)
        settings.configure(_HydeDefaults, **defaults)
    except Exception, err:
        print "Site settings are not defined properly"
        print err
        raise ValueError(
            "The given site_path [%s] has invalid settings. "
            "Give a valid path or run -init to create a new site."
            % site_path
        )
def validate_settings():
    """
    Ensures the site settings are properly configured.
    """
    clean_urls = settings.GENERATE_CLEAN_URLS
    absolute_urls = settings.GENERATE_ABSOLUTE_FS_URLS
    # The two URL generation modes are mutually exclusive.
    if not (clean_urls and absolute_urls):
        return
    raise ValueError(
        "GENERATE_CLEAN_URLS and GENERATE_ABSOLUTE_FS_URLS cannot "
        "be enabled at the same time."
    )
class Server(object):
    """
    Initializes and runs a cherrypy webserver serving static files from the deploy
    folder.
    """
    def __init__(self, site_path, address='localhost', port=8080):
        super(Server, self).__init__()
        # Normalize ~ and environment variables into an absolute path.
        self.site_path = os.path.abspath(os.path.expandvars(
                            os.path.expanduser(site_path)))
        self.address = address
        self.port = port

    def serve(self, deploy_path, exit_listner):
        """
        Starts the cherrypy server at the given `deploy_path`. If exit_listner is
        provided, calls it when the engine exits.

        NOTE(review): `exit_listner` is misspelled but kept for API
        compatibility with callers (see Generator.generate).
        """
        try:
            import cherrypy
            from cherrypy.lib.static import serve_file
        except ImportError:
            print "Cherry Py is required to run the webserver"
            raise

        setup_env(self.site_path)
        validate_settings()
        # Tuple-indexing trick: picks deploy_path when given, else the
        # configured DEPLOY_DIR ([not deploy_path] is 0 or 1).
        deploy_folder = Folder(
                        (deploy_path, settings.DEPLOY_DIR)
                        [not deploy_path])
        # Build the site model lazily if no generation has happened yet.
        if not 'site' in settings.CONTEXT:
            generator = Generator(self.site_path)
            generator.create_siteinfo()

        site = settings.CONTEXT['site']

        # Maps clean urls to actual files; defaultdict(bool) yields a
        # falsy value for unknown urls.
        url_file_mapping = defaultdict(bool)

        # This following bit is for supporting listing pages with arbitrary
        # filenames.
        if settings.GENERATE_CLEAN_URLS:
            for page in site.walk_pages(): # build url to file mapping
                if page.listing and page.file.name_without_extension not in \
                   (settings.LISTING_PAGE_NAMES + [page.node.name]):
                    filename = os.path.join(settings.DEPLOY_DIR, page.name)
                    url = page.url.strip('/')
                    url_file_mapping[url] = filename

        class WebRoot:
            # cherrypy handler rooted at "/"; closes over site,
            # deploy_folder and url_file_mapping.
            @cherrypy.expose
            def index(self):
                page = site.listing_page
                return serve_file(deploy_folder.child(page.name))

            if settings.GENERATE_CLEAN_URLS:
                # Only define a catch-all handler in clean-url mode;
                # otherwise the staticdir tool serves everything.
                @cherrypy.expose
                def default(self, *args):
                    # first, see if the url is in the url_file_mapping
                    # dictionary
                    file = url_file_mapping[os.sep.join(args)]
                    if file:
                        return serve_file(file)

                    # next, try to find a listing page whose filename is the
                    # same as its enclosing folder's name
                    file = os.path.join(deploy_folder.path,
                                        os.sep.join(args),
                                        args[-1] + '.html')
                    if os.path.isfile(file):
                        return serve_file(file)

                    # try each filename in LISTING_PAGE_NAMES setting
                    for listing_name in settings.LISTING_PAGE_NAMES:
                        file = os.path.join(deploy_folder.path,
                                            os.sep.join(args),
                                            listing_name + '.html')
                        if os.path.isfile(file):
                            return serve_file(file)

                    # failing that, search for a non-listing page
                    file = os.path.join(deploy_folder.path,
                                        os.sep.join(args[:-1]),
                                        args[-1] + '.html')
                    if os.path.isfile(file):
                        return serve_file(file)

                    # failing that, page not found
                    raise cherrypy.NotFound

        cherrypy.config.update({'environment': 'production',
                                'log.error_file': 'site.log',
                                'log.screen': True,
                                'server.socket_host': self.address,
                                'server.socket_port': self.port,
                              })
        # even if we're still using clean urls, we still need to serve media.
        if settings.GENERATE_CLEAN_URLS:
            conf = {'/media': {
                'tools.staticdir.dir':os.path.join(deploy_folder.path, 'media'),
                'tools.staticdir.on':True
            }}
        else:
            conf = {'/': {
                'tools.staticdir.dir': deploy_folder.path,
                'tools.staticdir.on':True
            }}

        cherrypy.tree.mount(WebRoot(), "/", conf)
        if exit_listner:
            cherrypy.engine.subscribe('exit', exit_listner)
        cherrypy.engine.start()

    @property
    def alive(self):
        """
        Checks if the webserver is alive.
        """
        import cherrypy
        return cherrypy.engine.state == cherrypy.engine.states.STARTED

    def block(self):
        """
        Blocks and waits for the engine to exit.
        """
        import cherrypy
        cherrypy.engine.block()

    def quit(self):
        # Stop the cherrypy engine (triggers any subscribed exit listener).
        import cherrypy
        cherrypy.engine.exit()
class Generator(object):
    """
    Generates a deployable website from the templates. Can monitor the site for
    changes and regenerate incrementally via two worker threads
    (__watch__ consumes file-change events; __regenerate__ debounces
    full rebuild requests).
    """
    def __init__(self, site_path):
        super(Generator, self).__init__()
        self.site_path = os.path.abspath(os.path.expandvars(
                            os.path.expanduser(site_path)))
        # Signals between the watcher and the regenerator threads.
        self.regenerate_request = Event()
        self.regeneration_complete = Event()
        self.queue = Queue()
        self.watcher = Thread(target=self.__watch__)
        self.regenerator = Thread(target=self.__regenerate__)
        self.processor = Processor(settings)
        self.quitting = False

    def pre_process(self, node):
        self.processor.pre_process(node)

    def process(self, item, change="Added"):
        # Adds/modifications run the processor pipeline; deletions remove
        # the generated output.  Other change kinds are ignored (None).
        if change in ("Added", "Modified"):
            settings.CONTEXT['node'] = item.node
            settings.CONTEXT['resource'] = item
            return self.processor.process(item)
        elif change in ("Deleted", "NodeRemoved"):
            return self.processor.remove(item)

    def build_siteinfo(self, deploy_path=None):
        # Prepare temp and deploy folders (optionally backing up the old
        # deploy), register hyde template tags, and build the site model.
        tmp_folder = Folder(settings.TMP_DIR)
        deploy_folder = Folder(
                        (deploy_path, settings.DEPLOY_DIR)
                        [not deploy_path])
        if deploy_folder.exists and settings.BACKUP:
            backup_folder = Folder(settings.BACKUPS_DIR).make()
            deploy_folder.backup(backup_folder)
        tmp_folder.delete()
        tmp_folder.make()
        settings.DEPLOY_DIR = deploy_folder.path
        if not deploy_folder.exists:
            deploy_folder.make()
        add_to_builtins('hydeengine.templatetags.hydetags')
        add_to_builtins('hydeengine.templatetags.aym')
        add_to_builtins('hydeengine.templatetags.typogrify')
        self.create_siteinfo()

    def create_siteinfo(self):
        # Build/refresh the SiteInfo model and expose it to templates.
        self.siteinfo = SiteInfo(settings, self.site_path)
        self.siteinfo.refresh()
        settings.CONTEXT['site'] = self.siteinfo.content_node

    def post_process(self, node):
        self.processor.post_process(node)

    def process_all(self):
        # Full build: pre-process, process every resource, post-process,
        # then sync the temp output into the deploy folder.
        self.pre_process(self.siteinfo)
        for resource in self.siteinfo.walk_resources():
            self.process(resource)
        self.post_process(self.siteinfo)
        self.siteinfo.target_folder.copy_contents_of(
            self.siteinfo.temp_folder, incremental=True)

    def __regenerate__(self):
        # Debouncing loop: a regenerate request only triggers a rebuild
        # once no further request arrives within the 5 second wait.
        pending = False
        while True:
            try:
                if self.quit_event.isSet():
                    print "Exiting regenerator..."
                    break
                # Wait for the regeneration event to be set
                self.regenerate_request.wait(5)
                # Wait until there are no more requests
                # Got a request, we dont want to process it
                # immedietely since other changes may be under way.
                # Another request coming in renews the initil request.
                # When there are no more requests, we go ahead and process
                # the event.
                if not self.regenerate_request.isSet() and pending:
                    pending = False
                    self.process_all()
                    self.regeneration_complete.set()
                elif self.regenerate_request.isSet():
                    self.regeneration_complete.clear()
                    pending = True
                    self.regenerate_request.clear()
            except:
                self.quit()
                raise

    def __watch__(self):
        # Consumes change events from self.queue (fed by
        # siteinfo.monitor); simple changes are processed incrementally,
        # structural ones escalate to a full regeneration.
        regenerating = False
        while True:
            try:
                if self.quit_event.isSet():
                    print "Exiting watcher..."
                    break
                try:
                    pending = self.queue.get(timeout=10)
                except Empty:
                    continue
                self.queue.task_done()
                # An "exception" event from the monitor shuts everything down.
                if pending.setdefault("exception", False):
                    self.quit_event.set()
                    print "Exiting watcher"
                    break
                if 'resource' in pending:
                    resource = pending['resource']
                    if self.regeneration_complete.isSet():
                        regenerating = False
                    if pending['change'] == "Deleted":
                        self.process(resource, pending['change'])
                    elif pending['change'] == "NodeRemoved":
                        self.process(pending['node'], pending['change'])
                    # Deletions, layout changes, or an in-flight rebuild
                    # all require a full regeneration.
                    if (pending['change'] in ("Deleted", "NodeRemoved") or
                        resource.is_layout or regenerating):
                        regenerating = True
                        self.regenerate_request.set()
                        continue
                    if self.process(resource, pending['change']):
                        self.post_process(resource.node)
                        self.siteinfo.target_folder.copy_contents_of(
                            self.siteinfo.temp_folder, incremental=True)
            except:
                self.quit()
                raise

    def generate(self, deploy_path=None,
                 keep_watching=False,
                 exit_listner=None):
        # One full build; with keep_watching, start the watcher and
        # regenerator threads and begin monitoring the site for changes.
        self.exit_listner = exit_listner
        self.quit_event = Event()
        setup_env(self.site_path)
        validate_settings()
        self.build_siteinfo(deploy_path)
        self.process_all()
        self.siteinfo.temp_folder.delete()
        if keep_watching:
            try:
                self.siteinfo.temp_folder.make()
                self.watcher.start()
                self.regenerator.start()
                self.siteinfo.monitor(self.queue)
            except (KeyboardInterrupt, IOError, SystemExit):
                self.quit()
                raise
            except:
                self.quit()
                raise

    def block(self):
        # Wait for both worker threads to finish, then stop monitoring.
        try:
            while self.watcher.isAlive():
                self.watcher.join(0.1)
            while self.regenerator.isAlive():
                self.regenerator.join(0.1)
            self.siteinfo.dont_monitor()
        except (KeyboardInterrupt, IOError, SystemExit):
            self.quit()
            raise
        except:
            self.quit()
            raise

    def quit(self):
        # Idempotent shutdown: stop monitoring, signal the threads, and
        # notify the (optionally registered) exit listener.
        if self.quitting:
            return
        self.quitting = True
        print "Shutting down..."
        self.siteinfo.dont_monitor()
        self.quit_event.set()
        if self.exit_listner:
            self.exit_listner()
class Initializer(object):
    """Creates a new hyde site at a target path from a named template."""

    def __init__(self, site_path):
        super(Initializer, self).__init__()
        self.site_path = Folder(site_path)

    def initialize(self, root, template=None, force=False):
        """Copy the template named `template` (default "default") from
        `root`/templates into the site path, refusing to overwrite a
        non-empty site unless `force` is given."""
        template_name = template or "default"
        template_dir = Folder(root).child_folder("templates", template_name)
        if not template_dir.exists:
            raise ValueError(
                "Cannot find the specified template[%s]." % template_dir)
        if self.site_path.exists:
            existing = os.listdir(self.site_path.path)
            PathUtil.filter_hidden_inplace(existing)
            # Non-hidden files present and no force flag: refuse to clobber.
            if existing and not force:
                raise ValueError(
                    "The site_path[%s] is not empty." % self.site_path)
            self.site_path.delete()
            self.site_path.make()
        self.site_path.copy_contents_of(template_dir)
| mit |
psmit/kaldi-recipes | common/make_lfst_pre.py | 2 | 1973 | #!/usr/bin/env python3
import sys
import math
# Three base states.
# 0, start-state, all arcs from _E phones
# 1, all arcs to _B phones (connected with 1 through <w>)
# 2, all arcs from and to _I phones
next_state=5
def print_word(word, phones, start, end, from_state, to_state):
    """Print one FST arc per phone for `word`, from `from_state` to
    `to_state`, allocating fresh intermediate states from the module
    global `next_state`.  Output lines are OpenFst text format:
    src dst ilabel olabel.

    `start`/`end` control position-dependent phone suffixes
    (_B begin, _I internal, _E end, _S singleton) -- presumably the
    Kaldi word-position-dependent phone convention; trailing "#N"
    disambiguation symbols are kept verbatim at the end.
    """
    global next_state
    cur_state = from_state
    phones = list(phones)

    # Peel trailing disambiguation symbols off the phone sequence.
    disambig = []
    while len(phones) > 0 and phones[-1].startswith("#"):
        disambig.insert(0,phones[-1])
        phones = phones[:-1]

    #make sure no disambig phones were hiding somewhere else in the sequence
    assert not any(p.startswith("#") for p in phones)

    # Strip any existing _X suffix, then re-suffix by position in the word.
    phones = [p.split('_')[0] for p in phones]
    labels = ["I"] * len(phones)
    if start:
        labels[0] = "B"
    if end:
        labels[-1] = "E"
    if len(phones) == 1 and start and end:
        labels[0] = "S"

    phones = ["{}_{}".format(p,l) for p,l in zip(phones, labels)] + disambig
    assert len(phones) > 0
    # The word label is emitted on the first arc only; the rest are <eps>.
    while len(phones) > 1:
        print("{}\t{}\t{}\t{}".format(cur_state,next_state,phones[0],word))
        cur_state = next_state
        next_state += 1
        word = "<eps>"
        phones = phones[1:]
    print("{}\t{}\t{}\t{}".format(cur_state,to_state,phones[0],word))
# Script body: reads "word prob phones..." lines from stdin and writes an
# OpenFst text-format lexicon FST to stdout.  States (see header comment):
# 0 start, 1 word-begin, 2 word-internal, 3 begin after a no-boundary word,
# 4 after optional silence.
disambig_symbol = sys.argv[1]

# Optional initial SIL with probability 0.5 (weight is -log prob).
print("{}\t{}\t{}\t{}\t{}".format(0,4,"SIL","<eps>", -math.log(0.5)))
print("{}\t{}\t{}\t{}".format(4,1,disambig_symbol ,"<eps>"))
print("{}\t{}\t{}\t{}\t{}".format(0,1,"<eps>","<eps>", -math.log(0.5)))
print("{}\t{}\t{}\t{}".format(2,3,disambig_symbol ,"<eps>"))

for line in sys.stdin:
    word, prob, phones = line.strip().split(None, 2)
    phones = phones.split()
    assert len(phones) > 0
    # Words starting with "|" or "+" attach to the previous word
    # (no word boundary): they enter at state 3 instead of 1.
    start,si = True,1
    if word.startswith("|") or word.startswith("+"):
        start,si = False,3
    # Arc sequence ending the utterance (to state 0)...
    print_word(word, phones, start, True, si, 0)
    if word == "<UNK>": continue
    # ...and the word-internal variant (to state 2); skipped for <UNK>.
    print_word(word, phones, start, False, si, 2)

# State 1 is final with weight 0.
print("{}\t0".format(1))
| apache-2.0 |
Fale/ansible | test/lib/ansible_test/_internal/payload.py | 21 | 5772 | """Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import atexit
import os
import stat
import tarfile
import tempfile
import time
from . import types as t
from .config import (
IntegrationConfig,
ShellConfig,
)
from .util import (
display,
ANSIBLE_SOURCE_ROOT,
remove_tree,
is_subdir,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
# improve performance by disabling uid/gid lookups
# NOTE(review): this monkey-patches tarfile's module-level pwd/grp
# references -- confirm the attribute names against the target Python
# version before upgrading.
tarfile.pwd = None
tarfile.grp = None

# this bin symlink map must exactly match the contents of the bin directory
# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible
ANSIBLE_BIN_SYMLINK_MAP = {
    'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py',
    'ansible-config': 'ansible',
    'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
    'ansible-console': 'ansible',
    'ansible-doc': 'ansible',
    'ansible-galaxy': 'ansible',
    'ansible-inventory': 'ansible',
    'ansible-playbook': 'ansible',
    'ansible-pull': 'ansible',
    'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py',
    'ansible-vault': 'ansible',
}
def create_payload(args, dst_path):  # type: (CommonConfig, str) -> None
    """Create a payload for delegation.

    Builds a gzip tar at `dst_path` containing the ansible source (under
    'ansible/') and, when testing a collection, the collection content
    (under its 'ansible_collections/...' directory).  No-op in explain
    mode.
    """
    if args.explain:
        return

    files = list(data_context().ansible_source)
    # Maps archive destination path -> tarfile filter for that member.
    filters = {}

    def make_executable(tar_info):  # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]
        """Make the given file executable."""
        tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
        return tar_info

    if not ANSIBLE_SOURCE_ROOT:
        # reconstruct the bin directory which is not available when running from an ansible install
        files.extend(create_temporary_bin_files(args))
        # The '../'-relative symlink targets are real scripts and must stay executable.
        filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))

    if not data_context().content.is_ansible:
        # exclude unnecessary files when not testing ansible itself
        files = [f for f in files if
                 is_subdir(f[1], 'bin/') or
                 is_subdir(f[1], 'lib/ansible/') or
                 is_subdir(f[1], 'test/lib/ansible_test/')]

        if not isinstance(args, (ShellConfig, IntegrationConfig)):
            # exclude built-in ansible modules when they are not needed
            files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']

        collection_layouts = data_context().create_collection_layouts()

        content_files = []
        extra_files = []

        for layout in collection_layouts:
            if layout == data_context().content:
                # include files from the current collection (layout.collection.directory will be added later)
                content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files())
            else:
                # include files from each collection in the same collection root as the content being tested
                extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
    else:
        # when testing ansible itself the ansible source is the content
        content_files = files
        # there are no extra files when testing ansible itself
        extra_files = []

    for callback in data_context().payload_callbacks:
        # execute callbacks only on the content paths
        # this is done before placing them in the appropriate subdirectory (see below)
        callback(content_files)

    # place ansible source files under the 'ansible' directory on the delegated host
    files = [(src, os.path.join('ansible', dst)) for src, dst in files]

    if data_context().content.collection:
        # place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host
        files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files)
        # extra files already have the correct destination path
        files.extend(extra_files)

    # maintain predictable file order
    files = sorted(set(files))

    display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)

    start = time.time()

    with tarfile.TarFile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
        for src, dst in files:
            display.info('%s -> %s' % (src, dst), verbosity=4)
            tar.add(src, dst, filter=filters.get(dst))

    duration = time.time() - start
    payload_size_bytes = os.path.getsize(dst_path)

    display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
def create_temporary_bin_files(args):  # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
    """Create a temporary ansible bin directory populated using the symlink map."""
    if args.explain:
        # Explain mode: report a placeholder path without touching the filesystem.
        temp_path = '/tmp/ansible-tmp-bin'
    else:
        temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
        atexit.register(remove_tree, temp_path)

        # Recreate the bin/ directory contents as symlinks per the map.
        for name in ANSIBLE_BIN_SYMLINK_MAP:
            os.symlink(ANSIBLE_BIN_SYMLINK_MAP[name], os.path.join(temp_path, name))

    names = sorted(ANSIBLE_BIN_SYMLINK_MAP)
    return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in names)
| gpl-3.0 |
servo/servo | tests/wpt/web-platform-tests/webdriver/tests/get_title/iframe.py | 16 | 2183 | import pytest
from tests.support.asserts import assert_success
"""
Tests that WebDriver can transcend site origins.
Many modern browsers impose strict cross-origin checks,
and WebDriver should be able to transcend these.
Although an implementation detail, certain browsers
also enforce process isolation based on site origin.
This is known to sometimes cause problems for WebDriver implementations.
"""
@pytest.fixture
def frame_doc(inline):
    # Innermost document; its title ("cheese") must never be reported
    # for the top-level browsing context.
    return inline("<title>cheese</title><p>frame")
@pytest.fixture
def one_frame_doc(inline, frame_doc):
    # Page titled "bar" embedding frame_doc one level deep.
    return inline("<title>bar</title><iframe src='%s'></iframe>" % frame_doc)
@pytest.fixture
def nested_frames_doc(inline, one_frame_doc):
    # Page titled "foo" embedding one_frame_doc, i.e. two levels of nesting.
    return inline("<title>foo</title><iframe src='%s'></iframe>" % one_frame_doc)
def get_title(session):
    """Issue the WebDriver "Get Title" command for the given session."""
    endpoint = "session/{session_id}/title".format(**vars(session))
    return session.transport.send("GET", endpoint)
def test_no_iframe(session, inline):
    # Baseline: with no frames involved, the top-level title is returned.
    session.url = inline("<title>Foobar</title><h2>Hello</h2>")

    result = get_title(session)
    assert_success(result, "Foobar")
def test_iframe(session, one_frame_doc):
    # Even with the iframe selected as the current browsing context, the
    # command must return the top-level document's title ("bar").
    session.url = one_frame_doc

    frame = session.find.css("iframe", all=False)
    session.switch_frame(frame)
    # Locating <p> ensures the frame document has loaded before querying.
    session.find.css("p", all=False)

    response = get_title(session)
    assert_success(response, "bar")
def test_nested_iframe(session, nested_frames_doc):
    # Two levels of frame nesting: the top-level title ("foo") must still
    # be returned with the innermost frame selected.
    session.url = nested_frames_doc

    outer_frame = session.find.css("iframe", all=False)
    session.switch_frame(outer_frame)

    inner_frame = session.find.css("iframe", all=False)
    session.switch_frame(inner_frame)
    # Locating <p> ensures the inner frame document has loaded.
    session.find.css("p", all=False)

    response = get_title(session)
    assert_success(response, "foo")
@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
def test_origin(session, inline, iframe, domain):
    # The top-level title must be returned regardless of whether the
    # selected frame is same-origin or cross-origin with the top page.
    session.url = inline("<title>foo</title>{}".format(
        iframe("<title>bar</title><p>frame", domain=domain)))
    frame = session.find.css("iframe", all=False)
    session.switch_frame(frame)
    session.find.css("p", all=False)

    response = get_title(session)
    assert_success(response, "foo")
| mpl-2.0 |
hellsgate1001/bookit | docs/env/Lib/sre_parse.py | 156 | 26798 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
# Characters with special meaning outside a character class.
SPECIAL_CHARS = ".\\[{()*+?^$|"
# Characters that can introduce a repeat operator.
REPEAT_CHARS = "*+?{"

DIGITS = set("0123456789")

OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")

WHITESPACE = set(" \t\n\r\v\f")

# Escapes that map directly to a single literal character code.
ESCAPES = {
    r"\a": (LITERAL, ord("\a")),
    r"\b": (LITERAL, ord("\b")),
    r"\f": (LITERAL, ord("\f")),
    r"\n": (LITERAL, ord("\n")),
    r"\r": (LITERAL, ord("\r")),
    r"\t": (LITERAL, ord("\t")),
    r"\v": (LITERAL, ord("\v")),
    r"\\": (LITERAL, ord("\\"))
}

# Escapes that expand to an anchor (AT) or character-category (IN) opcode.
CATEGORIES = {
    r"\A": (AT, AT_BEGINNING_STRING), # start of string
    r"\b": (AT, AT_BOUNDARY),
    r"\B": (AT, AT_NON_BOUNDARY),
    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
    r"\Z": (AT, AT_END_STRING), # end of string
}

# Inline flag letters (as in "(?i)") mapped to their SRE_FLAG_* bits.
FLAGS = {
    # standard flags
    "i": SRE_FLAG_IGNORECASE,
    "L": SRE_FLAG_LOCALE,
    "m": SRE_FLAG_MULTILINE,
    "s": SRE_FLAG_DOTALL,
    "x": SRE_FLAG_VERBOSE,
    # extensions
    "t": SRE_FLAG_TEMPLATE,
    "u": SRE_FLAG_UNICODE,
}
class Pattern:
    # master pattern object. keeps track of global attributes
    # shared across the whole parse: compile flags, the next free capture
    # group number, the name->group mapping, and which groups are still
    # open (a backreference to an open group is illegal).
    def __init__(self):
        self.flags = 0
        self.open = []
        self.groups = 1
        self.groupdict = {}
    def opengroup(self, name=None):
        # Allocate the next group number (group 0 is the whole match) and
        # register *name* if given; duplicate names are an error.
        gid = self.groups
        self.groups = gid + 1
        if name is not None:
            ogid = self.groupdict.get(name, None)
            if ogid is not None:
                raise error, ("redefinition of group name %s as group %d; "
                              "was group %d" % (repr(name), gid, ogid))
            self.groupdict[name] = gid
        self.open.append(gid)
        return gid
    def closegroup(self, gid):
        self.open.remove(gid)
    def checkgroup(self, gid):
        # True if *gid* has been allocated and fully closed, i.e. it is a
        # legal target for a backreference at this point in the pattern.
        return gid < self.groups and gid not in self.open
class SubPattern:
    # a subpattern, in intermediate form
    # Wraps a list of (opcode, argument) tuples produced by the parser and
    # behaves like a mutable sequence; also caches the computed match width.
    def __init__(self, pattern, data=None):
        self.pattern = pattern
        if data is None:
            data = []
        self.data = data
        self.width = None
    def dump(self, level=0):
        # Debug helper: pretty-print the (opcode, argument) tree, indenting
        # *level* steps for nested subpatterns.
        nl = 1
        seqtypes = type(()), type([])
        for op, av in self.data:
            print level*" " + op,; nl = 0
            if op == "in":
                # member sublanguage
                print; nl = 1
                for op, a in av:
                    print (level+1)*" " + op, a
            elif op == "branch":
                print; nl = 1
                i = 0
                for a in av[1]:
                    if i > 0:
                        print level*" " + "or"
                    a.dump(level+1); nl = 1
                    i = i + 1
            elif type(av) in seqtypes:
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl: print
                        a.dump(level+1); nl = 1
                    else:
                        print a, ; nl = 0
            else:
                print av, ; nl = 0
            if not nl: print
    def __repr__(self):
        return repr(self.data)
    def __len__(self):
        return len(self.data)
    def __delitem__(self, index):
        del self.data[index]
    def __getitem__(self, index):
        # Slicing returns a new SubPattern sharing the same Pattern state.
        if isinstance(index, slice):
            return SubPattern(self.pattern, self.data[index])
        return self.data[index]
    def __setitem__(self, index, code):
        self.data[index] = code
    def insert(self, index, code):
        self.data.insert(index, code)
    def append(self, code):
        self.data.append(code)
    def getwidth(self):
        # determine the width (min, max) for this subpattern
        # The result is cached in self.width after the first call.
        if self.width:
            return self.width
        lo = hi = 0L
        UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
        REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
        for op, av in self.data:
            if op is BRANCH:
                # width of a branch is the min/max over all alternatives
                i = sys.maxint
                j = 0
                for av in av[1]:
                    l, h = av.getwidth()
                    i = min(i, l)
                    j = max(j, h)
                lo = lo + i
                hi = hi + j
            elif op is CALL:
                i, j = av.getwidth()
                lo = lo + i
                hi = hi + j
            elif op is SUBPATTERN:
                i, j = av[1].getwidth()
                lo = lo + i
                hi = hi + j
            elif op in REPEATCODES:
                # av is (min, max, item); widths scale with repeat counts
                i, j = av[2].getwidth()
                lo = lo + long(i) * av[0]
                hi = hi + long(j) * av[1]
            elif op in UNITCODES:
                lo = lo + 1
                hi = hi + 1
            elif op == SUCCESS:
                break
        # clamp to the native int range before caching
        self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
        return self.width
class Tokenizer:
    # Splits the pattern string into tokens, where a backslash escape
    # (e.g. r"\d") is returned as one two-character token. The next
    # token is always cached in self.next (None at end of input).
    def __init__(self, string):
        self.string = string
        self.index = 0
        self.__next()
    def __next(self):
        # Advance past the current token and cache the following one.
        if self.index >= len(self.string):
            self.next = None
            return
        char = self.string[self.index]
        if char[0] == "\\":
            # a backslash must be followed by another character
            try:
                c = self.string[self.index + 1]
            except IndexError:
                raise error, "bogus escape (end of line)"
            char = char + c
        self.index = self.index + len(char)
        self.next = char
    def match(self, char, skip=1):
        # Return 1 if the lookahead equals *char*; consume it when *skip*.
        if char == self.next:
            if skip:
                self.__next()
            return 1
        return 0
    def get(self):
        # Consume and return the current token (None at end of input).
        this = self.next
        self.__next()
        return this
    def tell(self):
        # Snapshot of (index, lookahead) so seek() can backtrack later.
        return self.index, self.next
    def seek(self, index):
        # Restore a position previously returned by tell().
        self.index, self.next = index
def isident(char):
    """Return true if *char* is a valid identifier character (ASCII
    letter or underscore); digits are handled separately by isdigit."""
    if "a" <= char <= "z":
        return True
    if "A" <= char <= "Z":
        return True
    return char == "_"
def isdigit(char):
    """Return true if *char* is an ASCII decimal digit."""
    # De Morgan form of "0" <= char <= "9".
    return not (char < "0" or char > "9")
def isname(name):
    """Check that a group name is valid: an identifier character
    followed by any mix of identifier characters and digits."""
    if not isident(name[0]):
        return False
    return all(isident(c) or isdigit(c) for c in name[1:])
def _class_escape(source, escape):
    # handle escape code inside character class
    # Returns an (opcode, argument) tuple; group references (\1 etc.)
    # are not legal inside [...] and raise an error.
    code = ESCAPES.get(escape)
    if code:
        return code
    code = CATEGORIES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape (exactly two digits)
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            escape = escape[2:]
            if len(escape) != 2:
                raise error, "bogus escape: %s" % repr("\\" + escape)
            return LITERAL, int(escape, 16) & 0xff
        elif c in OCTDIGITS:
            # octal escape (up to three digits)
            while source.next in OCTDIGITS and len(escape) < 4:
                escape = escape + source.get()
            escape = escape[1:]
            return LITERAL, int(escape, 8) & 0xff
        elif c in DIGITS:
            # \8 or \9 inside a class: never valid
            raise error, "bogus escape: %s" % repr(escape)
        if len(escape) == 2:
            # any other escaped character stands for itself
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
    # handle escape code in expression
    # Unlike _class_escape, \<digits> here may be a decimal group
    # reference; *state* is consulted to validate the group number.
    code = CATEGORIES.get(escape)
    if code:
        return code
    code = ESCAPES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape
            while source.next in HEXDIGITS and len(escape) < 4:
                escape = escape + source.get()
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "0":
            # octal escape
            while source.next in OCTDIGITS and len(escape) < 4:
                escape = escape + source.get()
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # octal escape *or* decimal group reference (sigh)
            if source.next in DIGITS:
                escape = escape + source.get()
                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
                    source.next in OCTDIGITS):
                    # got three octal digits; this is an octal escape
                    escape = escape + source.get()
                    return LITERAL, int(escape[1:], 8) & 0xff
            # not an octal escape, so this is a group reference
            group = int(escape[1:])
            if group < state.groups:
                # referring to a group that is still open is illegal
                if not state.checkgroup(group):
                    raise error, "cannot refer to open group"
                return GROUPREF, group
            raise ValueError
        if len(escape) == 2:
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
    # parse an alternation: a|b|c
    # Returns a single SubPattern; applies two post-parse simplifications
    # (common-prefix hoisting and branch->character-set conversion).
    items = []
    itemsappend = items.append
    sourcematch = source.match
    while 1:
        itemsappend(_parse(source, state))
        if sourcematch("|"):
            continue
        if not nested:
            break
        if not source.next or sourcematch(")", 0):
            break
        else:
            raise error, "pattern not properly closed"
    if len(items) == 1:
        # no alternation at all
        return items[0]
    subpattern = SubPattern(state)
    subpatternappend = subpattern.append
    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpatternappend(prefix)
            continue # check next one
        break
    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        setappend = set.append
        for item in items:
            setappend(item[0])
        subpatternappend((IN, set))
        return subpattern
    subpattern.append((BRANCH, (None, items)))
    return subpattern
def _parse_sub_cond(source, state, condgroup):
    # Parse the body of a conditional backreference group
    # "(?(group)yes|no)"; *condgroup* is the referenced group number.
    item_yes = _parse(source, state)
    if source.match("|"):
        item_no = _parse(source, state)
        if source.match("|"):
            raise error, "conditional backref with more than two branches"
    else:
        # the "no" branch is optional
        item_no = None
    if source.next and not source.match(")", 0):
        raise error, "pattern not properly closed"
    subpattern = SubPattern(state)
    subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
    return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
    # parse a simple pattern
    # Consumes tokens until "|", ")" or end of input and returns a
    # SubPattern with the parsed (opcode, argument) sequence. Alternation
    # and group recursion are handled by _parse_sub/_parse_sub_cond.
    subpattern = SubPattern(state)
    # precompute constants into local variables
    subpatternappend = subpattern.append
    sourceget = source.get
    sourcematch = source.match
    _len = len
    PATTERNENDERS = _PATTERNENDERS
    ASSERTCHARS = _ASSERTCHARS
    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
    REPEATCODES = _REPEATCODES
    while 1:
        if source.next in PATTERNENDERS:
            break # end of subpattern
        this = sourceget()
        if this is None:
            break # end of pattern
        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                while 1:
                    this = sourceget()
                    if this in (None, "\n"):
                        break
                continue
        if this and this[0] not in SPECIAL_CHARS:
            # ordinary literal character
            subpatternappend((LITERAL, ord(this)))
        elif this == "[":
            # character set
            set = []
            setappend = set.append
##          if sourcematch(":"):
##              pass # handle character classes
            if sourcematch("^"):
                setappend((NEGATE, None))
            # check remaining characters
            start = set[:]
            while 1:
                this = sourceget()
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error, "unexpected end of regular expression"
                if sourcematch("-"):
                    # potential range
                    this = sourceget()
                    if this == "]":
                        # trailing "-" before "]" is a literal dash
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        setappend(code1)
                        setappend((LITERAL, ord("-")))
                        break
                    elif this:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error, "bad character range"
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error, "bad character range"
                        setappend((RANGE, (lo, hi)))
                    else:
                        raise error, "unexpected end of regular expression"
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    setappend(code1)
            # XXX: <fl> should move set optimization to compiler!
            if _len(set)==1 and set[0][0] is LITERAL:
                subpatternappend(set[0]) # optimization
            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpatternappend((NOT_LITERAL, set[1][1])) # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpatternappend((IN, set))
        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT
            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                if source.next == "}":
                    # "{}" is not a repeat; treat "{" as a literal
                    subpatternappend((LITERAL, ord(this)))
                    continue
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if sourcematch(","):
                    while source.next in DIGITS:
                        hi = hi + sourceget()
                else:
                    hi = lo
                if not sourcematch("}"):
                    # not a well-formed {m,n}; backtrack and emit literal "{"
                    subpatternappend((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = int(lo)
                if hi:
                    max = int(hi)
                if max < min:
                    raise error, "bad repeat interval"
            else:
                raise error, "not supported"
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (_len(item) == 1 and item[0][0] == AT):
                raise error, "nothing to repeat"
            if item[0][0] in REPEATCODES:
                raise error, "multiple repeat"
            if sourcematch("?"):
                # trailing "?" makes the repeat non-greedy
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))
        elif this == ".":
            subpatternappend((ANY, None))
        elif this == "(":
            group = 1
            name = None
            condgroup = None
            if sourcematch("?"):
                group = 0
                # options
                if sourcematch("P"):
                    # python extensions
                    if sourcematch("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error, "unterminated name"
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not isname(name):
                            raise error, "bad character in group name"
                    elif sourcematch("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error, "unterminated name"
                            if char == ")":
                                break
                            name = name + char
                        if not isname(name):
                            raise error, "bad character in group name"
                        gid = state.groupdict.get(name)
                        if gid is None:
                            raise error, "unknown group name"
                        subpatternappend((GROUPREF, gid))
                        continue
                    else:
                        char = sourceget()
                        if char is None:
                            raise error, "unexpected end of pattern"
                        raise error, "unknown specifier: ?P%s" % char
                elif sourcematch(":"):
                    # non-capturing group
                    group = 2
                elif sourcematch("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        sourceget()
                    if not sourcematch(")"):
                        raise error, "unbalanced parenthesis"
                    continue
                elif source.next in ASSERTCHARS:
                    # lookahead assertions
                    char = sourceget()
                    dir = 1
                    if char == "<":
                        if source.next not in LOOKBEHINDASSERTCHARS:
                            raise error, "syntax error"
                        dir = -1 # lookbehind
                        char = sourceget()
                    p = _parse_sub(source, state)
                    if not sourcematch(")"):
                        raise error, "unbalanced parenthesis"
                    if char == "=":
                        subpatternappend((ASSERT, (dir, p)))
                    else:
                        subpatternappend((ASSERT_NOT, (dir, p)))
                    continue
                elif sourcematch("("):
                    # conditional backreference group
                    condname = ""
                    while 1:
                        char = sourceget()
                        if char is None:
                            raise error, "unterminated name"
                        if char == ")":
                            break
                        condname = condname + char
                    group = 2
                    if isname(condname):
                        condgroup = state.groupdict.get(condname)
                        if condgroup is None:
                            raise error, "unknown group name"
                    else:
                        try:
                            condgroup = int(condname)
                        except ValueError:
                            raise error, "bad character in group name"
                else:
                    # flags
                    if not source.next in FLAGS:
                        raise error, "unexpected end of pattern"
                    while source.next in FLAGS:
                        state.flags = state.flags | FLAGS[sourceget()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                if condgroup:
                    p = _parse_sub_cond(source, state, condgroup)
                else:
                    p = _parse_sub(source, state)
                if not sourcematch(")"):
                    raise error, "unbalanced parenthesis"
                if group is not None:
                    state.closegroup(group)
                subpatternappend((SUBPATTERN, (group, p)))
            else:
                # flag-only group "(?i)" etc.; expect an immediate ")"
                while 1:
                    char = sourceget()
                    if char is None:
                        raise error, "unexpected end of pattern"
                    if char == ")":
                        break
                    raise error, "unknown extension"
        elif this == "^":
            subpatternappend((AT, AT_BEGINNING))
        elif this == "$":
            subpattern.append((AT, AT_END))
        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpatternappend(code)
        else:
            raise error, "parser error"
    return subpattern
def parse(str, flags=0, pattern=None):
    # parse 're' pattern into list of (opcode, argument) tuples
    # Top-level entry point: returns the root SubPattern for *str*.
    source = Tokenizer(str)
    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str
    p = _parse_sub(source, pattern, 0)
    tail = source.get()
    if tail == ")":
        raise error, "unbalanced parenthesis"
    elif tail:
        raise error, "bogus characters at end of regular expression"
    if flags & SRE_FLAG_DEBUG:
        p.dump()
    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern. to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)
    return p
def parse_template(source, pattern):
    # parse 're' replacement string into list of literals and
    # group references
    # Returns (groups, literals): *groups* is a list of (index, group)
    # pairs and *literals* a parallel list with None at group positions.
    s = Tokenizer(source)
    sget = s.get
    p = []
    a = p.append
    def literal(literal, p=p, pappend=a):
        # merge adjacent literals into a single entry
        if p and p[-1][0] is LITERAL:
            p[-1] = LITERAL, p[-1][1] + literal
        else:
            pappend((LITERAL, literal))
    # use an empty slice of the input to pick str vs unicode output
    sep = source[:0]
    if type(sep) is type(""):
        makechar = chr
    else:
        makechar = unichr
    while 1:
        this = sget()
        if this is None:
            break # end of replacement string
        if this and this[0] == "\\":
            # group
            c = this[1:2]
            if c == "g":
                # \g<name> or \g<number> reference
                name = ""
                if s.match("<"):
                    while 1:
                        char = sget()
                        if char is None:
                            raise error, "unterminated group name"
                        if char == ">":
                            break
                        name = name + char
                if not name:
                    raise error, "bad group name"
                try:
                    index = int(name)
                    if index < 0:
                        raise error, "negative group number"
                except ValueError:
                    if not isname(name):
                        raise error, "bad character in group name"
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        raise IndexError, "unknown group name"
                a((MARK, index))
            elif c == "0":
                # \0 octal escape (up to two more octal digits)
                if s.next in OCTDIGITS:
                    this = this + sget()
                    if s.next in OCTDIGITS:
                        this = this + sget()
                literal(makechar(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                # \N: three octal digits -> octal escape, else group ref
                isoctal = False
                if s.next in DIGITS:
                    this = this + sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this = this + sget()
                        isoctal = True
                        literal(makechar(int(this[1:], 8) & 0xff))
                if not isoctal:
                    a((MARK, int(this[1:])))
            else:
                try:
                    this = makechar(ESCAPES[this][1])
                except KeyError:
                    pass
                literal(this)
        else:
            literal(this)
    # convert template to groups and literals lists
    i = 0
    groups = []
    groupsappend = groups.append
    literals = [None] * len(p)
    for c, s in p:
        if c is MARK:
            groupsappend((i, s))
            # literal[i] is already None
        else:
            literals[i] = s
        i = i + 1
    return groups, literals
def expand_template(template, match):
    # Fill a parsed replacement *template* (from parse_template) with the
    # group values of *match* and return the resulting string.
    g = match.group
    # empty slice of the subject picks str vs unicode for the join
    sep = match.string[:0]
    groups, literals = template
    literals = literals[:]
    try:
        for index, group in groups:
            literals[index] = s = g(group)
            if s is None:
                # the referenced group did not participate in the match
                raise error, "unmatched group"
    except IndexError:
        raise error, "invalid group reference"
    return sep.join(literals)
| mit |
jamiefolsom/edx-platform | lms/djangoapps/certificates/management/commands/regenerate_user.py | 73 | 5816 | """Django management command to force certificate regeneration for one user"""
import logging
import copy
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from certificates.models import BadgeAssertion
from certificates.api import regenerate_user_certificates
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
    help = """Put a request on the queue to recreate the certificate for a particular user in a particular course."""
    option_list = BaseCommand.option_list + (
        make_option('-n', '--noop',
                    action='store_true',
                    dest='noop',
                    default=False,
                    help="Don't grade or add certificate requests to the queue"),
        make_option('--insecure',
                    action='store_true',
                    dest='insecure',
                    default=False,
                    help="Don't use https for the callback url to the LMS, useful in http test environments"),
        make_option('-c', '--course',
                    metavar='COURSE_ID',
                    dest='course',
                    default=False,
                    help='The course id (e.g., mit/6-002x/circuits-and-electronics) for which the student named in'
                         '<username> should be graded'),
        make_option('-u', '--user',
                    metavar='USERNAME',
                    dest='username',
                    default=False,
                    help='The username or email address for whom grading and certification should be requested'),
        make_option('-G', '--grade',
                    metavar='GRADE',
                    dest='grade_value',
                    default=None,
                    help='The grade string, such as "Distinction", which should be passed to the certificate agent'),
        make_option('-T', '--template',
                    metavar='TEMPLATE',
                    dest='template_file',
                    default=None,
                    help='The template file used to render this certificate, like "QMSE01-distinction.pdf"'),
    )
    def handle(self, *args, **options):
        # Resolve the course key and student, clear any existing badge,
        # then queue an XQueue certificate-regeneration task (unless
        # --noop was given, in which case only log what would happen).
        # Scrub the username from the log message
        cleaned_options = copy.copy(options)
        if 'username' in cleaned_options:
            cleaned_options['username'] = '<USERNAME>'
        LOGGER.info(
            (
                u"Starting to create tasks to regenerate certificates "
                u"with arguments %s and options %s"
            ),
            unicode(args),
            unicode(cleaned_options)
        )
        if options['course']:
            # try to parse out the course from the serialized form
            try:
                course_id = CourseKey.from_string(options['course'])
            except InvalidKeyError:
                # fall back to the legacy "org/course/run" key format
                LOGGER.warning(
                    (
                        u"Course id %s could not be parsed as a CourseKey; "
                        u"falling back to SlashSeparatedCourseKey.from_deprecated_string()"
                    ),
                    options['course']
                )
                course_id = SlashSeparatedCourseKey.from_deprecated_string(options['course'])
        else:
            raise CommandError("You must specify a course")
        user = options['username']
        if not (course_id and user):
            raise CommandError('both course id and student username are required')
        student = None
        # "@" distinguishes an email address from a username; either way
        # the student must be enrolled in the course.
        if '@' in user:
            student = User.objects.get(email=user, courseenrollment__course_id=course_id)
        else:
            student = User.objects.get(username=user, courseenrollment__course_id=course_id)
        course = modulestore().get_course(course_id, depth=2)
        if not options['noop']:
            LOGGER.info(
                (
                    u"Adding task to the XQueue to generate a certificate "
                    u"for student %s in course '%s'."
                ),
                student.id,
                course_id
            )
            # Add the certificate request to the queue
            ret = regenerate_user_certificates(
                student, course_id, course=course,
                forced_grade=options['grade_value'],
                template_file=options['template_file'],
                insecure=options['insecure']
            )
            # Remove any stale badge so it can be re-issued for the new
            # certificate; missing badges are fine.
            try:
                badge = BadgeAssertion.objects.get(user=student, course_id=course_id)
                badge.delete()
                LOGGER.info(u"Cleared badge for student %s.", student.id)
            except BadgeAssertion.DoesNotExist:
                pass
            LOGGER.info(
                (
                    u"Added a certificate regeneration task to the XQueue "
                    u"for student %s in course '%s'. "
                    u"The new certificate status is '%s'."
                ),
                student.id,
                unicode(course_id),
                ret
            )
        else:
            LOGGER.info(
                (
                    u"Skipping certificate generation for "
                    u"student %s in course '%s' "
                    u"because the noop flag is set."
                ),
                student.id,
                unicode(course_id)
            )
        LOGGER.info(
            (
                u"Finished regenerating certificates command for "
                u"user %s and course '%s'."
            ),
            student.id,
            unicode(course_id)
        )
| agpl-3.0 |
sfstpala/Victory-Chat | cherrypy/test/test_sessionauthenticate.py | 42 | 2170 | import cherrypy
from cherrypy.test import helper
class SessionAuthenticateTest(helper.CPWebCase):
    # Functional test for the session_auth tool: login form, login,
    # authenticated access, and logout round trip.
    def setup_server():
        # Mount a minimal app protected by tools.session_auth.
        def check(username, password):
            # Dummy check_username_and_password function
            # Returning a string signals failure; None means success.
            if username != 'test' or password != 'password':
                return 'Wrong login/password'
        def augment_params():
            # A simple tool to add some things to request.params
            # This is to check to make sure that session_auth can handle request
            # params (ticket #780)
            cherrypy.request.params["test"] = "test"
        cherrypy.tools.augment_params = cherrypy.Tool('before_handler',
            augment_params, None, priority=30)
        class Test:
            _cp_config = {'tools.sessions.on': True,
                          'tools.session_auth.on': True,
                          'tools.session_auth.check_username_and_password': check,
                          'tools.augment_params.on': True,
                          }
            def index(self, **kwargs):
                return "Hi %s, you are logged in" % cherrypy.request.login
            index.exposed = True
        cherrypy.tree.mount(Test())
    setup_server = staticmethod(setup_server)
    def testSessionAuthenticate(self):
        # request a page and check for login form
        self.getPage('/')
        self.assertInBody('<form method="post" action="do_login">')
        # setup credentials
        login_body = 'username=test&password=password&from_page=/'
        # attempt a login
        self.getPage('/do_login', method='POST', body=login_body)
        self.assertStatus((302, 303))
        # get the page now that we are logged in
        self.getPage('/', self.cookies)
        self.assertBody('Hi test, you are logged in')
        # do a logout
        self.getPage('/do_logout', self.cookies, method='POST')
        self.assertStatus((302, 303))
        # verify we are logged out
        self.getPage('/', self.cookies)
        self.assertInBody('<form method="post" action="do_login">')
| isc |
PsychoGame/omnirom_kernel_lge_msm8974-old | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
    # Called once by perf before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf after the last event; print the summary.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, ret):
    # Per-event handler for the raw_syscalls:sys_exit tracepoint.
    # Only failed calls (negative return value) are counted, optionally
    # filtered by the command name or pid given on the command line.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # autodict leaf did not exist yet; initialize the counter
            syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    # Print per-comm/pid/syscall failure counts, sorted by count
    # (descending) with errno values rendered via strerror().
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",
    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print "  syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k),  reverse = True):
                    print "    err = %-20s  %10d\n" % (strerror(ret), val),
| gpl-2.0 |
jds2001/sos | sos/plugins/openstack_sahara.py | 1 | 2992 | # Copyright (C) 2015 Red Hat, Inc.,Poornima M. Kshirsagar <pkshiras@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class OpenStackSahara(Plugin):
    """OpenStack Sahara"""
    plugin_name = 'openstack_sahara'
    profiles = ('openstack', 'openstack_controller')
    option_list = []
    def setup(self):
        # Collect Sahara configuration, service journals, and log files
        # (both on the host and from containerized deployments).
        self.add_copy_spec("/etc/sahara/")
        self.add_journal(units="openstack-sahara-all")
        self.add_journal(units="openstack-sahara-api")
        self.add_journal(units="openstack-sahara-engine")
        self.limit = self.get_option("log_size")
        if self.get_option("all_logs"):
            # full log collection requested: whole log directories
            self.add_copy_spec(["/var/log/sahara/",
                                "/var/log/containers/sahara/"],
                               sizelimit=self.limit)
        else:
            # default: only the current *.log files
            self.add_copy_spec(["/var/log/sahara/*.log",
                                "/var/log/containers/sahara/*.log"],
                               sizelimit=self.limit)
        if self.get_option("verify"):
            self.add_cmd_output("rpm -V %s" % ' '.join(self.packages))
    def postproc(self):
        # Obfuscate credentials/secrets in the collected configuration.
        protect_keys = [
            "admin_password", "memcache_secret_key", "password",
            "qpid_password", "rabbit_password", "ssl_key_password",
            "xenapi_connection_password", "connection"
        ]
        # matches "key = value" lines, keeping the key and masking the value
        regexp = r"((?m)^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys)
        self.do_path_regex_sub("/etc/sahara/*", regexp, r"\1*********")
class DebianSahara(OpenStackSahara, DebianPlugin, UbuntuPlugin):
    """OpenStackSahara related information for Debian based distributions."""
    # Presence of any of these packages triggers the plugin on Debian/Ubuntu.
    packages = (
        'sahara-api',
        'sahara-common',
        'sahara-engine',
        'python-sahara',
        'python-saharaclient',
    )
    def setup(self):
        super(DebianSahara, self).setup()
class RedHatSahara(OpenStackSahara, RedHatPlugin):
    """OpenStack sahara related information for Red Hat distributions."""
    # Presence of any of these packages triggers the plugin on RHEL/Fedora.
    packages = (
        'openstack-sahara',
        'openstack-sahara-api',
        'openstack-sahara-engine',
        'python-saharaclient'
    )
    def setup(self):
        super(RedHatSahara, self).setup()
        # Red Hat installs ship a sudoers drop-in for sahara; collect it too.
        self.add_copy_spec("/etc/sudoers.d/sahara*")
# vim: et ts=4 sw=4
| gpl-2.0 |
hoosteeno/kuma | vendor/packages/translate/convert/po2idml.py | 23 | 7461 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Takes an IDML template file and a PO file containing translations of
strings in the IDML template. It creates a new IDML file using the translations
of the PO file.
"""
from cStringIO import StringIO
from zipfile import ZIP_DEFLATED, ZipFile
import lxml.etree as etree
from translate.convert import convert
from translate.storage import factory
from translate.storage.idml import (NO_TRANSLATE_ELEMENTS,
INLINE_ELEMENTS, copy_idml, open_idml)
from translate.storage.xml_extract.extract import (ParseState,
process_idml_translatable)
from translate.storage.xml_extract.generate import (apply_translations,
replace_dom_text)
from translate.storage.xml_extract.unit_tree import XPathTree, build_unit_tree
def translate_idml(template, input_file, translatable_files):
    """Return translated DOM trees for the given files of an IDML template.

    *template* is the IDML package, *input_file* the PO file with the
    translations, and *translatable_files* the package-internal filenames
    to translate. The result maps each such filename to its translated
    lxml ElementTree.
    """
    def load_dom_trees(template):
        """Return a dict with translatable files in the template IDML package.
        The keys are the filenames inside the IDML package, and the values are
        the etrees for each of those translatable files.
        """
        idml_data = open_idml(template)
        # strip_cdata=False keeps CDATA sections intact on round trip
        parser = etree.XMLParser(strip_cdata=False)
        return dict((filename, etree.fromstring(data, parser).getroottree())
                    for filename, data in idml_data.iteritems())
    def load_unit_tree(input_file):
        """Return a dict with the translations grouped by files IDML package.
        The keys are the filenames inside the template IDML package, and the
        values are XPathTree instances for each of those files.
        """
        store = factory.getobject(input_file)
        def extract_unit_tree(filename, root_dom_element_name):
            """Find the subtree in 'tree' which corresponds to the data in XML
            file 'filename'
            """
            tree = build_unit_tree(store, filename)
            try:
                file_tree = tree.children[root_dom_element_name, 0]
            except KeyError:
                # no translations for this file: use an empty tree
                file_tree = XPathTree()
            return (filename, file_tree)
        return dict(extract_unit_tree(filename, 'idPkg:Story')
                    for filename in translatable_files)
    def translate_dom_trees(unit_trees, dom_trees):
        """Return a dict with the translated files for the IDML package.
        The keys are the filenames for the translatable files inside the
        template IDML package, and the values are etree ElementTree instances
        for each of those files.
        """
        def get_po_doms(unit):
            """Return a tuple with unit source and target DOM objects.
            This method is method is meant to provide a way to retrieve the DOM
            objects for the unit source and target for PO stores.
            Since POunit doesn't have any source_dom nor target_dom attributes,
            it is necessary to craft those objects.
            """
            def add_node_content(string, node):
                """Append the translatable content to the node.
                The string is going to have XLIFF placeables, so we have to
                parse it as XML in order to get the right nodes to append to
                the node.
                """
                # Add a wrapper "whatever" tag to avoid problems when parsing
                # several sibling tags at the root level.
                fake_string = "<whatever>" + string + "</whatever>"
                # Copy the children to the XLIFF unit's source or target node.
                fake_node = etree.fromstring(fake_string)
                node.extend(fake_node.getchildren())
                return node
            source_dom = etree.Element("source")
            source_dom = add_node_content(unit.source, source_dom)
            target_dom = etree.Element("target")
            if unit.target:
                target_dom = add_node_content(unit.target, target_dom)
            else:
                # untranslated unit: fall back to the source text
                target_dom = add_node_content(unit.source, target_dom)
            return (source_dom, target_dom)
        make_parse_state = lambda: ParseState(NO_TRANSLATE_ELEMENTS,
                                              INLINE_ELEMENTS)
        for filename, dom_tree in dom_trees.iteritems():
            file_unit_tree = unit_trees[filename]
            # apply the translations in place on each file's DOM tree
            apply_translations(dom_tree.getroot(), file_unit_tree,
                               replace_dom_text(make_parse_state,
                                                dom_retriever=get_po_doms,
                                                process_translatable=process_idml_translatable))
        return dom_trees
    dom_trees = load_dom_trees(template)
    unit_trees = load_unit_tree(input_file)
    return translate_dom_trees(unit_trees, dom_trees)
def write_idml(template_zip, output_file, dom_trees):
    """Write the translated IDML package.

    Copies everything from *template_zip* except the files present in
    *dom_trees*, which are serialized from their (translated) etrees.
    """
    output_zip = ZipFile(output_file, 'w', compression=ZIP_DEFLATED)
    # Copy the IDML package.
    output_zip = copy_idml(template_zip, output_zip, dom_trees.keys())
    # Replace the translated files in the IDML package.
    for filename, dom_tree in dom_trees.iteritems():
        output_zip.writestr(filename, etree.tostring(dom_tree,
                                                     encoding='UTF-8',
                                                     xml_declaration=True,
                                                     standalone='yes'))
def convertpo(input_file, output_file, template):
    """Create a translated IDML using an IDML template and a PO file."""
    # Since the convertoptionsparser will give us a open files, we risk that
    # they could have been opened in non-binary mode on Windows, and then we'll
    # have problems, so let's make sure we have what we want.
    # NOTE(review): file() is the Python 2 builtin; this module is py2-only.
    template.close()
    template = file(template.name, mode='rb')
    output_file.close()
    output_file = file(output_file.name, mode='wb')
    # Now proceed with the conversion.
    template_zip = ZipFile(template, 'r')
    # only story files inside the package carry translatable text
    translatable_files = [filename for filename in template_zip.namelist()
                          if filename.startswith('Stories/')]
    po_data = input_file.read()
    dom_trees = translate_idml(template, StringIO(po_data), translatable_files)
    write_idml(template_zip, output_file, dom_trees)
    output_file.close()
    # convert framework expects a truthy value on success
    return True
def main(argv=None):
    """Command-line entry point: wire the po -> idml conversion into the
    translate toolkit converter framework."""
    conversion_map = {('po', 'idml'): ("idml", convertpo)}
    option_parser = convert.ConvertOptionParser(conversion_map,
                                                usetemplates=True,
                                                description=__doc__)
    option_parser.run(argv)
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| mpl-2.0 |
stefanreuther/bob | pym/bob/cmds/build/clean.py | 1 | 8870 | # Bob build tool
# Copyright (C) 2016-2018 TechniSat Digital GmbH
#
# SPDX-License-Identifier: GPL-3.0-or-later
from ...input import RecipeSet
from ...scm import getScm, ScmTaint, ScmStatus
from ...state import BobState
from ...tty import colorize, ERROR, WARNING, EXECUTED, DEFAULT
from ...utils import removePath, processDefines
import argparse
import os
from .builder import LocalBuilder
from .state import DevelopDirOracle
__all__ = ['doClean']
UNKNOWN = ScmStatus(ScmTaint.unknown)
def collectPaths(rootPackage):
    """Return the set of workspace paths still in use by the package tree.

    Every package reachable from *rootPackage* is visited exactly once.
    Checkout workspaces always count as used; build and package workspaces
    only count when no state is recorded yet or the recorded state still
    matches the step's current variant id.
    """
    inUse = set()
    visited = set()
    pending = [rootPackage]
    while pending:
        package = pending.pop()
        pkgId = package._getId()
        if pkgId in visited:
            continue
        visited.add(pkgId)

        checkoutStep = package.getCheckoutStep()
        if checkoutStep.isValid():
            inUse.add(checkoutStep.getWorkspacePath())

        # Build workspace state is stored as a list whose first entry is the
        # incremental variant id; keep the path only if it still matches.
        buildStep = package.getBuildStep()
        if buildStep.isValid():
            path = buildStep.getWorkspacePath()
            recorded = BobState().getDirectoryState(path, False)
            if (recorded is None) or (buildStep.getVariantId() == recorded[0]):
                inUse.add(path)

        # Package workspace state is the variant id itself.
        packageStep = package.getPackageStep()
        path = packageStep.getWorkspacePath()
        recorded = BobState().getDirectoryState(path, False)
        if (recorded is None) or (packageStep.getVariantId() == recorded):
            inUse.add(path)

        for dep in package.getDirectDepSteps():
            pending.append(dep.getPackage())

    return inUse
def checkSCM(workspace, scmDir, scmSpec, verbose):
    """Return True if the SCM checkout below *workspace* may be deleted.

    A ``scmDir`` of None means there is nothing to check and the directory
    is always expendable.  Without a recorded ``scmSpec`` the status falls
    back to the module-level UNKNOWN sentinel.  With *verbose* a colorized
    one-line status is printed for the user.
    """
    if scmDir is None:
        return True

    if scmSpec is not None:
        status = getScm(scmSpec).status(workspace)
    else:
        status = UNKNOWN

    if verbose:
        flags = str(status)
        # Pick the severity color: errors > non-expendable warnings > rest.
        if status.error:
            color = ERROR
        elif not status.expendable:
            color = WARNING
        else:
            color = EXECUTED if flags else DEFAULT
        location = workspace if scmDir == "." else os.path.join(workspace, scmDir)
        print(colorize("STATUS {0: <4} {1}".format(flags, location), color))

    return status.expendable
def checkRegularSource(workspace, verbose):
    """Return True if every SCM of the source *workspace* is expendable.

    Deliberately evaluates *all* SCMs even after the first failure so that
    the verbose status of each of them gets printed.
    """
    dirState = BobState().getDirectoryState(workspace, True)
    results = [checkSCM(workspace, scmDir, scmSpec, verbose)
               for scmDir, (scmDigest, scmSpec) in dirState.items()]
    return all(results)
def checkAtticSource(workspace, verbose):
    """Return True if the attic directory *workspace* may be deleted."""
    scmSpec = BobState().getAtticDirectoryState(workspace)
    # The attic directory already is the final checkout location, so a stale
    # 'dir' property must not be applied again.  Old projects might have
    # stored no spec at all (None), hence the truthiness guard.
    if scmSpec:
        scmSpec.pop('dir', None)
    return checkSCM(workspace, ".", scmSpec, verbose)
def doClean(argv, bobRoot):
    """Implement the ``bob clean`` command.

    Parses *argv*, computes which workspace directories are still in use for
    the selected mode (develop/release/attic) and removes the rest, honouring
    --dry-run, --force and the source-workspace policy (-s).  *bobRoot* is
    accepted for command-interface symmetry; it is not used in this function.
    """
    parser = argparse.ArgumentParser(prog="bob clean",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""Clean unused directories.
This command removes currently unused directories from previous bob dev/build
invocations. By default only 'build' and 'package' steps are evicted. Adding
'-s' will clean 'checkout' steps too. Make sure that you have checked in (and
pushed) all your changes, tough. When in doubt add '--dry-run' to see what
would get removed without actually deleting that already.
""")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--develop', action='store_const', const='develop', dest='mode',
        help="Clean developer mode directories (dev/..., default)", default='develop')
    group.add_argument('--release', action='store_const', const='release', dest='mode',
        help="Clean release mode directories (work/...)")
    group.add_argument('--attic', action='store_const', const='attic', dest='mode',
        help="Clean attic directories (dev/.../attic_*)")
    parser.add_argument('-c', dest="configFile", default=[], action='append',
        help="Use config File")
    parser.add_argument('-D', default=[], action='append', dest="defines",
        help="Override default environment variable")
    parser.add_argument('--dry-run', default=False, action='store_true',
        help="Don't delete, just print what would be deleted")
    parser.add_argument('-f', '--force', default=False, action='store_true',
        help="Force deletion of unknown/unclean SCMs")
    parser.add_argument('-s', '--src', default=False, action='store_true',
        help="Clean source workspaces too")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--sandbox', action='store_true', default=None,
        help="Enable sandboxing")
    group.add_argument('--no-sandbox', action='store_false', dest='sandbox',
        help="Disable sandboxing")
    parser.add_argument('-v', '--verbose', default=False, action='store_true',
        help="Print what is done")
    args = parser.parse_args(argv)
    defines = processDefines(args.defines)
    develop = args.mode != 'release'
    # Sandbox defaults to on for release mode, off for develop mode.
    if args.sandbox is None:
        args.sandbox = not develop
    recipes = RecipeSet()
    recipes.defineHook('releaseNameFormatter', LocalBuilder.releaseNameFormatter)
    recipes.defineHook('developNameFormatter', LocalBuilder.developNameFormatter)
    recipes.defineHook('developNamePersister', None)
    recipes.setConfigFiles(args.configFile)
    recipes.parse()
    # Get directory name formatter into shape
    if develop:
        nameFormatter = recipes.getHook('developNameFormatter')
        developPersister = DevelopDirOracle(nameFormatter, recipes.getHook('developNamePersister'))
        nameFormatter = developPersister.getFormatter()
    else:
        # Special read-only "persister" that does not create new entries. The
        # actual formatter is irrelevant.
        nameFormatter = LocalBuilder.releaseNameInterrogator
    nameFormatter = LocalBuilder.makeRunnable(nameFormatter)
    packages = recipes.generatePackages(nameFormatter, defines, args.sandbox)
    if develop: developPersister.prime(packages)
    if args.mode == 'attic':
        # Attic mode: every existing attic directory that is expendable (or
        # forced) may go.
        delPaths = sorted(d for d in BobState().getAtticDirectories()
            if os.path.exists(d) and (args.force or checkAtticSource(d, args.verbose)))
    else:
        if args.mode == 'release':
            # collect all used paths
            usedPaths = collectPaths(packages.getRootPackage())
            # get all known release paths
            allPaths = [ (os.path.join(dir, "workspace"), isSourceDir)
                for (dir, isSourceDir) in BobState().getAllNameDirectores() ]
        elif args.mode == 'develop':
            # collect all used paths
            usedPaths = collectPaths(packages.getRootPackage())
            # Determine all known develop paths. Bob does not directly store
            # this information. We start with all known directories and
            # subtract the release paths. Source workspaces are detected by
            # their state being a dict (instead of a bytes object).
            releasePaths = set(os.path.join(dir, "workspace")
                for (dir, isSourceDir) in BobState().getAllNameDirectores())
            allPaths = [
                (dir, isinstance(BobState().getDirectoryState(dir, False), dict))
                for dir in BobState().getDirectories()
                if dir not in releasePaths ]
        # Source workspace policy
        if args.src:
            if args.force:
                mayClean = lambda d: True
            else:
                mayClean = lambda d: checkRegularSource(d, args.verbose)
        else:
            mayClean = lambda d: False
        # Remove non-existent directories and source workspaces that are not
        # allowed to be touched.
        delPaths = sorted(d for (d, isSourceDir) in allPaths
            if (d not in usedPaths) and os.path.exists(d) and
                (not isSourceDir or mayClean(d)))
    # Finally delete unused directories.
    BobState().setAsynchronous()
    try:
        for d in delPaths:
            if args.verbose or args.dry_run:
                print("rm", d)
            if not args.dry_run:
                removePath(d)
                # Only drop the recorded state when the path was really gone.
                if args.mode == 'attic':
                    BobState().delAtticDirectoryState(d)
                else:
                    BobState().delDirectoryState(d)
        # cleanup BobState() of non-existent directories
        if not args.dry_run:
            for d in BobState().getDirectories():
                if not os.path.exists(d): BobState().delDirectoryState(d)
            for d in BobState().getAtticDirectories():
                if not os.path.exists(d): BobState().delAtticDirectoryState(d)
    finally:
        BobState().setSynchronous()
| gpl-3.0 |
arborh/tensorflow | tensorflow/lite/testing/op_tests/less_equal.py | 4 | 2557 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for less_equal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_less_equal_tests(options):
  """Generate zipped test cases for the less_equal operator.

  Covers float32/int32/int64 inputs over equal-shaped pairs as well as
  broadcasting shape combinations.
  """
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Create two placeholders and compare them with tf.less_equal."""
    lhs = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape_pair"][0])
    rhs = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_pair"][1])
    comparison = tf.less_equal(lhs, rhs)
    return [lhs, rhs], [comparison]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random tensors matching the parameterized dtype and shapes."""
    feed_values = [
        create_tensor_data(parameters["input_dtype"],
                           parameters["input_shape_pair"][i])
        for i in (0, 1)
    ]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
| apache-2.0 |
adykstra/mne-python | tutorials/epochs/plot_visualize_epochs.py | 10 | 5143 | """
.. _tut_viz_epochs:
Visualize Epochs data
=====================
"""
# sphinx_gallery_thumbnail_number = 7
import os.path as op
import mne
# Load the sample dataset, low-pass filter it and build epochs around the
# experiment's trigger events.
data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample')
raw = mne.io.read_raw_fif(
    op.join(data_path, 'sample_audvis_raw.fif'), preload=True)
raw.load_data().filter(None, 9, fir_design='firwin')
raw.set_eeg_reference('average', projection=True)  # set EEG average reference
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
            'visual/right': 4, 'smiley': 5, 'button': 32}
events = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif'))
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=.5)

###############################################################################
# This tutorial focuses on visualization of epoched data. All of the functions
# introduced here are basically high level matplotlib functions with built in
# intelligence to work with epoched data. All the methods return a handle to
# matplotlib figure instance.
#
# Events used for constructing the epochs here are the triggers for subject
# being presented a smiley face at the center of the visual field. More of the
# paradigm at :ref:`BABDHIFJ`.
#
# All plotting functions start with ``plot``. Let's start with the most
# obvious. :func:`mne.Epochs.plot` offers an interactive browser that allows
# rejection by hand when called in combination with a keyword ``block=True``.
# This blocks the execution of the script until the browser window is closed.
epochs.plot(block=True)

###############################################################################
# The numbers at the top refer to the event id of the epoch. The number at the
# bottom is the running numbering for the epochs.
#
# Since we did no artifact correction or rejection, there are epochs
# contaminated with blinks and saccades. For instance, epoch number 1 seems to
# be contaminated by a blink (scroll to the bottom to view the EOG channel).
# This epoch can be marked for rejection by clicking on top of the browser
# window. The epoch should turn red when you click it. This means that it will
# be dropped as the browser window is closed.
#
# It is possible to plot event markers on epoched data by passing ``events``
# keyword to the epochs plotter. The events are plotted as vertical lines and
# they follow the same coloring scheme as :func:`mne.viz.plot_events`. The
# events plotter gives you all the events with a rough idea of the timing.
# Since the colors are the same, the event plotter can also function as a
# legend for the epochs plotter events. It is also possible to pass your own
# colors via ``event_colors`` keyword. Here we can plot the reaction times
# between seeing the smiley face and the button press (event 32).
#
# When events are passed, the epoch numbering at the bottom is switched off by
# default to avoid overlaps. You can turn it back on via settings dialog by
# pressing `o` key. You should check out `help` at the lower left corner of the
# window for more information about the interactive features.
events = mne.pick_events(events, include=[5, 32])
mne.viz.plot_events(events)
epochs['smiley'].plot(events=events)

###############################################################################
# To plot individual channels as an image, where you see all the epochs at one
# glance, you can use function :func:`mne.Epochs.plot_image`. It shows the
# amplitude of the signal over all the epochs plus an average (evoked response)
# of the activation. We explicitly set interactive colorbar on (it is also on
# by default for plotting functions with a colorbar except the topo plots). In
# interactive mode you can scale and change the colormap with mouse scroll and
# up/down arrow keys. You can also drag the colorbar with left/right mouse
# button. Hitting space bar resets the scale.
epochs.plot_image(278, cmap='interactive', sigma=1., vmin=-250, vmax=250)

###############################################################################
# We can also give an overview of all channels by calculating the global
# field power (or other aggregation methods). However, combining
# multiple channel types (e.g., MEG and EEG) in this way is not sensible.
# Instead, we can use the ``group_by`` parameter. Setting ``group_by`` to
# 'type' combines channels by type.
# ``group_by`` can also be used to group channels into arbitrary groups, e.g.
# regions of interests, by providing a dictionary containing
# group name -> channel indices mappings.
epochs.plot_image(combine='gfp', group_by='type', sigma=2., cmap="YlGnBu_r")

###############################################################################
# You also have functions for plotting channelwise information arranged into a
# shape of the channel array. The image plotting uses automatic scaling by
# default, but noisy channels and different channel types can cause the scaling
# to be a bit off. Here we define the limits by hand.
epochs.plot_topo_image(vmin=-250, vmax=250, title='ERF images', sigma=2.,
                       fig_facecolor='w', font_color='k')
lizardsystem/lizard-fewsnorm | lizard_fewsnorm/management/commands/sync_aqmad.py | 1 | 2050 | #!/usr/bin/python
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt.
from optparse import make_option
from django.core.management.base import BaseCommand
from lizard_fewsnorm.models import FewsNormSource
from django.db import transaction
from lizard_security.models import DataSet
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Synchronize the AqmadCache from the configured FewsNorm datasources.
    """
    # Fixed: the example previously referenced the unrelated
    # sync_track_records command (copy-paste error).
    help = ("Example: bin/django sync_aqmad "\
            "--db_name=fewsnorm1 --data_set=MyDataSet")
    option_list = BaseCommand.option_list + (
        make_option('--db_name',
                    help='name of fewsnorm database, optionally',
                    type='str',
                    default=None),
        make_option('--data_set',
                    help='name of the data set',
                    type='str',
                    default=None))

    def get_sources(self, source):
        """Return the FewsNormSource queryset to synchronize.

        Without *source* all active sources are taken; otherwise only the
        source whose database_name matches (active or not).
        """
        if source is None:
            logger.info("No database provided, taking all FewsNormSource "
                        "entries.")
            return FewsNormSource.objects.filter(active=True)
        else:
            logger.info("Filtering FewsNormSource on database_name='%s'." %
                        source)
            # Note: you can also sync non-active sources.
            return FewsNormSource.objects.filter(database_name=source)

    @transaction.commit_on_success
    def handle(self, *args, **options):
        """Entry point: sync every selected source inside one transaction."""
        data_set_name = options["data_set"]
        if data_set_name:
            # The lookup validates that the data set exists (raises
            # DataSet.DoesNotExist otherwise); the object itself is unused.
            data_set = DataSet.objects.get(name=data_set_name)
        else:
            data_set = None
        sources = self.get_sources(options["db_name"])
        if not sources:
            logger.info("No databases selected. Check your database "
                        "settings and db_name (if provided) .")
        for source in sources:
            logger.debug(
                'Updating AqmadCache for %s...', source.name,
            )
            source.sync_aqmad()
| gpl-3.0 |
ubic135/odoo-design | addons/lunch/wizard/lunch_validation.py | 440 | 1296 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_validation(osv.Model):
    """Wizard model used to validate (confirm) lunch orders."""
    _name = 'lunch.validation'
    _description = 'lunch validation for order'

    def confirm(self, cr, uid, ids, context=None):
        # Delegate confirmation to the order-line model, which owns the logic.
        order_line_model = self.pool.get('lunch.order.line')
        return order_line_model.confirm(cr, uid, ids, context=context)
| agpl-3.0 |
jwilk/rss2email | setup.py | 4 | 2402 | # Copyright (C) 2012-2013 Arun Persaud <apersaud@lbl.gov>
# W. Trevor King <wking@tremily.us>
#
# This file is part of rss2email.
#
# rss2email is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) version 3 of
# the License.
#
# rss2email is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# rss2email. If not, see <http://www.gnu.org/licenses/>.
"A python script that converts RSS/Atom newsfeeds to email"
import codecs as _codecs
from distutils.core import setup
import os.path as _os_path
from rss2email import __version__, __url__, __author__, __email__
# Directory containing this setup script; used to locate the README.
_this_dir = _os_path.dirname(__file__)

# Read the long description up front with a context manager so the README
# file handle is closed promptly (previously the handle opened inside the
# setup() call was never closed).
with _codecs.open(_os_path.join(_this_dir, 'README'), 'r',
                  encoding='utf-8') as _readme:
    _long_description = _readme.read()

setup(
    name='rss2email',
    version=__version__,
    maintainer=__author__,
    maintainer_email=__email__,
    url=__url__,
    download_url='https://github.com/wking/rss2email/archive/v{}.tar.gz'.format(__version__),
    license='GNU General Public License (GPL)',
    platforms=['all'],
    description=__doc__,
    long_description=_long_description,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Communications :: Email',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages=['rss2email', 'rss2email.post_process'],
    scripts=['r2e'],
    provides=['rss2email'],
    requires=[
        'feedparser (>=5.0.1)',
        'html2text (>=3.0.1)',
    ],
)
| gpl-2.0 |
actionfiguredaniel/eventall | crawler/twitter_spliter.py | 1 | 1609 | import json
import datetime
import urllib.parse
import mysql.connector
from eventall2016_credentials import *
def tweet_spliter():
    """Replay buffered tweets from twitter_stream.json into the database.

    Lines before index 700000 are skipped (presumably already processed in
    an earlier run); each later line is parsed as JSON and handed to
    db_post().  Lines whose database insert fails are appended to
    twitter_stream_errors.json.

    NOTE(review): the stream file is rewound and then truncated at the end,
    i.e. twitter_stream.json is emptied once the replay finishes — confirm
    this is intended before rerunning.
    """
    stream = open('twitter_stream.json', 'r+')
    try:
        lines = stream.readlines()
        # Rewind so the final truncate() shortens the file from position 0.
        # (The original code spelled this as "--f.seek(0)": the "--" was a
        # behavior-neutral double unary negation of seek()'s return value,
        # most likely a botched attempt to comment the call out.)
        stream.seek(0)
        for count, data in enumerate(lines):
            if count >= 700000:
                data = data.strip(',')
                if len(data) < 2:
                    continue
                print('\n' + str(count))
                try:
                    json_data = json.loads(data)
                except ValueError:
                    # Malformed JSON line; skip it.  json.loads raises
                    # ValueError (JSONDecodeError), so no bare except needed.
                    continue
                if not db_post(json_data):
                    # Keep failed inserts for later inspection/replay.
                    with open('twitter_stream_errors.json', 'a') as errors:
                        errors.write(data)
        stream.truncate()
    finally:
        stream.close()
def db_post(data):
    """Insert one decoded tweet into the twitter2016 table.

    *data* is mutated in place: 'data' receives the serialized JSON,
    'text' is URL-quoted and 'created_at' is reformatted to a MySQL
    DATETIME string.  Returns True on success, False when the INSERT
    failed (the transaction is rolled back in that case).
    """
    # Serialize the tweet before the fields below are rewritten.  The quote
    # replacement originally served as ad-hoc SQL escaping; it is kept so
    # that newly stored JSON stays byte-compatible with rows written by the
    # old code.
    data['data'] = json.dumps(data).replace('"', "''")
    data['text'] = urllib.parse.quote(data['text'])
    data['created_at'] = datetime.datetime.strptime(
        data['created_at'], '%a %b %d %H:%M:%S %z %Y').strftime('%Y-%m-%d %H:%M:%S')
    config = eventall2016_credentials()
    # Parameterized query: tweet content is untrusted external input and
    # must never be interpolated into the SQL string (SQL injection risk).
    query = ("INSERT INTO twitter2016 (tweet_timestamp, tweet_text, tweet_json) "
             "VALUES (%s, %s, %s);")
    params = (data['created_at'], data['text'], data['data'])
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    try:
        cursor.execute(query, params)
        cnx.commit()
        status = True
    except mysql.connector.Error:
        cnx.rollback()
        status = False
    finally:
        cursor.close()
        cnx.close()
    print('Post status: ' + str(status))
    return status
# Run the replay when executed as a script.
if __name__ == '__main__':
    tweet_spliter()
| gpl-3.0 |
terkaa/linuxcnc | src/hal/user_comps/vismach/xyzbc-trt-gui.py | 6 | 5716 | #!/usr/bin/python
#**************************************************************************
# Copyright 2016 Rudy du Preez <rudy@asmsa.co.za>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#**************************************************************************
#--------------------------------------------------------------------------
# Visualization model of the Hermle mill, as modified to 5-axis
# with rotary axes B and C added, with moving spindle head
# and rotary axis offsets
#--------------------------------------------------------------------------
from vismach import *
import hal
import math
import sys
# HAL component: one input pin per machine joint/offset; the GUI reads these
# pins each cycle and poses the model accordingly.
c = hal.component("xyzbc-trt-gui")
# table-x
c.newpin("table-x", hal.HAL_FLOAT, hal.HAL_IN)
# saddle-y
c.newpin("saddle-y", hal.HAL_FLOAT, hal.HAL_IN)
# head vertical slide
c.newpin("spindle-z", hal.HAL_FLOAT, hal.HAL_IN)
# table-x tilt-b
c.newpin("tilt-b", hal.HAL_FLOAT, hal.HAL_IN)
# rotary table-x
c.newpin("rotate-c", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("z-offset", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("x-offset", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("tool-offset", hal.HAL_FLOAT, hal.HAL_IN)
c.ready()

# Python 2 exec statement: allows overriding module globals from the command
# line (e.g. geometry tweaks passed as "name=value" strings).
for setting in sys.argv[1:]: exec setting

# --- tool and spindle head ---------------------------------------------
tooltip = Capture()
tool = Collection([
    tooltip,
    CylinderZ(0,0.2,6,3),
    CylinderZ(6,3,70,3)
])
tool = Translate([tool],0,0,-20)
tool = Color([1,0,0,0], [tool] )
tool = HalTranslate([tool],c,"tool-offset",0,0,-1)
spindle = Collection([
    # spindle nose and/or toolholder
    Color([0,0.5,0.5,0], [CylinderZ( 0, 10, 20, 15)]),
    # spindle housing
    CylinderZ( 20, 20, 135, 20),
])
spindle = Color([0,0.5,0.5,0], [spindle])
spindle = Collection([ tool, spindle ])
spindle = Translate([spindle],0,0,20)
# spindle motor
motor = Collection([
    Color([0,0.5,0.5,0],
          [CylinderZ(135,30,200,30)])
])
motor = Translate([motor],0,60,0)
head = Collection([
    spindle,
    # head, holds spindle
    Color([0,0.5,0.5,0], [Box( -30, -30, 60, 30, 130, 135 )]),
    motor
])
head = Translate([head],0,0,150)
head = HalTranslate([head],c,"spindle-z",0,0,1)

# --- rotary table (C) on tilting trunnion (B) --------------------------
work = Capture()
ctable = Collection([
    work,
    CylinderZ(-18, 50, 0, 50),
    # cross
    Color([1,1,1,0], [CylinderX(-50,1,50,1)]),
    Color([1,1,1,0], [CylinderY(-50,1,50,1)]),
    # lump on one side
    Color([1,1,1,0], [Box( -4, -42, -20, 4, -51, 5)])
])
ctable = HalRotate([ctable],c,"rotate-c",1,0,0,1)
ctable = Color([1,0,1,0], [ctable] )
crotary = Collection([
    ctable,
    # # rotary table base - part under table
    Color([0.3,0.5,1,0], [Box(-50,-50, -30, 50, 50, -18)])
])
crotary = HalTranslate([crotary],c,"x-offset",0,1,0)
crotary = HalTranslate([crotary],c,"z-offset",0,0,-1)
yoke = Collection([
    # trunnion plate
    Color([1,0.5,0,0], [Box(-65,-40,-35,65,40,-25)]),
    # side plate left
    Color([1,0.5,0,0], [Box(-65,-40,-35,-55,40,0)]),
    # side plate right
    Color([1,0.5,0,0], [Box(55,-40,-35,65,40,0)])
])
trunnion = Collection([
    Color([1,0.5,0,0],[CylinderX(-78,20,-55,20)]),
    Color([1,0.5,0,0],[CylinderX(55,15,70,15)]),
    # mark on drive side
    Color([1,1,1,0], [Box(-80,-20,-1,-78,20,1)])
])
arotary = Collection([ crotary, yoke, trunnion ])
arotary = Rotate([arotary],90,0,0,1)
arotary = HalRotate([arotary],c,"tilt-b",1,0,1,0)
arotary = HalTranslate([arotary],c,"x-offset",1,0,0)
arotary = HalTranslate([arotary],c,"z-offset",0,0,1)
brackets = Collection([
    # a bracket left side
    Box(-77,-40,-50,-67,40,0),
    # a bracket right side
    Box(77,-40,-50,67,40,0),
    # mounting plate
    Box(77,40,-52,-77,-40,-40)
])
brackets = Rotate([brackets],90,0,0,1)
brackets = HalTranslate([brackets],c,"x-offset",1,0,0)
brackets = HalTranslate([brackets],c,"z-offset",0,0,1)

# --- linear axes --------------------------------------------------------
# main table - for three axis, put work here instead of rotary
table = Collection([
    arotary,
    brackets,
    # body of table
    Box(-150,-50, -69, 150, 50, -52),
    # ways
    Box(-150,-40, -75, 150, 40, -69)
])
table = HalTranslate([table],c,"table-x",-1,0,0)
table = Color([0.4,0.4,0.4,0], [table] )
saddle = Collection([
    table,
    #
    Box(-75,-53, -105, 75, 53, -73),
])
saddle = HalTranslate([saddle],c,"saddle-y",0,-1,0)
saddle = Color([0.8,0.8,0.8,0], [saddle] )
yslide = Collection([
    saddle,
    Box(-50, -100, -180, 50, 120, -105),
])
# default Z position is with the workpoint lined up with the toolpoint
yslide = Translate([yslide], 0, 0, 150)
yslide = Color([0,1,0,0], [yslide] )
base = Collection([
    head,
    # base
    Box(-120, -100, -200, 120, 160, -30),
    # column
    Box(-50, 120, -200, 50, 220, 360)
])
base = Color([0,1,0,0], [base] )
model = Collection([yslide, base])

# On-screen heads-up display and main loop.
myhud = Hud()
myhud.show("xyzbc: 3/4/16")
main(model, tooltip, work, 500, hud=myhud)
| gpl-2.0 |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/chardet/langgreekmodel.py | 269 | 12688 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# Byte -> frequency-order mapping for ISO-8859-7 (Latin-7) Greek text.
# Each of the 16 rows covers 16 byte values (row comment gives the high
# nibble). Special sentinel values per the header above: 255 control,
# 254 CR/LF, 253 non-word symbol, 252 digit.
Latin7_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123,  # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
)

# Same mapping for the Windows-1253 code page; differs from Latin-7 only in
# a handful of slots in the a0/b0 rows.
win1253_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123,  # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
#
# Bigram precedence matrix for Greek: entry values (0-3) encode how typical
# a pair of frequency-ordered characters is in Greek text.  This matrix is
# shared by the ISO-8859-7 and windows-1253 models in this module.
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Language model for Greek text encoded as ISO-8859-7 (Latin-7).
# 'char_to_order_map' (Latin7_char_to_order_map, defined earlier in this
# module) ranks byte values by letter frequency; 'precedence_matrix' is the
# GreekLangModel bigram table shared by all Greek encodings here.
Latin7GreekModel = {
  'char_to_order_map': Latin7_char_to_order_map,
  'precedence_matrix': GreekLangModel,
  'typical_positive_ratio': 0.982851,
  'keep_english_letter': False,
  'charset_name': "ISO-8859-7",
  'language': 'Greek',
}
# Language model for Greek text encoded as windows-1253.  It differs from
# the ISO-8859-7 model only in the byte-to-order map; the bigram
# precedence matrix (GreekLangModel) and statistics are identical.
Win1253GreekModel = {
  'char_to_order_map': win1253_char_to_order_map,
  'precedence_matrix': GreekLangModel,
  'typical_positive_ratio': 0.982851,
  'keep_english_letter': False,
  'charset_name': "windows-1253",
  'language': 'Greek',
}
| apache-2.0 |
simonwydooghe/ansible | test/units/modules/network/fortios/test_fortios_log_fortiguard_override_setting.py | 21 | 7214 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_log_fortiguard_override_setting
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture: replace the module's Connection class with a mock
    so no real FortiOS device is ever contacted."""
    return mocker.patch('ansible.modules.network.fortios.fortios_log_fortiguard_override_setting.Connection')
fos_instance = FortiOSHandler(connection_mock)
def test_log_fortiguard_override_setting_creation(mocker):
    """A successful 'set' API call must be reported as changed, not an error."""
    # Stub both FortiOSHandler entry points; only set() should be exercised.
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    api_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                            return_value=api_result)

    module_args = {
        'username': 'admin',
        'state': 'present',
        'log_fortiguard_override_setting': {
            'override': 'enable',
            'status': 'enable',
            'upload_day': 'test_value_5',
            'upload_interval': 'daily',
            'upload_option': 'store-and-upload',
            'upload_time': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortiguard_override_setting.fortios_log_fortiguard(module_args, fos_instance)

    # Underscored playbook keys must be translated to the dashed API names.
    expected_payload = {
        'override': 'enable',
        'status': 'enable',
        'upload-day': 'test_value_5',
        'upload-interval': 'daily',
        'upload-option': 'store-and-upload',
        'upload-time': 'test_value_8'
    }
    set_mock.assert_called_with('log.fortiguard', 'override-setting', data=expected_payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_log_fortiguard_override_setting_creation_fails(mocker):
    """A 500 response from 'set' must surface as an error with no change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    api_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                            return_value=api_result)

    module_args = {
        'username': 'admin',
        'state': 'present',
        'log_fortiguard_override_setting': {
            'override': 'enable',
            'status': 'enable',
            'upload_day': 'test_value_5',
            'upload_interval': 'daily',
            'upload_option': 'store-and-upload',
            'upload_time': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortiguard_override_setting.fortios_log_fortiguard(module_args, fos_instance)

    # The request payload is built identically regardless of the outcome.
    expected_payload = {
        'override': 'enable',
        'status': 'enable',
        'upload-day': 'test_value_5',
        'upload-interval': 'daily',
        'upload-option': 'store-and-upload',
        'upload-time': 'test_value_8'
    }
    set_mock.assert_called_with('log.fortiguard', 'override-setting', data=expected_payload, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_log_fortiguard_override_setting_idempotent(mocker):
    """A 404 'already in desired state' response is neither an error nor a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    api_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                            return_value=api_result)

    module_args = {
        'username': 'admin',
        'state': 'present',
        'log_fortiguard_override_setting': {
            'override': 'enable',
            'status': 'enable',
            'upload_day': 'test_value_5',
            'upload_interval': 'daily',
            'upload_option': 'store-and-upload',
            'upload_time': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortiguard_override_setting.fortios_log_fortiguard(module_args, fos_instance)

    expected_payload = {
        'override': 'enable',
        'status': 'enable',
        'upload-day': 'test_value_5',
        'upload-interval': 'daily',
        'upload-option': 'store-and-upload',
        'upload-time': 'test_value_8'
    }
    set_mock.assert_called_with('log.fortiguard', 'override-setting', data=expected_payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_log_fortiguard_override_setting_filter_foreign_attributes(mocker):
    """Keys not in the module schema must be dropped from the API payload."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    api_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                            return_value=api_result)

    module_args = {
        'username': 'admin',
        'state': 'present',
        'log_fortiguard_override_setting': {
            # This key is not part of the schema and must be filtered out.
            'random_attribute_not_valid': 'tag',
            'override': 'enable',
            'status': 'enable',
            'upload_day': 'test_value_5',
            'upload_interval': 'daily',
            'upload_option': 'store-and-upload',
            'upload_time': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_fortiguard_override_setting.fortios_log_fortiguard(module_args, fos_instance)

    # The foreign attribute must not appear in the payload sent to set().
    expected_payload = {
        'override': 'enable',
        'status': 'enable',
        'upload-day': 'test_value_5',
        'upload-interval': 'daily',
        'upload-option': 'store-and-upload',
        'upload-time': 'test_value_8'
    }
    set_mock.assert_called_with('log.fortiguard', 'override-setting', data=expected_payload, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
rpmcpp/Audacity | lib-src/lv2/lv2/plugins/eg03-metro.lv2/waflib/Tools/ruby.py | 316 | 3925 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Task,Options,Utils
from waflib.TaskGen import before_method,feature,after_method,Task,extension
from waflib.Configure import conf
@feature('rubyext')
@before_method('apply_incpaths','apply_lib_vars','apply_bundle','apply_link')
def init_rubyext(self):
    """Task-generator method: seed the defaults for building a Ruby extension
    (install location and the RUBY/RUBYEXT uselib entries)."""
    self.install_path = '${ARCHDIR_RUBY}'
    self.uselib = self.to_list(getattr(self, 'uselib', ''))
    for lib in ('RUBY', 'RUBYEXT'):
        if lib not in self.uselib:
            self.uselib.append(lib)
@feature('rubyext')
@before_method('apply_link','propagate_uselib')
def apply_ruby_so_name(self):
    """Make C and C++ shared objects follow the Ruby extension file pattern
    (rubyext_PATTERN, e.g. '%s.so' or '%s.bundle')."""
    pattern = self.env['rubyext_PATTERN']
    self.env['cshlib_PATTERN'] = pattern
    self.env['cxxshlib_PATTERN'] = pattern
@conf
def check_ruby_version(self, minver=()):
    """Find the ruby interpreter, record its version in env.RUBY_VERSION,
    and fail configuration if it is older than *minver* (a tuple of ints)."""
    if Options.options.rubybinary:
        # An explicit --with-ruby-binary overrides PATH lookup.
        self.env.RUBY = Options.options.rubybinary
    else:
        self.find_program('ruby', var='RUBY')
    interpreter = self.env.RUBY
    try:
        version = self.cmd_and_log([interpreter, '-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip()
    except Exception:
        self.fatal('could not determine ruby version')
    self.env.RUBY_VERSION = version
    try:
        ver = tuple(map(int, version.split(".")))
    except Exception:
        self.fatal('unsupported ruby version %r' % version)
    if minver:
        if ver < minver:
            self.fatal('ruby is too old %r' % ver)
        cver = '.'.join([str(x) for x in minver])
    else:
        cver = ver
    self.msg('Checking for ruby version %s' % str(minver or ''), cver)
@conf
def check_ruby_ext_devel(self):
    """Verify that the headers and libraries needed to build native Ruby
    extensions are available, and populate the *_RUBYEXT env variables
    (include paths, lib paths, compile/link flags, install directories).

    Requires check_ruby_version() and a C/C++ compiler to have been
    configured first; fails configuration otherwise.
    """
    if not self.env.RUBY:
        self.fatal('ruby detection is required first')
    if not self.env.CC_NAME and not self.env.CXX_NAME:
        self.fatal('load a c/c++ compiler first')
    version = tuple(map(int, self.env.RUBY_VERSION.split(".")))
    # Helpers querying the interpreter's rbconfig for build settings.
    def read_out(cmd):
        return Utils.to_list(self.cmd_and_log([self.env.RUBY, '-rrbconfig', '-e', cmd]))
    def read_config(key):
        return read_out('puts Config::CONFIG[%r]' % key)
    archdir = read_config('archdir')
    cpppath = archdir
    if version >= (1, 9, 0):
        # Ruby 1.9+ moved the headers out of archdir into rubyhdrdir.
        ruby_hdrdir = read_config('rubyhdrdir')
        # NOTE(review): cpppath aliases archdir, so these += calls also grow
        # archdir, and the header dirs end up in LIBPATH_RUBYEXT below.
        # Behavior preserved as-is; confirm before changing.
        cpppath += ruby_hdrdir
        cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])]
    self.check(header_name='ruby.h', includes=cpppath, errmsg='could not find ruby header file')
    self.env.LIBPATH_RUBYEXT = read_config('libdir')
    self.env.LIBPATH_RUBYEXT += archdir
    self.env.INCLUDES_RUBYEXT = cpppath
    self.env.CFLAGS_RUBYEXT = read_config('CCDLFLAGS')
    # Shared-object naming pattern, e.g. '%s.so' or '%s.bundle'.
    self.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0]
    # Strip the leading linker command words from LDSHARED, keeping flags only.
    flags = read_config('LDSHARED')
    while flags and flags[0][0] != '-':
        flags = flags[1:]
    if len(flags) > 1 and flags[1] == "ppc":
        # Presumably drops an '-arch ppc' pair from old OS X universal
        # builds -- TODO confirm.
        flags = flags[2:]
    self.env.LINKFLAGS_RUBYEXT = flags
    self.env.LINKFLAGS_RUBYEXT += read_config('LIBS')
    self.env.LINKFLAGS_RUBYEXT += read_config('LIBRUBYARG_SHARED')
    # Installation directories, overridable from the command line.
    if Options.options.rubyarchdir:
        self.env.ARCHDIR_RUBY = Options.options.rubyarchdir
    else:
        self.env.ARCHDIR_RUBY = read_config('sitearchdir')[0]
    if Options.options.rubylibdir:
        self.env.LIBDIR_RUBY = Options.options.rubylibdir
    else:
        self.env.LIBDIR_RUBY = read_config('sitelibdir')[0]
@conf
def check_ruby_module(self, module_name):
    """Fail configuration unless *module_name* can be require'd by ruby."""
    self.start_msg('Ruby module %s' % module_name)
    try:
        self.cmd_and_log([self.env['RUBY'], '-e', 'require \'%s\';puts 1' % module_name])
    except Exception:
        self.end_msg(False)
        self.fatal('Could not find the ruby module %r' % module_name)
    else:
        self.end_msg(True)
@extension('.rb')
def process(self, node):
    """Map each .rb source file to a run_ruby task."""
    self.create_task('run_ruby', node)
class run_ruby(Task.Task):
    """Task executing a ruby script, with the script's directory on the
    interpreter load path (-I)."""
    run_str = '${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}'
def options(opt):
    """Register the ruby-related command-line options on the option parser."""
    for flag, dest, help_text in (
            ('--with-ruby-archdir', 'rubyarchdir', 'Specify directory where to install arch specific files'),
            ('--with-ruby-libdir', 'rubylibdir', 'Specify alternate ruby library path'),
            ('--with-ruby-binary', 'rubybinary', 'Specify alternate ruby binary')):
        opt.add_option(flag, type='string', dest=dest, help=help_text)
| gpl-2.0 |
ooici/pyon | pyon/datastore/couchbase/datastore.py | 1 | 1315 | #!/usr/bin/env python
__author__ = 'Michael Meisinger, Seman Said'
from pyon.core.bootstrap import get_obj_registry, CFG
from pyon.core.object import IonObjectBase, IonObjectSerializer, IonObjectDeserializer
from pyon.datastore.datastore import DataStore
from pyon.datastore.couchdb.pyon_store import PyonCouchDataStoreMixin
from pyon.datastore.couchbase.base_store import CouchbaseDataStore
from pyon.util.log import log
class CouchbasePyonDataStore(CouchbaseDataStore, PyonCouchDataStoreMixin):
    """
    Pyon specialization of the Couchbase datastore: adds IonObject
    (de)serialization on top of the underlying base datastore.
    """
    def __init__(self, datastore_name=None, profile=None, config=None, scope=None, **kwargs):
        # NOTE(review): **kwargs is accepted but ignored -- presumably kept
        # for signature compatibility with sibling datastore classes; confirm.
        log.debug('__init__(datastore_name=%s, profile=%s, config=%s)', datastore_name, profile, config)

        effective_config = config or CFG.get_safe("server.couchdb")
        effective_profile = profile or DataStore.DS_PROFILE.BASIC
        CouchbaseDataStore.__init__(self, datastore_name=datastore_name,
                                    config=effective_config,
                                    profile=effective_profile,
                                    scope=scope)

        # IonObject serializers used to convert objects to/from stored form.
        self._io_serializer = IonObjectSerializer()
        self._io_deserializer = IonObjectDeserializer(obj_registry=get_obj_registry())
| bsd-2-clause |
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_long_future.py | 129 | 9026 | from __future__ import division
# When true division is the default, get rid of this and add it to
# test_long.py instead. In the meantime, it's too obscure to try to
# trick just part of test_long into using future division.
import sys
import random
import math
import unittest
from test.test_support import run_unittest
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
    float.__getformat__("double").startswith("IEEE"),
    "test requires IEEE 754 doubles")
# Parameters of the C double type, taken from the running interpreter.
DBL_MAX = sys.float_info.max
DBL_MAX_EXP = sys.float_info.max_exp
DBL_MIN_EXP = sys.float_info.min_exp
DBL_MANT_DIG = sys.float_info.mant_dig
# Overflow threshold used by truediv(): integer quotients >= this value
# cannot be rounded to a finite float.
DBL_MIN_OVERFLOW = 2**DBL_MAX_EXP - 2**(DBL_MAX_EXP - DBL_MANT_DIG - 1)
# pure Python version of correctly-rounded true division
def truediv(a, b):
    """Correctly-rounded true division for integers.

    Pure-Python reference implementation used to validate int/int -> float
    division: returns the float closest to the exact quotient a/b, breaking
    ties to even (IEEE 754 round-half-to-even).

    Raises:
        ZeroDivisionError: if b is zero.
        OverflowError: if the exact quotient is too large for a float.
    """
    # Derive the float parameters locally (identical to the module-level
    # DBL_* constants) so the helper is self-contained and testable.
    mant_dig = sys.float_info.mant_dig
    min_exp = sys.float_info.min_exp
    max_exp = sys.float_info.max_exp
    # Smallest integer magnitude (relative to b) whose quotient overflows.
    min_overflow = 2**max_exp - 2**(max_exp - mant_dig - 1)

    # The xor is negative exactly when a and b have opposite signs.
    negative = a ^ b < 0
    a, b = abs(a), abs(b)

    # exceptions: division by zero, overflow
    if not b:
        raise ZeroDivisionError("division by zero")
    if a >= min_overflow * b:
        raise OverflowError("int/int too large to represent as a float")

    # find integer d satisfying 2**(d - 1) <= a/b < 2**d
    d = a.bit_length() - b.bit_length()
    if d >= 0 and a >= 2**d * b or d < 0 and a * 2**-d >= b:
        d += 1

    # compute 2**-exp * a / b for suitable exp
    exp = max(d, min_exp) - mant_dig
    a, b = a << max(-exp, 0), b << max(exp, 0)
    q, r = divmod(a, b)

    # round-half-to-even: fractional part is r/b, which is > 0.5 iff
    # 2*r > b, and == 0.5 iff 2*r == b.
    if 2 * r > b or 2 * r == b and q % 2 == 1:
        q += 1
    result = math.ldexp(float(q), exp)
    return -result if negative else result
class TrueDivisionTests(unittest.TestCase):
    """Tests for long/long true division under `from __future__ import
    division` (Python 2), cross-checked against the pure-Python truediv()
    reference implementation above."""

    def test(self):
        huge = 1L << 40000
        mhuge = -huge
        self.assertEqual(huge / huge, 1.0)
        self.assertEqual(mhuge / mhuge, 1.0)
        self.assertEqual(huge / mhuge, -1.0)
        self.assertEqual(mhuge / huge, -1.0)
        self.assertEqual(1 / huge, 0.0)
        self.assertEqual(1L / huge, 0.0)
        self.assertEqual(1 / mhuge, 0.0)
        self.assertEqual(1L / mhuge, 0.0)
        self.assertEqual((666 * huge + (huge >> 1)) / huge, 666.5)
        self.assertEqual((666 * mhuge + (mhuge >> 1)) / mhuge, 666.5)
        self.assertEqual((666 * huge + (huge >> 1)) / mhuge, -666.5)
        self.assertEqual((666 * mhuge + (mhuge >> 1)) / huge, -666.5)
        self.assertEqual(huge / (huge << 1), 0.5)
        self.assertEqual((1000000 * huge) / huge, 1000000)

        namespace = {'huge': huge, 'mhuge': mhuge}

        for overflow in ["float(huge)", "float(mhuge)",
                         "huge / 1", "huge / 2L", "huge / -1", "huge / -2L",
                         "mhuge / 100", "mhuge / 100L"]:
            # If the "eval" does not happen in this module,
            # true division is not enabled
            with self.assertRaises(OverflowError):
                eval(overflow, namespace)

        for underflow in ["1 / huge", "2L / huge", "-1 / huge", "-2L / huge",
                          "100 / mhuge", "100L / mhuge"]:
            result = eval(underflow, namespace)
            self.assertEqual(result, 0.0, 'expected underflow to 0 '
                             'from {!r}'.format(underflow))

        for zero in ["huge / 0", "huge / 0L", "mhuge / 0", "mhuge / 0L"]:
            with self.assertRaises(ZeroDivisionError):
                eval(zero, namespace)

    def check_truediv(self, a, b, skip_small=True):
        """Verify that the result of a/b is correctly rounded, by
        comparing it with a pure Python implementation of correctly
        rounded division.  b should be nonzero."""
        a, b = long(a), long(b)

        # skip check for small a and b: in this case, the current
        # implementation converts the arguments to float directly and
        # then applies a float division.  This can give doubly-rounded
        # results on x87-using machines (particularly 32-bit Linux).
        if skip_small and max(abs(a), abs(b)) < 2**DBL_MANT_DIG:
            return

        try:
            # use repr so that we can distinguish between -0.0 and 0.0
            expected = repr(truediv(a, b))
        except OverflowError:
            expected = 'overflow'
        except ZeroDivisionError:
            expected = 'zerodivision'

        try:
            got = repr(a / b)
        except OverflowError:
            got = 'overflow'
        except ZeroDivisionError:
            got = 'zerodivision'
        self.assertEqual(expected, got, "Incorrectly rounded division {}/{}: "
                         "expected {}, got {}".format(a, b, expected, got))

    @requires_IEEE_754
    def test_correctly_rounded_true_division(self):
        # more stringent tests than those above, checking that the
        # result of true division of ints is always correctly rounded.
        # This test should probably be considered CPython-specific.

        # Exercise all the code paths not involving Gb-sized ints.
        # ... divisions involving zero
        self.check_truediv(123, 0)
        self.check_truediv(-456, 0)
        self.check_truediv(0, 3)
        self.check_truediv(0, -3)
        self.check_truediv(0, 0)
        # ... overflow or underflow by large margin
        self.check_truediv(671 * 12345 * 2**DBL_MAX_EXP, 12345)
        self.check_truediv(12345, 345678 * 2**(DBL_MANT_DIG - DBL_MIN_EXP))
        # ... a much larger or smaller than b
        self.check_truediv(12345*2**100, 98765)
        self.check_truediv(12345*2**30, 98765*7**81)
        # ... a / b near a boundary: one of 1, 2**DBL_MANT_DIG, 2**DBL_MIN_EXP,
        # 2**DBL_MAX_EXP, 2**(DBL_MIN_EXP-DBL_MANT_DIG)
        bases = (0, DBL_MANT_DIG, DBL_MIN_EXP,
                 DBL_MAX_EXP, DBL_MIN_EXP - DBL_MANT_DIG)
        for base in bases:
            for exp in range(base - 15, base + 15):
                self.check_truediv(75312*2**max(exp, 0), 69187*2**max(-exp, 0))
                self.check_truediv(69187*2**max(exp, 0), 75312*2**max(-exp, 0))

        # overflow corner case
        for m in [1, 2, 7, 17, 12345, 7**100,
                  -1, -2, -5, -23, -67891, -41**50]:
            for n in range(-10, 10):
                self.check_truediv(m*DBL_MIN_OVERFLOW + n, m)
                self.check_truediv(m*DBL_MIN_OVERFLOW + n, -m)

        # check detection of inexactness in shifting stage
        for n in range(250):
            # (2**DBL_MANT_DIG+1)/(2**DBL_MANT_DIG) lies halfway
            # between two representable floats, and would usually be
            # rounded down under round-half-to-even.  The tiniest of
            # additions to the numerator should cause it to be rounded
            # up instead.
            self.check_truediv((2**DBL_MANT_DIG + 1)*12345*2**200 + 2**n,
                               2**DBL_MANT_DIG*12345)

        # 1/2731 is one of the smallest division cases that's subject
        # to double rounding on IEEE 754 machines working internally with
        # 64-bit precision.  On such machines, the next check would fail,
        # were it not explicitly skipped in check_truediv.
        self.check_truediv(1, 2731)

        # a particularly bad case for the old algorithm:  gives an
        # error of close to 3.5 ulps.
        self.check_truediv(295147931372582273023, 295147932265116303360)
        for i in range(1000):
            self.check_truediv(10**(i+1), 10**i)
            self.check_truediv(10**i, 10**(i+1))

        # test round-half-to-even behaviour, normal result
        for m in [1, 2, 4, 7, 8, 16, 17, 32, 12345, 7**100,
                  -1, -2, -5, -23, -67891, -41**50]:
            for n in range(-10, 10):
                self.check_truediv(2**DBL_MANT_DIG*m + n, m)

        # test round-half-to-even, subnormal result
        for n in range(-20, 20):
            self.check_truediv(n, 2**1076)

        # largeish random divisions: a/b where |a| <= |b| <=
        # 2*|a|; |ans| is between 0.5 and 1.0, so error should
        # always be bounded by 2**-54 with equality possible only
        # if the least significant bit of q=ans*2**53 is zero.
        for M in [10**10, 10**100, 10**1000]:
            for i in range(1000):
                a = random.randrange(1, M)
                b = random.randrange(a, 2*a+1)
                self.check_truediv(a, b)
                self.check_truediv(-a, b)
                self.check_truediv(a, -b)
                self.check_truediv(-a, -b)

        # and some (genuinely) random tests
        for _ in range(10000):
            a_bits = random.randrange(1000)
            b_bits = random.randrange(1, 1000)
            x = random.randrange(2**a_bits)
            y = random.randrange(1, 2**b_bits)
            self.check_truediv(x, y)
            self.check_truediv(x, -y)
            self.check_truediv(-x, y)
            self.check_truediv(-x, -y)
def test_main():
    # Entry point used by the regression-test driver to run this module.
    run_unittest(TrueDivisionTests)

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
zorojean/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
    """Return a densified copy of *a* if it is a sparse matrix (i.e. exposes
    a toarray() method); otherwise return *a* unchanged."""
    return a.toarray() if hasattr(a, "toarray") else a
def test_label_binarizer():
    """Round-trip fit_transform / inverse_transform of LabelBinarizer for
    one-, two- and multi-class string labels."""
    lb = LabelBinarizer()

    # one-class case defaults to negative label
    inp = ["pos", "pos", "pos", "pos"]
    expected = np.array([[0, 0, 0, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)

    # two-class case
    inp = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["neg", "pos"])
    assert_array_equal(expected, got)

    to_invert = np.array([[1, 0],
                          [0, 1],
                          [0, 1],
                          [1, 0]])
    assert_array_equal(lb.inverse_transform(to_invert), inp)

    # multi-class case
    inp = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    got = lb.fit_transform(inp)
    # classes_ are the sorted unique labels
    assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
    """Labels unseen at fit time must transform to all-zero indicator rows."""
    lb = LabelBinarizer()

    expected = np.array([[1, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1]])
    got = lb.fit_transform(['b', 'd', 'e'])
    assert_array_equal(expected, got)

    # 'a', 'c' and 'f' were not seen during fit -> zero rows
    expected = np.array([[0, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1],
                         [0, 0, 0]])
    got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
    assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
    """Custom neg_label / pos_label values must be used in the output."""
    lb = LabelBinarizer(neg_label=-2, pos_label=0)

    # two-class case with pos_label=0
    inp = np.array([0, 1, 1, 0])
    expected = np.array([[-2, 0, 0, -2]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)

    lb = LabelBinarizer(neg_label=-2, pos_label=2)

    # multi-class case
    inp = np.array([3, 2, 1, 2, 0])
    expected = np.array([[-2, -2, -2, +2],
                         [-2, -2, +2, -2],
                         [-2, +2, -2, -2],
                         [-2, -2, +2, -2],
                         [+2, -2, -2, -2]])
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
    """Invalid inputs and constructor arguments must raise ValueError."""
    # Check that invalid arguments yield ValueError
    one_class = np.array([0, 0, 0, 0])
    lb = LabelBinarizer().fit(one_class)

    multi_label = [(2, 3), (0,), (0, 2)]
    assert_raises(ValueError, lb.transform, multi_label)

    lb = LabelBinarizer()
    assert_raises(ValueError, lb.transform, [])
    assert_raises(ValueError, lb.inverse_transform, [])

    # neg_label must be strictly smaller than pos_label
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)

    # sparse output requires the default 0/1 encoding
    assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
                  sparse_output=True)

    # Fail on y_type
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2], threshold=0)

    # Sequence of seq type should raise ValueError
    y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
    assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)

    # Fail on the number of classes
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2, 3], threshold=0)

    # Fail on the dimension of 'binary'
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
                  classes=[1, 2, 3], threshold=0)

    # Fail on multioutput data
    assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
    assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
                  [1, 2, 3])
def test_label_encoder():
    """LabelEncoder maps the sorted unique labels to 0..K-1 and back."""
    enc = LabelEncoder()
    enc.fit([1, 1, 4, 5, -1, 0])
    assert_array_equal(enc.classes_, [-1, 0, 1, 4, 5])
    assert_array_equal(enc.transform([0, 1, 4, 4, 5, -1, -1]),
                       [1, 2, 3, 3, 4, 0, 0])
    assert_array_equal(enc.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
                       [0, 1, 4, 4, 5, -1, -1])
    # Labels unseen during fit must be rejected.
    assert_raises(ValueError, enc.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_tranform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_tranform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
    # Output column order follows the `classes` argument, not the natural
    # sort order of the labels.
    out = label_binarize([1, 6], classes=[1, 2, 4, 6])
    expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
    assert_array_equal(out, expected)
    # Modified class order
    out = label_binarize([1, 6], classes=[1, 6, 4, 2])
    expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
    assert_array_equal(out, expected)
    # Full permutation of four classes: each row's single 1 lands in the
    # column where its label appears in `classes`.
    out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
    expected = np.array([[0, 0, 1, 0],
                         [0, 0, 0, 1],
                         [0, 1, 0, 0],
                         [1, 0, 0, 0]])
    assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
    # The inverse takes the per-row argmax; on the all-zero (fully tied)
    # last row the first class wins, hence the trailing 0 in the result.
    got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
                                                   [-1, 0, -1],
                                                   [0, 0, 0]]),
                                       np.arange(3))
    assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
vibhorag/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
omaciel/robottelo | tests/foreman/ui_deprecated/test_puppet.py | 3 | 3953 | """DEPRECATED UI FUNCTIONALITY"""
# from robottelo.config import settings
# from robottelo.decorators import (
# run_in_one_thread,
# skip_if_not_set,
# stubbed,
# tier3,
# upgrade,
# )
# from robottelo.test import UITestCase
# @run_in_one_thread
# class PuppetTestCase(UITestCase):
# """Implements Puppet test scenario"""
# @classmethod
# @skip_if_not_set('clients')
# def setUpClass(cls):
# super(PuppetTestCase, cls).setUpClass()
# cls.sat6_hostname = settings.server.hostname
# @stubbed()
# @tier3
# @upgrade
# def test_positive_puppet_scenario(self):
# """Tests extensive all-in-one puppet scenario
# :id: eecfbd37-2bd4-41d3-b6fd-9b3427d1158d
# :Steps:
# 1. Create an organization and upload a cloned manifest for it.
# 2. Enable respective Satellite Tools repos and sync them.
# 3. Create a product and a LFE
# 4. Create a puppet repos within the product
# 5. Upload motd puppet module into the repo
# 6. Upload parameterizable puppet module and create smart params for
# it
# 7. Create a CV and add Tools repo and puppet module(s)
# 8. Publish and promote CV to the LFE
# 9. Create AK with the product and enable Satellite Tools in it
# 10. Create a libvirt compute resource
# 11. Create a sane subnet and sane domain to be used by libvirt
# 12. Create a hostgroup associated with all created entities
# (especially Puppet Classes has added puppet modules)
# 13. Provision a host using the hostgroup on libvirt resource
# 14. Assert that puppet agent can run on the host
# 15. Assert that the puppet modules get installed by provisioning
# 16. Run facter on host and assert that was successful
# :expectedresults: multiple asserts along the code
# :CaseAutomation: notautomated
# :CaseLevel: System
# """
# @run_in_one_thread
# class PuppetCapsuleTestCase(UITestCase):
# """Implements Puppet test scenario with standalone capsule"""
# @classmethod
# @skip_if_not_set('clients')
# def setUpClass(cls):
# super(PuppetCapsuleTestCase, cls).setUpClass()
# cls.sat6_hostname = settings.server.hostname
# @stubbed()
# @tier3
# @upgrade
# def test_positive_puppet_capsule_scenario(self):
# """Tests extensive all-in-one puppet scenario via Capsule
# :id: d028bb38-2224-45fd-b2af-79666c6b0b72
# :Steps:
# 1. Create an organization and upload a cloned manifest for it.
# 2. Enable respective Satellite Tools repos and sync them.
# 3. Create a product and a LFE
# 4. Create a puppet repos within the product
# 5. Upload motd puppet module into the repo
# 6. Upload parameterizable puppet module and create smart params for
# it
# 7. Create a CV and add Tools repo and puppet module(s)
# 8. Publish and promote CV to the LFE
# 9. Create AK with the product and enable Satellite Tools in it
# 10. Create a libvirt compute resource
# 11. Create a sane subnet and sane domain to be used by libvirt
# 12. Create a hostgroup associated with all created entities
# (especially Puppet Classes has added puppet modules)
# 13. Provision a host using the hostgroup on libvirt resource
# 14. Assert that puppet agent can run on the host
# 15. Assert that the puppet modules get installed by provisioning
# 16. Run facter on host and assert that was successful
# :expectedresults: multiple asserts along the code
# :CaseAutomation: notautomated
# :CaseLevel: System
# """
| gpl-3.0 |
Pajinek/spacewalk | client/rhel/yum-rhn-plugin/actions/errata.py | 7 | 2441 | #!/usr/bin/python
# Client code for Update Agent
# Copyright (c) 1999--2015 Red Hat, Inc. Distributed under GPLv2.
#
# Author: Adrian Likins <alikins@redhat.com
#
from up2date_client import rhnserver
from up2date_client import up2dateAuth
from up2date_client import pkgUtils
from rhn.actions import packages
__rhnexport__ = [
'update']
# action version we understand
ACTION_VERSION = 2
def __getErrataInfo(errata_id):
    """Ask the RHN server for the package list of one errata.

    Authenticates with the local system id; returns whatever
    errata.getErrataInfo sends back (a list of package tuples).
    """
    s = rhnserver.RhnServer()
    return s.errata.getErrataInfo(up2dateAuth.getSystemId(), errata_id)
def update(errataidlist, cache_only=None):
    """Apply the given errata: update those of their packages that are
    currently installed.

    errataidlist -- a single errata id, or a list/tuple of ids
    cache_only   -- passed straight through to packages.update()

    Returns the result of packages.update(), or the (39, ...) action
    result when none of the errata's packages are installed locally.
    """
    packagelist = []
    # Accept a bare id as well as a list/tuple of ids.
    if type(errataidlist) not in [type([]), type(())]:
        errataidlist = [ errataidlist ]
    for errataid in errataidlist:
        tmpList = __getErrataInfo(errataid)
        packagelist = packagelist + tmpList
    # Index installed packages both by name+arch and by plain name.
    current_packages_with_arch = {}
    current_packages = {}
    for p in pkgUtils.getInstalledPackageList(getArch=1):
        current_packages_with_arch[p['name']+p['arch']] = p
        current_packages[p['name']] = p
    u = {}
    # only update packages that are currently installed
    # since an "applicable errata" may only contain some packages
    # that actually apply. aka kernel. Fun fun fun.
    #
    # BUGFIX: guard against an errata that ships no packages at all --
    # indexing packagelist[0] used to raise IndexError here before the
    # "no packages" result below could ever be returned.
    if packagelist and len(packagelist[0]) > 4:
        # Newer sats send down arch, filter using name+arch
        for p in packagelist:
            if current_packages_with_arch.has_key(p[0]+p[4]):
                u[p[0]+p[4]] = p
            elif current_packages_with_arch.has_key(p[0]+"noarch"):
                u[p[0]+p[4]] = p
            elif p[4] == "noarch" and current_packages.has_key(p[0]):
                u[p[0]] = p
    else:
        # 5.2 and older sats + hosted dont send arch
        for p in packagelist:
            if current_packages.has_key(p[0]):
                u[p[0]] = p
    # XXX: Fix me - once we keep all errata packages around,
    # this is the WRONG thing to do - we want to keep the specific versions
    # that the user has asked for.
    packagelist = map(lambda a: u[a], u.keys())
    if packagelist == []:
        data = {}
        data['version'] = "0"
        data['name'] = "errata.update.no_packages"
        data['erratas'] = errataidlist
        return (39,
                "No packages from that errata are available",
                data)
    return packages.update(packagelist, cache_only)
def main():
    # Smoke test: apply a bogus errata id (Python 2 print statement).
    print update([23423423])
if __name__ == "__main__":
    main()
| gpl-2.0 |
ms-iot/python | cpython/Lib/test/test_queue.py | 12 | 13011 | # Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import queue
import time
import unittest
from test import support
threading = support.import_module('threading')
QUEUE_SIZE = 5
def qfull(q):
    """Return True when *q* is bounded and currently holds maxsize items."""
    limit = q.maxsize
    if limit <= 0:
        # maxsize <= 0 means the queue is unbounded: it can never be full.
        return False
    return q.qsize() == limit
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
    """Mixin with helpers that run a blocking queue call in the main thread
    while a _TriggerThread unclogs it from the side."""
    def tearDown(self):
        # Drop the helper-thread reference so it cannot leak across tests.
        self.t = None
    def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
        """Run block_func(*block_args), verifying it blocked until
        trigger_func(*trigger_args) ran in the helper thread; return the
        result of block_func.  block_func must not raise -- use
        do_exceptional_blocking_test() for the raising case."""
        self.t = _TriggerThread(trigger_func, trigger_args)
        self.t.start()
        self.result = block_func(*block_args)
        # If block_func returned before our thread made the call, we failed!
        if not self.t.startedEvent.is_set():
            self.fail("blocking function '%r' appeared not to block" %
                      block_func)
        self.t.join(10) # make sure the thread terminates
        if self.t.is_alive():
            self.fail("trigger function '%r' appeared to not return" %
                      trigger_func)
        return self.result
    # Call this instead if block_func is supposed to raise an exception.
    def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
                                     trigger_args, expected_exception_class):
        """Like do_blocking_test(), but block_func is expected to raise
        expected_exception_class; the exception is re-raised to the caller
        after the helper thread is cleaned up in the finally block."""
        self.t = _TriggerThread(trigger_func, trigger_args)
        self.t.start()
        try:
            try:
                block_func(*block_args)
            except expected_exception_class:
                raise
            else:
                self.fail("expected exception of kind %r" %
                          expected_exception_class)
        finally:
            self.t.join(10) # make sure the thread terminates
            if self.t.is_alive():
                self.fail("trigger function '%r' appeared to not return" %
                          trigger_func)
            if not self.t.startedEvent.is_set():
                self.fail("trigger thread ended but event never set")
class BaseQueueTestMixin(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
self.assertTrue(q.empty())
self.assertFalse(q.full())
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(q.qsize(), "Queue should not be empty")
self.assertTrue(not qfull(q), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), "Queue should be full")
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
    def worker(self, q):
        """Consumer loop: drain *q*, accumulating each item into self.cum.

        A negative item is the shutdown sentinel.  Every get() is paired
        with a task_done() so that q.join() can complete.
        """
        while True:
            x = q.get()
            if x < 0:
                # Sentinel received: acknowledge it and exit the thread.
                q.task_done()
                return
            with self.cumlock:
                # cum is shared between worker threads; guard the
                # read-modify-write with the lock.
                self.cum += x
            q.task_done()
    def queue_join_test(self, q):
        """Feed 100 items to two worker threads and verify that q.join()
        blocks until every task has been marked done."""
        self.cum = 0
        for i in (0,1):
            threading.Thread(target=self.worker, args=(q,)).start()
        for i in range(100):
            q.put(i)
        q.join()
        # If join() returned early, some items were not yet summed.
        self.assertEqual(self.cum, sum(range(100)),
                         "q.join() did not block until all tasks were done")
        for i in (0,1):
            q.put(-1) # instruct the threads to close
        q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(queue.Empty):
q.get_nowait()
    def test_shrinking_queue(self):
        # issue 10110
        # Shrinking maxsize below the current size must keep the stored
        # items but still reject further puts.
        q = self.type2test(3)
        q.put(1)
        q.put(2)
        q.put(3)
        with self.assertRaises(queue.Full):
            q.put_nowait(4)
        self.assertEqual(q.qsize(), 3)
        q.maxsize = 2 # shrink the queue
        with self.assertRaises(queue.Full):
            q.put_nowait(4)
# Concrete test classes: run the shared mixin suite against each stdlib
# queue flavour (FIFO, LIFO and priority ordering).
class QueueTest(BaseQueueTestMixin, unittest.TestCase):
    type2test = queue.Queue
class LifoQueueTest(BaseQueueTestMixin, unittest.TestCase):
    type2test = queue.LifoQueue
class PriorityQueueTest(BaseQueueTestMixin, unittest.TestCase):
    type2test = queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
    """Raised by FailingQueue when a deliberately injected failure fires."""
    pass
class FailingQueue(queue.Queue):
    """Queue subclass whose next put or get can be made to fail on demand.

    Setting ``fail_next_put`` / ``fail_next_get`` to True makes the very
    next internal ``_put`` / ``_get`` raise FailingQueueException.  The
    flag is consumed in the process, so exactly one operation fails.
    """

    def __init__(self, *args):
        self.fail_next_put = False
        self.fail_next_get = False
        super().__init__(*args)

    def _put(self, item):
        if self.fail_next_put:
            # One-shot failure: clear the flag before raising.
            self.fail_next_put = False
            raise FailingQueueException("You Lose")
        return super()._put(item)

    def _get(self):
        if self.fail_next_get:
            # One-shot failure: clear the flag before raising.
            self.fail_next_get = False
            raise FailingQueueException("You Lose")
        return super()._get()
class FailingQueueTest(BlockingTestMixin, unittest.TestCase):
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
q.get()
self.assertTrue(not qfull(q), "Queue should not be full")
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
jagguli/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/gdal/geomtype.py | 404 | 2967 | from django.contrib.gis.gdal.error import OGRException
#### OGRGeomType ####
class OGRGeomType(object):
"Encapulates OGR Geometry Types."
wkb25bit = -2147483648
# Dictionary of acceptable OGRwkbGeometryType s and their string names.
_types = {0 : 'Unknown',
1 : 'Point',
2 : 'LineString',
3 : 'Polygon',
4 : 'MultiPoint',
5 : 'MultiLineString',
6 : 'MultiPolygon',
7 : 'GeometryCollection',
100 : 'None',
101 : 'LinearRing',
1 + wkb25bit: 'Point25D',
2 + wkb25bit: 'LineString25D',
3 + wkb25bit: 'Polygon25D',
4 + wkb25bit: 'MultiPoint25D',
5 + wkb25bit : 'MultiLineString25D',
6 + wkb25bit : 'MultiPolygon25D',
7 + wkb25bit : 'GeometryCollection25D',
}
# Reverse type dictionary, keyed by lower-case of the name.
_str_types = dict([(v.lower(), k) for k, v in _types.items()])
def __init__(self, type_input):
"Figures out the correct OGR Type based upon the input."
if isinstance(type_input, OGRGeomType):
num = type_input.num
elif isinstance(type_input, basestring):
type_input = type_input.lower()
if type_input == 'geometry': type_input='unknown'
num = self._str_types.get(type_input, None)
if num is None:
raise OGRException('Invalid OGR String Type "%s"' % type_input)
elif isinstance(type_input, int):
if not type_input in self._types:
raise OGRException('Invalid OGR Integer Type: %d' % type_input)
num = type_input
else:
raise TypeError('Invalid OGR input type given.')
# Setting the OGR geometry type number.
self.num = num
def __str__(self):
"Returns the value of the name property."
return self.name
def __eq__(self, other):
"""
Does an equivalence test on the OGR type with the given
other OGRGeomType, the short-hand string, or the integer.
"""
if isinstance(other, OGRGeomType):
return self.num == other.num
elif isinstance(other, basestring):
return self.name.lower() == other.lower()
elif isinstance(other, int):
return self.num == other
else:
return False
def __ne__(self, other):
return not (self == other)
@property
def name(self):
"Returns a short-hand string form of the OGR Geometry type."
return self._types[self.num]
@property
def django(self):
"Returns the Django GeometryField for this OGR Type."
s = self.name.replace('25D', '')
if s in ('LinearRing', 'None'):
return None
elif s == 'Unknown':
s = 'Geometry'
return s + 'Field'
| apache-2.0 |
pliniopereira/ccd3 | src/business/configuration/settingsImage.py | 1 | 2253 | from PyQt5 import QtCore
from src.business.configuration.constants import imager as i
class SettingsImage:
    """Persists imager configuration through a QSettings INI file.

    The backing filename and all keys come from the imager constants
    module (imported as ``i``).
    """
    def __init__(self):
        # NOTE(review): this first QSettings instance is immediately
        # replaced by setup_settings(); it only exists as a placeholder.
        self._settings = QtCore.QSettings()
        self.setup_settings()
    def setup_settings(self):
        """Bind the settings store to the INI file named by i.FILENAME."""
        self._settings = QtCore.QSettings(i.FILENAME, QtCore.QSettings.IniFormat)
        self._settings.setFallbacksEnabled(False)
    def save_settings(self):
        """Flush any pending settings changes to disk."""
        self._settings.sync()
    def set_image_settings(self, get_level1, get_level2, crop_xi, crop_xf, crop_yi, crop_yf,
                           ignore_crop, image_png, image_tif, image_fit):
        """Store one value per imager key.

        :param get_level1: value stored under i.GET_LEVEL1
        :param get_level2: value stored under i.GET_LEVEL2
        :param crop_xi: crop window X-axis start (i.CROP_X_AXIS_XI)
        :param crop_xf: crop window X-axis end (i.CROP_X_AXIS_XF)
        :param crop_yi: crop window Y-axis start (i.CROP_Y_AXIS_YI)
        :param crop_yf: crop window Y-axis end (i.CROP_Y_AXIS_YF)
        :param ignore_crop: checkbox state -- ignore the crop window
        :param image_png: checkbox state -- save PNG output
        :param image_tif: checkbox state -- save TIF output
        :param image_fit: checkbox state -- save FIT output
        """
        self._settings.setValue(i.GET_LEVEL1, get_level1)
        self._settings.setValue(i.GET_LEVEL2, get_level2)
        self._settings.setValue(i.CROP_X_AXIS_XI, crop_xi)
        self._settings.setValue(i.CROP_X_AXIS_XF, crop_xf)
        self._settings.setValue(i.CROP_Y_AXIS_YI, crop_yi)
        self._settings.setValue(i.CROP_Y_AXIS_YF, crop_yf)
        self._settings.setValue(i.CHEBOX_IGNORE_CROP, ignore_crop)
        self._settings.setValue(i.CHEBOX_IMAGE_PNG, image_png)
        self._settings.setValue(i.CHEBOX_IMAGE_TIF, image_tif)
        self._settings.setValue(i.CHEBOX_IMAGE_FIT, image_fit)
    def get_image_settings(self):
        """Return the stored values in the same order set_image_settings()
        accepts them; the four checkbox values default to True when unset."""
        return self._settings.value(i.GET_LEVEL1), \
               self._settings.value(i.GET_LEVEL2), \
               self._settings.value(i.CROP_X_AXIS_XI), \
               self._settings.value(i.CROP_X_AXIS_XF), \
               self._settings.value(i.CROP_Y_AXIS_YI), \
               self._settings.value(i.CROP_Y_AXIS_YF), \
               self._settings.value(i.CHEBOX_IGNORE_CROP, True, type=bool), \
               self._settings.value(i.CHEBOX_IMAGE_PNG, True, type=bool), \
               self._settings.value(i.CHEBOX_IMAGE_TIF, True, type=bool), \
               self._settings.value(i.CHEBOX_IMAGE_FIT, True, type=bool)
    def get_filepath(self):
        # NOTE(review): reads the value stored under key i.FILENAME --
        # presumably the INI path; confirm a value is ever stored there.
        return self._settings.value(i.FILENAME)
| gpl-3.0 |
dimacus/selenium | py/test/selenium/webdriver/common/webdriverwait_tests.py | 61 | 16676 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import time
import unittest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidElementStateException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def not_available_on_remote(func):
    """Decorator: skip the wrapped test method when driving a remote driver.

    Fixes the original implementation, which compared the driver's *type
    object* to the string ``'remote'`` (always False), left a debug
    ``print`` in place, and returned a spurious ``lambda`` from the remote
    branch instead of simply not running the test body.
    """
    def testMethod(self):
        # Compare the driver's class name, not the class object itself.
        if type(self.driver).__name__.lower() == 'remote':
            return None  # Skip: feature not available on remote drivers.
        return func(self)
    return testMethod
def throwSERE(driver):
    """Wait-callable that always raises StaleElementReferenceException.

    Used below to exercise WebDriverWait's ignored_exceptions handling.
    """
    raise StaleElementReferenceException("test")
class WebDriverWaitTest(unittest.TestCase):
    """Integration tests for WebDriverWait and the expected_conditions helpers.

    NOTE(review): ``self.driver`` and ``self.webserver`` are not created here;
    they are presumably injected by the surrounding test runner — confirm
    before running this class standalone.  Each test loads a fixture page
    served by ``self.webserver`` and asserts that a wait either succeeds or
    times out with TimeoutException.
    """
    def testShouldExplicitlyWaitForASingleElement(self):
        self._loadPage("dynamic")
        add = self.driver.find_element_by_id("adder")
        add.click();
        WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.ID, "box0")))  # All is well if this doesn't throw.
    def testShouldStillFailToFindAnElementWithExplicitWait(self):
        self._loadPage("dynamic")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.presence_of_element_located((By.ID, "box0")))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        except Exception as e:
            self.fail("Expected TimeoutException but got " + str(e))
    def testShouldExplicitlyWaituntilAtLeastOneElementIsFoundWhenSearchingForMany(self):
        self._loadPage("dynamic")
        add = self.driver.find_element_by_id("adder")
        add.click();
        add.click();
        elements = WebDriverWait(self.driver, 2).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
        self.assertTrue(len(elements) >= 1)
    def testShouldFailToFindElementsWhenExplicitWaiting(self):
        self._loadPage("dynamic")
        try:
            elements = WebDriverWait(self.driver, 0.7).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
        except TimeoutException as e:
            pass  # we should get a timeout
        except Exception as e:
            self.fail("Expected TimeoutException but got " + str(e))
    def testShouldWaitOnlyAsLongAsTimeoutSpecifiedWhenImplicitWaitsAreSet(self):
        self._loadPage("dynamic")
        self.driver.implicitly_wait(0.5)
        try:
            start = time.time()
            try:
                WebDriverWait(self.driver, 1).until(EC.presence_of_element_located((By.ID, "box0")))
                self.fail("Expected TimeoutException to have been thrown")
            except TimeoutException as e:
                pass
            # The explicit wait's 1s timeout must win over the implicit wait.
            self.assertTrue(time.time() - start < 1.5,
                "Expected to take just over 1 second to execute, but took %f" %
                (time.time() - start))
        finally:
            # Restore the default so later tests are not slowed down.
            self.driver.implicitly_wait(0)
    def testShouldWaitAtLeastOnce(self):
        # A zero timeout must still evaluate the condition one time.
        self._loadSimplePage()
        elements_exists = lambda driver: driver.find_elements_by_tag_name('h1')
        elements = WebDriverWait(self.driver, 0).until(elements_exists)
        self.assertTrue(len(elements) >= 1)
    def testWaitUntilNotReturnsIfEvaluatesToFalse(self):
        falsum = lambda driver: False
        self.assertFalse(WebDriverWait(self.driver, 1).until_not(falsum))
    def testWaitShouldStillFailIfProduceIgnoredException(self):
        # Ignored exceptions suppress the raise per poll, but the wait as a
        # whole must still time out if the condition never succeeds.
        ignored = (InvalidElementStateException, StaleElementReferenceException)
        try:
            WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
    def testWaitShouldStillFailIfProduceChildOfIgnoredException(self):
        # StaleElementReferenceException subclasses WebDriverException, so it
        # is matched by the ignored tuple here as well.
        ignored = (WebDriverException)
        try:
            WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
    def testWaitUntilNotShouldNotFailIfProduceIgnoredException(self):
        # until_not treats an ignored exception as "condition not met" -> True.
        ignored = (InvalidElementStateException, StaleElementReferenceException)
        self.assertTrue(WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until_not(throwSERE))
    def testExpectedConditionTitleIs(self):
        self._loadPage("blank")
        WebDriverWait(self.driver, 1).until(EC.title_is("blank"))
        self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.title_is("not blank"))
        self.assertEqual(self.driver.title, 'not blank')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.title_is("blank"))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
    def testExpectedConditionTitleContains(self):
        self._loadPage("blank")
        self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.title_contains("not"))
        self.assertEqual(self.driver.title, 'not blank')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.title_contains("blanket"))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
    def testExpectedConditionVisibilityOfElementLocated(self):
        self._loadPage("javascriptPage")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.find_element_by_id('clickToShow').click()
        element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
        self.assertTrue(element.is_displayed())
    def testExpectedConditionVisibilityOf(self):
        self._loadPage("javascriptPage")
        hidden = self.driver.find_element_by_id('clickToHide')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.visibility_of(hidden))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.find_element_by_id('clickToShow').click()
        element = WebDriverWait(self.driver, 5).until(EC.visibility_of(hidden))
        self.assertTrue(element.is_displayed())
    def testExpectedConditionTextToBePresentInElement(self):
        self._loadPage('booleanAttributes')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){var el = document.getElementById('unwrappable'); el.textContent = el.innerText = 'Unwrappable Expected text'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
        self.assertEqual('Unwrappable Expected text', self.driver.find_element_by_id('unwrappable').text)
    def testExpectedConditionTextToBePresentInElementValue(self):
        self._loadPage('booleanAttributes')
        try:
            WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('inputRequired').value = 'Example Expected text'}, 200)")
        WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
        self.assertEqual('Example Expected text', self.driver.find_element_by_id('inputRequired').get_attribute('value'))
    def testExpectedConditionFrameToBeAvailableAndSwitchToItByName(self):
        self._loadPage("blank")
        try:
            WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){var f = document.createElement('iframe'); f.id='myFrame'; f.src = '"+self._pageURL('iframeWithAlert')+"'; document.body.appendChild(f)}, 200)")
        WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
        self.assertEqual('click me', self.driver.find_element_by_id('alertInFrame').text)
    def testExpectedConditionFrameToBeAvailableAndSwitchToItByLocator(self):
        self._loadPage("blank")
        try:
            WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it((By.ID, 'myFrame')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){var f = document.createElement('iframe'); f.id='myFrame'; f.src = '"+self._pageURL('iframeWithAlert')+"'; document.body.appendChild(f)}, 200)")
        WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it((By.ID, 'myFrame')))
        self.assertEqual('click me', self.driver.find_element_by_id('alertInFrame').text)
    def testExpectedConditionInvisiblityOfElementLocated(self):
        self._loadPage("javascriptPage")
        self.driver.execute_script("delayedShowHide(0, true)")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("delayedShowHide(200, false)")
        element = WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
        self.assertFalse(element.is_displayed())
    def testExpectedConditionElementToBeClickable(self):
        self._loadPage("javascriptPage")
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("delayedShowHide(200, true)")
        WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
        element = self.driver.find_element_by_id('clickToHide')
        element.click()
        WebDriverWait(self.driver, 3.5).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
        self.assertFalse(element.is_displayed())
    def testExpectedConditionStalenessOf(self):
        self._loadPage('dynamicallyModifiedPage')
        element = self.driver.find_element_by_id('element-to-remove')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.find_element_by_id('buttonDelete').click()
        self.assertEqual('element', element.text)
        WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
        try:
            element.text
            self.fail("Expected StaleReferenceException to have been thrown")
        except StaleElementReferenceException as e:
            pass
    def testExpectedConditionElementToBeSelected(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
        self.assertTrue(element.is_selected())
    def testExpectedConditionElementLocatedToBeSelected(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
        self.assertTrue(element.is_selected())
    def testExpectedConditionElementSelectionStateToBe(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, False))
        self.assertFalse(element.is_selected())
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
        self.assertTrue(element.is_selected())
    def testExpectedConditionElementLocatedSelectionStateToBe(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id('checky')
        WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), False))
        self.assertFalse(element.is_selected())
        try:
            WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
        self.assertTrue(element.is_selected())
    def testExpectedConditionAlertIsPresent(self):
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs driver does not support alerts")
        self._loadPage('blank')
        try:
            WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.driver.execute_script("setTimeout(function(){alert('alerty')}, 200)")
        WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
        alert = self.driver.switch_to.alert
        self.assertEqual('alerty', alert.text)
        alert.dismiss()
    def _pageURL(self, name):
        """Return the test webserver URL for the fixture page *name*."""
        return self.webserver.where_is(name + '.html')
    def _loadSimplePage(self):
        """Convenience wrapper: load the 'simpleTest' fixture page."""
        self._loadPage("simpleTest")
    def _loadPage(self, name):
        """Navigate the driver to the fixture page *name*."""
        self.driver.get(self._pageURL(name))
| apache-2.0 |
Dino0631/RedRain-Bot | cogs/lib/youtube_dl/extractor/playvid.py | 64 | 3299 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
ExtractorError,
)
class PlayvidIE(InfoExtractor):
    """youtube-dl extractor for playvid.com watch pages."""
    # Matches both /watch?v=<id> and /watch/<id>; id runs to '#' or end of URL.
    _VALID_URL = r'https?://(?:www\.)?playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
    _TESTS = [{
        'url': 'http://www.playvid.com/watch/RnmBNgtrrJu',
        'md5': 'ffa2f6b2119af359f544388d8c01eb6c',
        'info_dict': {
            'id': 'RnmBNgtrrJu',
            'ext': 'mp4',
            'title': 'md5:9256d01c6317e3f703848b5906880dc8',
            'duration': 82,
            'age_limit': 18,
        },
        'skip': 'Video removed due to ToS',
    }, {
        'url': 'http://www.playvid.com/watch/hwb0GpNkzgH',
        'md5': '39d49df503ad7b8f23a4432cbf046477',
        'info_dict': {
            'id': 'hwb0GpNkzgH',
            'ext': 'mp4',
            'title': 'Ellen Euro Cutie Blond Takes a Sexy Survey Get Facial in The Park',
            'age_limit': 18,
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }]
    def _real_extract(self, url):
        """Download the watch page and parse metadata/formats from flashvars."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Surface the site's own error message (e.g. removed video) early.
        m_error = re.search(
            r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage)
        if m_error:
            raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
        video_title = None
        duration = None
        video_thumbnail = None
        formats = []
        # most of the information is stored in the flashvars
        flashvars = self._html_search_regex(
            r'flashvars="(.+?)"', webpage, 'flashvars')
        # flashvars is a URL-encoded '&'-separated key=value list.
        infos = compat_urllib_parse_unquote(flashvars).split(r'&')
        for info in infos:
            videovars_match = re.match(r'^video_vars\[(.+?)\]=(.+?)$', info)
            if videovars_match:
                key = videovars_match.group(1)
                val = videovars_match.group(2)
                if key == 'title':
                    video_title = compat_urllib_parse_unquote_plus(val)
                if key == 'duration':
                    try:
                        duration = int(val)
                    except ValueError:
                        # Non-numeric duration: leave it unset.
                        pass
                if key == 'big_thumb':
                    video_thumbnail = val
                # e.g. video_vars[video_urls][720p] -> one format per height.
                videourl_match = re.match(
                    r'^video_urls\]\[(?P<resolution>[0-9]+)p', key)
                if videourl_match:
                    height = int(videourl_match.group('resolution'))
                    formats.append({
                        'height': height,
                        'url': val,
                    })
        self._sort_formats(formats)
        # Extract title - should be in the flashvars; if not, look elsewhere
        if video_title is None:
            video_title = self._html_search_regex(
                r'<title>(.*?)</title', webpage, 'title')
        return {
            'id': video_id,
            'formats': formats,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'duration': duration,
            'description': None,
            'age_limit': 18
        }
| gpl-3.0 |
agconti/Shopify-Django | venv/lib/python2.7/site-packages/django/contrib/gis/tests/geoapp/test_feeds.py | 111 | 4189 | from __future__ import absolute_import
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import TestCase
from .models import City
class GeoFeedTest(TestCase):
    """Tests for GeoRSS / W3C Geo syndication feeds in django.contrib.gis."""
    urls = 'django.contrib.gis.tests.geoapp.urls'
    def setUp(self):
        # The feeds framework needs a Site matching SITE_ID; force the sites
        # app to appear installed for the duration of each test.
        Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
        self.old_Site_meta_installed = Site._meta.installed
        Site._meta.installed = True
    def tearDown(self):
        # Restore whatever 'installed' state the sites app had before.
        Site._meta.installed = self.old_Site_meta_installed
    def assertChildNodes(self, elem, expected):
        "Taken from regressiontests/syndication/tests.py."
        actual = set([n.nodeName for n in elem.childNodes])
        expected = set(expected)
        self.assertEqual(actual, expected)
    def test_geofeed_rss(self):
        "Tests geographic feeds using GeoRSS over RSSv2."
        # Uses `GEOSGeometry` in `item_geometry`
        doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
        # Uses a 2-tuple in `item_geometry`
        doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild
        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
                              ['title', 'link', 'description', 'language',
                               'lastBuildDate', 'item', 'georss:box', 'atom:link']
                              )
        # Incrementing through the feeds.
        for feed in [feed1, feed2]:
            # Ensuring the georss namespace was added to the <rss> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'),  'http://www.georss.org/georss')
            chan = feed.getElementsByTagName('channel')[0]
            items = chan.getElementsByTagName('item')
            self.assertEqual(len(items), City.objects.count())
            # Ensuring the georss element was added to each item in the feed.
            for item in items:
                self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
    def test_geofeed_atom(self):
        "Testing geographic feeds using GeoRSS over Atom."
        doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
        doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild
        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])
        for feed in [feed1, feed2]:
            # Ensuring the georsss namespace was added to the <feed> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'),  'http://www.georss.org/georss')
            entries = feed.getElementsByTagName('entry')
            self.assertEqual(len(entries), City.objects.count())
            # Ensuring the georss element was added to each entry in the feed.
            for entry in entries:
                self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])
    def test_geofeed_w3c(self):
        "Testing geographic feeds using W3C Geo."
        doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
        feed = doc.firstChild
        # Ensuring the geo namespace was added to the <feed> element.
        self.assertEqual(feed.getAttribute('xmlns:geo'), 'http://www.w3.org/2003/01/geo/wgs84_pos#')
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), City.objects.count())
        # Ensuring the geo:lat and geo:lon element was added to each item in the feed.
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])
        # Boxes and Polygons aren't allowed in W3C Geo feeds.
        self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/') # Box in <channel>
        self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo3/') # Polygons in <entry>
| mit |
DrMeers/django | django/conf/locale/en_AU/formats.py | 5 | 2111 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j M Y'              # '25 Oct 2006'
TIME_FORMAT = 'P'                  # '2:30 pm'
DATETIME_FORMAT = 'j M Y, P'       # '25 Oct 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y'          # 'October 2006'
MONTH_DAY_FORMAT = 'j F'           # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y'        # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P'  # '25/10/2006 2:30 pm'
FIRST_DAY_OF_WEEK = 1              # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Formats are tried in order; the first that parses the input wins.
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',             # '25/10/2006', '25/10/06'
    # '%b %d %Y', '%b %d, %Y',            # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',            # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',            # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',            # '25 October 2006', '25 October, 2006'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d/%m/%y %H:%M:%S',     # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',  # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',        # '25/10/06 14:30'
    '%d/%m/%y',              # '25/10/06'
)
# Number formatting: '.' decimal point, ',' thousands separator, groups of 3.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
bestwpw/mysql-5.6 | xtrabackup/test/python/subunit/run.py | 61 | 2257 | #!/usr/bin/python
#
# Simple subunit testrunner for python
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Run a unittest testcase reporting results as Subunit.
$ python -m subunit.run mylib.tests.test_suite
"""
import sys
from subunit import TestProtocolClient, get_default_formatter
from testtools.run import (
BUFFEROUTPUT,
CATCHBREAK,
FAILFAST,
TestProgram,
USAGE_AS_MAIN,
)
class SubunitTestRunner(object):
    """unittest-style runner that reports results as a subunit stream."""
    def __init__(self, stream=sys.stdout):
        # Stream the subunit protocol output is written to.
        self.stream = stream
    def run(self, test):
        "Run the given test case or test suite."
        result = TestProtocolClient(self.stream)
        test(result)
        return result
class SubunitTestProgram(TestProgram):
    """TestProgram variant whose usage text mentions the subunit reporter."""

    USAGE = USAGE_AS_MAIN

    def usageExit(self, msg=None):
        """Print usage information (optionally preceded by *msg*) and exit(2)."""
        if msg:
            # Function-call form: consistent with the print() call below and
            # valid on both Python 2 and 3 (was the py2-only ``print msg``).
            print(msg)
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        # ``!= False`` is deliberate: the attributes default to None in the
        # base class, and None means "option supported, not enabled".
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        usage_text = self.USAGE % usage
        usage_lines = usage_text.split('\n')
        # Inject a subunit-specific tagline just below the usage synopsis.
        usage_lines.insert(2, "Run a test suite with a subunit reporter.")
        usage_lines.insert(3, "")
        print('\n'.join(usage_lines))
        sys.exit(2)
if __name__ == '__main__':
    # Entry point: wire the default subunit formatter stream into the runner
    # and let TestProgram resolve/run the suite named on the command line.
    stream = get_default_formatter()
    runner = SubunitTestRunner(stream)
    SubunitTestProgram(module=None, argv=sys.argv, testRunner=runner,
        stdout=sys.stdout)
| gpl-2.0 |
slarosa/QGIS | python/plugins/sextante/algs/ftools/ExtractNodes.py | 3 | 2966 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtractNodes.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.QGisLayers import QGisLayers
from sextante.parameters.ParameterVector import ParameterVector
from sextante.outputs.OutputVector import OutputVector
from sextante.algs.ftools import FToolsUtils as utils
class ExtractNodes(GeoAlgorithm):
    """SEXTANTE algorithm: emit every vertex of the input layer as a point.

    Attribute values are copied unchanged from the source feature to each
    of its extracted node points.
    """
    INPUT = "INPUT"
    OUTPUT = "OUTPUT"
    #===========================================================================
    # def getIcon(self):
    #     return QtGui.QIcon(os.path.dirname(__file__) + "/icons/extract_nodes.png")
    #===========================================================================
    def defineCharacteristics(self):
        """Declare the algorithm name, group, input parameter and output."""
        self.name = "Extract nodes"
        self.group = "Vector geometry tools"
        self.addParameter(ParameterVector(self.INPUT, "Input layer", ParameterVector.VECTOR_TYPE_ANY))
        self.addOutput(OutputVector(self.OUTPUT, "Output layer"))
    def processAlgorithm(self, progress):
        """Write one point feature per vertex of every input feature."""
        layer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT))
        # Output keeps the input's fields/CRS but uses point geometry.
        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(layer.pendingFields().toList(),
                QGis.WKBPoint, layer.crs())
        outFeat = QgsFeature()
        inGeom = QgsGeometry()
        outGeom = QgsGeometry()
        current = 0
        features = QGisLayers.features(layer)
        # NOTE(review): raises ZeroDivisionError on an empty layer — confirm
        # upstream guarantees at least one feature.
        total = 100.0 / float(len(features))
        for f in features:
            inGeom = f.geometry()
            attrs = f.attributes()
            points = utils.extractPoints(inGeom)
            outFeat.setAttributes(attrs)
            for i in points:
                outFeat.setGeometry(outGeom.fromPoint(i))
                writer.addFeature(outFeat)
            current += 1
            progress.setPercentage(int(current * total))
        # Closing the writer flushes the output layer to disk.
        del writer
| gpl-2.0 |
dataxu/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_template.py | 34 | 8207 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_template
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job_template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job_template.
required: True
description:
description:
- Description to use for the job_template.
required: False
default: null
job_type:
description:
- The job_type to use for the job_template.
required: True
choices: ["run", "check", "scan"]
inventory:
description:
- Inventory to use for the job_template.
required: False
default: null
project:
description:
- Project to use for the job_template.
required: True
playbook:
description:
- Playbook to use for the job_template.
required: True
machine_credential:
description:
- Machine_credential to use for the job_template.
required: False
default: null
cloud_credential:
description:
- Cloud_credential to use for the job_template.
required: False
default: null
network_credential:
description:
- The network_credential to use for the job_template.
required: False
default: null
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
required: False
default: null
limit:
description:
- A host pattern to further constrain the list of hosts managed or affected by the playbook
required: False
default: null
verbosity:
description:
- Control the output level Ansible produces as the playbook runs.
required: False
choices: ["verbose", "debug"]
default: null
job_tags:
description:
- The job_tags to use for the job_template.
required: False
default: null
skip_tags:
description:
- The skip_tags to use for the job_template.
required: False
default: null
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
required: False
default: null
extra_vars_path:
description:
- Path to the extra_vars yaml file.
required: False
default: null
ask_extra_vars:
description:
- Prompt user for extra_vars on launch.
required: False
default: False
ask_tags:
description:
- Prompt user for job tags on launch.
required: False
default: False
ask_job_type:
description:
- Prompt user for job type on launch.
required: False
default: False
ask_inventory:
description:
- Propmt user for inventory on launch.
required: False
default: False
ask_credential:
description:
- Prompt user for credential on launch.
required: False
default: False
become_enabled:
description:
- Should become_enabled.
required: False
default: False
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: Ping
job_type: run
inventory: Local
project: Demo
playbook: ping.yml
machine_credential: Local
state: present
tower_config_file: "~/tower_cli.cfg"
'''
from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_fields(p):
    '''This updates the module field names
    to match the field names tower-cli expects to make
    calling of the modify/delete methods easier.
    '''
    renames = {
        'ask_extra_vars': 'ask_variables_on_launch',
        'ask_limit': 'ask_limit_on_launch',
        'ask_tags': 'ask_tags_on_launch',
        'ask_job_type': 'ask_job_type_on_launch',
        'machine_credential': 'credential',
    }
    # Work on a shallow copy so the caller's dict is left untouched.
    result = dict(p)
    for module_name, cli_name in renames.items():
        result[cli_name] = result.pop(module_name)
    # tower-cli takes extra vars as a list of '@<path>' file references.
    vars_file = result.get('extra_vars_path')
    if vars_file is not None:
        result['extra_vars'] = ['@' + vars_file]
    return result
def update_resources(module, p):
    """Resolve Tower resource names in *p* to their primary-key ids.

    For each name-valued field (project, inventory, *_credential) that is
    set, look the resource up by name via tower-cli and substitute its id.
    A missing resource aborts the module run via ``module.fail_json``.
    Returns a new dict; the input is not modified.
    """
    name_fields = {
        'project': 'name',
        'inventory': 'name',
        'machine_credential': 'name',
        'network_credential': 'name',
        'cloud_credential': 'name',
    }
    resolved = dict(p)
    for field, lookup_key in name_fields.items():
        value = resolved[field]
        if not value:
            continue  # unset/empty: nothing to resolve
        # All *_credential fields map onto the single 'credential' resource.
        resource_type = 'credential' if '_credential' in field else field
        try:
            match = tower_cli.get_resource(resource_type).get(**{lookup_key: value})
            resolved[field] = match['id']
        except exc.NotFound as excinfo:
            module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
    return resolved
def main():
    """Entry point: declare the module arguments, then create/update or
    delete the named Tower job template via tower-cli."""
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        description=dict(),
        job_type=dict(choices=['run', 'check', 'scan'], required=True),
        inventory=dict(),
        project=dict(required=True),
        playbook=dict(required=True),
        machine_credential=dict(),
        cloud_credential=dict(),
        network_credential=dict(),
        forks=dict(type='int'),
        limit=dict(),
        verbosity=dict(choices=['verbose', 'debug']),
        job_tags=dict(),
        skip_tags=dict(),
        host_config_key=dict(),
        extra_vars_path=dict(type='path', required=False),
        ask_extra_vars=dict(type='bool', default=False),
        ask_limit=dict(type='bool', default=False),
        ask_tags=dict(type='bool', default=False),
        ask_job_type=dict(type='bool', default=False),
        ask_inventory=dict(type='bool', default=False),
        ask_credential=dict(type='bool', default=False),
        become_enabled=dict(type='bool', default=False),
        state=dict(choices=['present', 'absent'], default='present'),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')
    name = module.params.get('name')
    state = module.params.get('state')
    json_output = {'job_template': name, 'state': state}
    # All tower-cli calls run with the auth settings from the module params.
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        jt = tower_cli.get_resource('job_template')
        # Translate names -> ids, then module field names -> tower-cli names.
        params = update_resources(module, module.params)
        params = update_fields(params)
        params['create_on_missing'] = True
        try:
            if state == 'present':
                # modify() with create_on_missing upserts the template.
                result = jt.modify(**params)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = jt.delete(**params)
        except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
            module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
# Run the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
kindersung/servo | tests/wpt/web-platform-tests/tools/html5lib/setup.py | 418 | 1694 | from distutils.core import setup
import os
import codecs
# Trove classifiers advertised on PyPI (supported Python versions, maturity).
classifiers=[
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Text Processing :: Markup :: HTML'
]

# Collect the top-level package plus every sub-package directory,
# skipping hidden directories and the test suite.
packages = ['html5lib'] + ['html5lib.'+name
                           for name in os.listdir(os.path.join('html5lib'))
                           if os.path.isdir(os.path.join('html5lib', name)) and
                           not name.startswith('.') and name != 'tests']

current_dir = os.path.dirname(__file__)
# The PyPI long description is the README followed by the changelog.
with codecs.open(os.path.join(current_dir, 'README.rst'), 'r', 'utf8') as readme_file:
    with codecs.open(os.path.join(current_dir, 'CHANGES.rst'), 'r', 'utf8') as changes_file:
        long_description = readme_file.read() + '\n' + changes_file.read()

setup(name='html5lib',
      version='0.9999-dev',
      url='https://github.com/html5lib/html5lib-python',
      license="MIT License",
      description='HTML parser based on the WHATWG HTML specifcation',
      long_description=long_description,
      classifiers=classifiers,
      maintainer='James Graham',
      maintainer_email='james@hoppipolla.co.uk',
      packages=packages,
      install_requires=[
          'six',
      ],
      )
| mpl-2.0 |
pombreda/xhtml2pdf | demo/djangoproject/views.py | 14 | 2078 | #! /usr/bin/python
# -*- encoding: utf-8 -*-
from django import http
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import Context
import ho.pisa as pisa
import cStringIO as StringIO
import cgi
def index(request):
    # Landing page: a raw-HTML form that POSTs user-supplied markup to
    # /download/ for PDF conversion, plus a link to the template-based demo.
    return http.HttpResponse("""
<html><body>
<h1>Example 1</h1>
Please enter some HTML code:
<form action="/download/" method="post" enctype="multipart/form-data">
<textarea name="data">Hello <strong>World</strong></textarea>
<br />
<input type="submit" value="Convert HTML to PDF" />
</form>
<hr>
<h1>Example 2</h1>
<p><a href="ezpdf_sample">Example with template</a>
</body></html>
""")
def download(request):
    """Convert the POSTed HTML snippet to a PDF and return it as the response.

    Anything other than a successful conversion falls through to a plain
    error response.
    """
    if request.POST:
        pdf_buffer = StringIO.StringIO()
        conversion = pisa.CreatePDF(
            StringIO.StringIO(request.POST["data"]),
            pdf_buffer
        )
        if not conversion.err:
            return http.HttpResponse(
                pdf_buffer.getvalue(),
                mimetype='application/pdf')
    return http.HttpResponse('We had some errors')
def render_to_pdf(template_src, context_dict):
    """Render a Django template to PDF.

    On conversion failure the escaped HTML is echoed back so the problem
    can be inspected in the browser.
    """
    template = get_template(template_src)
    html = template.render(Context(context_dict))
    pdf_buffer = StringIO.StringIO()
    conversion = pisa.pisaDocument(StringIO.StringIO(html.encode("ISO-8859-1")), pdf_buffer)
    if conversion.err:
        return http.HttpResponse('We had some errors<pre>%s</pre>' % cgi.escape(html))
    return http.HttpResponse(pdf_buffer.getvalue(), mimetype='application/pdf')
def ezpdf_sample(request):
    """Demo view: render a list of fake blog entries through entries.html."""
    blog_entries = [
        {
            'id': entry_id,
            'title': 'Playing with pisa 3.0.16 and dJango Template Engine',
            'body': 'This is a simple example..',
        }
        for entry_id in range(1, 10)
    ]
    return render_to_pdf('entries.html', {
        'pagesize': 'A4',
        'title': 'My amazing blog',
        'blog_entries': blog_entries})
| gpl-2.0 |
ryancoleman/autodock-vina | boost_1_54_0/tools/build/v2/test/core_parallel_multifile_actions_2.py | 45 | 1638 | #!/usr/bin/python
# Copyright 2008 Jurko Gospodnetic, Vladimir Prus
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Added to guard against a bug causing targets to be used before they
# themselves have finished building. This used to happen for targets built by a
# multi-file action that got triggered by another target, except when the
# target triggering the action was the first one in the list of targets
# produced by that action.
#
# Example:
# When target A and target B were declared as created by a single action with
# A being the first one listed, and target B triggered running that action
# then, while the action was still running, target A was already reporting as
# being built causing other targets depending on target A to be built
# prematurely.
import BoostBuild
t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("file.jam", """\
if $(NT)
{
SLEEP = @call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
actions link
{
$(SLEEP) 1
echo 001 - linked
}
link dll lib ;
actions install
{
echo 002 - installed
}
install installed_dll : dll ;
DEPENDS installed_dll : dll ;
DEPENDS all : lib installed_dll ;
""")
t.run_build_system(["-ffile.jam", "-j2"], stdout="""\
...found 4 targets...
...updating 3 targets...
link dll
001 - linked
install installed_dll
002 - installed
...updated 3 targets...
""")
t.cleanup()
| apache-2.0 |
sarthfrey/Texty | lib/werkzeug/filesystem.py | 162 | 2174 | # -*- coding: utf-8 -*-
"""
werkzeug.filesystem
~~~~~~~~~~~~~~~~~~~
Various utilities for the local filesystem.
:copyright: (c) 2015 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
import sys
import warnings
# We do not trust traditional unixes.
# Linux/BSD derive the filesystem encoding from the locale, which is
# frequently misconfigured, so treat those platforms as suspect.
has_likely_buggy_unicode_filesystem = \
    sys.platform.startswith('linux') or 'bsd' in sys.platform
def _is_ascii_encoding(encoding):
"""
Given an encoding this figures out if the encoding is actually ASCII (which
is something we don't actually want in most cases). This is necessary
because ASCII comes under many names such as ANSI_X3.4-1968.
"""
if encoding is None:
return False
try:
return codecs.lookup(encoding).name == 'ascii'
except LookupError:
return False
class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
    """The warning used by Werkzeug to signal a broken filesystem. Will only
    be used once per runtime."""
# One-shot guard so BrokenFilesystemWarning is emitted at most once per run.
_warned_about_filesystem_encoding = False


def get_filesystem_encoding():
    """
    Returns the filesystem encoding that should be used. Note that this is
    different from the Python understanding of the filesystem encoding which
    might be deeply flawed. Do not use this value against Python's unicode APIs
    because it might be different. See :ref:`filesystem-encoding` for the exact
    behavior.

    The concept of a filesystem encoding in generally is not something you
    should rely on. As such if you ever need to use this function except for
    writing wrapper code reconsider.
    """
    global _warned_about_filesystem_encoding
    rv = sys.getfilesystemencoding()
    # Fall back to UTF-8 when either (a) we are on a suspect unix and no
    # encoding is reported at all, or (b) the reported encoding is just an
    # alias of plain ASCII.  Note the precedence: (buggy and not rv) or ascii.
    if has_likely_buggy_unicode_filesystem and not rv \
            or _is_ascii_encoding(rv):
        if not _warned_about_filesystem_encoding:
            warnings.warn(
                'Detected a misconfigured UNIX filesystem: Will use UTF-8 as '
                'filesystem encoding instead of {!r}'.format(rv),
                BrokenFilesystemWarning)
            _warned_about_filesystem_encoding = True
        return 'utf-8'
    return rv
| apache-2.0 |
HybridF5/nova | nova/cells/scheduler.py | 6 | 10644 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Scheduler
"""
import copy
import time
from oslo_log import log as logging
from six.moves import range
from nova.cells import filters
from nova.cells import weights
from nova import compute
from nova.compute import instance_actions
from nova.compute import vm_states
from nova import conductor
import nova.conf
from nova.db import base
from nova import exception
from nova.i18n import _LE, _LI
from nova import objects
from nova.objects import base as obj_base
from nova.scheduler import utils as scheduler_utils
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class CellsScheduler(base.Base):
    """The cells scheduler.

    Chooses a target cell (this cell or a child) for new instances by running
    the configured cell filters and weighers, then either builds locally via
    the conductor or forwards the build request to the chosen child cell.
    """

    def __init__(self, msg_runner):
        super(CellsScheduler, self).__init__()
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        self.compute_api = compute.API()
        self.compute_task_api = conductor.ComputeTaskAPI()
        # Filter and weigher classes are configurable; instantiate them once.
        self.filter_handler = filters.CellFilterHandler()
        filter_classes = self.filter_handler.get_matching_classes(
            CONF.cells.scheduler_filter_classes)
        self.filters = [cls() for cls in filter_classes]
        self.weight_handler = weights.CellWeightHandler()
        weigher_classes = self.weight_handler.get_matching_classes(
            CONF.cells.scheduler_weight_classes)
        self.weighers = [cls() for cls in weigher_classes]

    def _create_instances_here(self, ctxt, instance_uuids, instance_properties,
            instance_type, image, security_groups, block_device_mapping):
        """Create instance DB records in this cell for the requested uuids.

        The conductor expects the rows to already exist; each new instance is
        also pushed up to the parent cell(s) via instance_update_at_top.
        """
        instance_values = copy.copy(instance_properties)
        # The parent may pass these metadata values as lists, and the
        # create call expects it to be a dict.
        instance_values['metadata'] = utils.instance_meta(instance_values)
        # Pop out things that will get set properly when re-creating the
        # instance record.
        instance_values.pop('id')
        instance_values.pop('name')
        instance_values.pop('info_cache')
        instance_values.pop('security_groups')
        instance_values.pop('flavor')
        # FIXME(danms): The instance was brutally serialized before being
        # sent over RPC to us.  Thus, the pci_requests value wasn't really
        # sent in a useful form.  Since it was getting ignored for cells
        # before it was part of the Instance, skip it now until cells RPC
        # is sending proper instance objects.
        instance_values.pop('pci_requests', None)
        # FIXME(danms): Same for ec2_ids
        instance_values.pop('ec2_ids', None)

        instances = []
        num_instances = len(instance_uuids)
        security_groups = (
            self.compute_api.security_group_api.populate_security_groups(
                security_groups))
        for i, instance_uuid in enumerate(instance_uuids):
            instance = objects.Instance(context=ctxt)
            instance.update(instance_values)
            instance.uuid = instance_uuid
            instance.flavor = instance_type
            instance.old_flavor = None
            instance.new_flavor = None
            instance = self.compute_api.create_db_entry_for_new_instance(
                ctxt,
                instance_type,
                image,
                instance,
                security_groups,
                block_device_mapping,
                num_instances, i)
            instances.append(instance)
            # Propagate the new instance to parent cells.
            self.msg_runner.instance_update_at_top(ctxt, instance)
        return instances

    def _create_action_here(self, ctxt, instance_uuids):
        """Record a 'create' instance action for each uuid.

        The conductor expects the action row to already exist when it starts
        building the instance.
        """
        for instance_uuid in instance_uuids:
            objects.InstanceAction.action_start(
                ctxt,
                instance_uuid,
                instance_actions.CREATE,
                want_result=False)

    def _get_possible_cells(self):
        """Return the child cells plus, when it has capacity data, this cell."""
        cells = self.state_manager.get_child_cells()
        our_cell = self.state_manager.get_my_state()
        # Include our cell in the list, if we have any capacity info
        if not cells or our_cell.capacities:
            cells.append(our_cell)
        return cells

    def _grab_target_cells(self, filter_properties):
        """Filter and weigh candidate cells; return them best-first.

        Returns None when a filter has fully handled scheduling, raises
        NoCellsAvailable when filtering leaves nothing.
        """
        cells = self._get_possible_cells()
        cells = self.filter_handler.get_filtered_objects(self.filters, cells,
                                                         filter_properties)
        # NOTE(comstud): I know this reads weird, but the 'if's are nested
        # this way to optimize for the common case where 'cells' is a list
        # containing at least 1 entry.
        if not cells:
            if cells is None:
                # None means to bypass further scheduling as a filter
                # took care of everything.
                return
            raise exception.NoCellsAvailable()

        weighted_cells = self.weight_handler.get_weighed_objects(
            self.weighers, cells, filter_properties)
        LOG.debug("Weighted cells: %(weighted_cells)s",
                  {'weighted_cells': weighted_cells})
        target_cells = [cell.obj for cell in weighted_cells]
        return target_cells

    def _build_instances(self, message, target_cells, instance_uuids,
            build_inst_kwargs):
        """Attempt to build instance(s) or send msg to child cell."""
        ctxt = message.ctxt
        instance_properties = obj_base.obj_to_primitive(
            build_inst_kwargs['instances'][0])
        filter_properties = build_inst_kwargs['filter_properties']
        instance_type = filter_properties['instance_type']
        image = build_inst_kwargs['image']
        security_groups = build_inst_kwargs['security_groups']
        block_device_mapping = build_inst_kwargs['block_device_mapping']

        LOG.debug("Building instances with routing_path=%(routing_path)s",
                  {'routing_path': message.routing_path})

        # Try each candidate cell in weighted order; stop at the first one
        # we can hand the build off to.
        for target_cell in target_cells:
            try:
                if target_cell.is_me:
                    # Need to create instance DB entries as the conductor
                    # expects that the instance(s) already exists.
                    instances = self._create_instances_here(ctxt,
                        instance_uuids, instance_properties, instance_type,
                        image, security_groups, block_device_mapping)
                    build_inst_kwargs['instances'] = instances
                    # Need to record the create action in the db as the
                    # conductor expects it to already exist.
                    self._create_action_here(ctxt, instance_uuids)
                    self.compute_task_api.build_instances(ctxt,
                        **build_inst_kwargs)
                    return
                # Forward the request down to the child cell.
                self.msg_runner.build_instances(ctxt, target_cell,
                        build_inst_kwargs)
                return
            except Exception:
                LOG.exception(_LE("Couldn't communicate with cell '%s'"),
                              target_cell.name)
        # FIXME(comstud): Would be nice to kick this back up so that
        # the parent cell could retry, if we had a parent.
        LOG.error(_LE("Couldn't communicate with any cells"))
        raise exception.NoCellsAvailable()

    def build_instances(self, message, build_inst_kwargs):
        """Entry point: schedule and build the instances in *build_inst_kwargs*."""
        image = build_inst_kwargs['image']
        instance_uuids = [inst['uuid'] for inst in
                          build_inst_kwargs['instances']]
        instances = build_inst_kwargs['instances']
        request_spec = scheduler_utils.build_request_spec(message.ctxt,
                                                          image, instances)
        filter_properties = copy.copy(build_inst_kwargs['filter_properties'])
        filter_properties.update({'context': message.ctxt,
                                  'scheduler': self,
                                  'routing_path': message.routing_path,
                                  'host_sched_kwargs': build_inst_kwargs,
                                  'request_spec': request_spec})
        self._schedule_build_to_cells(message, instance_uuids,
                filter_properties, self._build_instances, build_inst_kwargs)

    def _schedule_build_to_cells(self, message, instance_uuids,
            filter_properties, method, method_kwargs):
        """Pick a cell where we should create a new instance(s).

        Retries scheduling up to CONF.cells.scheduler_retries times when no
        cell is available; on any other failure marks the instances as ERROR
        at the top cell.
        """
        try:
            for i in range(max(0, CONF.cells.scheduler_retries) + 1):
                try:
                    target_cells = self._grab_target_cells(filter_properties)
                    if target_cells is None:
                        # a filter took care of scheduling.  skip.
                        return

                    return method(message, target_cells, instance_uuids,
                            method_kwargs)
                except exception.NoCellsAvailable:
                    if i == max(0, CONF.cells.scheduler_retries):
                        raise
                    sleep_time = max(1, CONF.cells.scheduler_retry_delay)
                    LOG.info(_LI("No cells available when scheduling.  Will "
                                 "retry in %(sleep_time)s second(s)"),
                             {'sleep_time': sleep_time})
                    time.sleep(sleep_time)
                    continue
        except Exception:
            LOG.exception(_LE("Error scheduling instances %(instance_uuids)s"),
                          {'instance_uuids': instance_uuids})
            ctxt = message.ctxt
            for instance_uuid in instance_uuids:
                # Tell the top cell the instance failed, then best-effort
                # persist the ERROR state locally.
                instance = objects.Instance(context=ctxt, uuid=instance_uuid,
                                            vm_state=vm_states.ERROR)
                self.msg_runner.instance_update_at_top(ctxt, instance)
                try:
                    instance.vm_state = vm_states.ERROR
                    instance.save()
                except Exception:
                    pass
| apache-2.0 |
EmadMokhtar/Django | django/core/mail/backends/console.py | 132 | 1402 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """Email backend that writes messages to a stream instead of sending them.

    Defaults to stdout; tests can pass a ``stream`` kwarg to capture output.
    """

    def __init__(self, *args, **kwargs):
        self.stream = kwargs.pop('stream', sys.stdout)
        # Reentrant lock so concurrent send_messages() calls don't interleave.
        self._lock = threading.RLock()
        super().__init__(*args, **kwargs)

    def write_message(self, message):
        """Write one message to the stream, followed by a 79-dash divider."""
        msg = message.message()
        msg_data = msg.as_bytes()
        # Decode with the message's charset so the console shows readable text.
        charset = msg.get_charset().get_output_charset() if msg.get_charset() else 'utf-8'
        msg_data = msg_data.decode(charset)
        self.stream.write('%s\n' % msg_data)
        self.stream.write('-' * 79)
        self.stream.write('\n')

    def send_messages(self, email_messages):
        """Write all messages to the stream in a thread-safe way."""
        if not email_messages:
            return
        msg_count = 0
        with self._lock:
            try:
                stream_created = self.open()
                for message in email_messages:
                    self.write_message(message)
                    self.stream.flush()  # flush after each message
                    msg_count += 1
                if stream_created:
                    self.close()
            except Exception:
                if not self.fail_silently:
                    raise
        return msg_count
| mit |
t0mk/ansible | lib/ansible/modules/source_control/git.py | 8 | 42135 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
- Manage I(git) checkouts of repositories to deploy files or software.
options:
repo:
required: true
aliases: [ name ]
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
dest:
required: true
description:
- Absolute path of where the repository should be checked out to.
This parameter is required, unless C(clone) is set to C(no)
This change was made in version 1.8.3. Prior to this version,
the C(dest) parameter was always required.
version:
required: false
default: "HEAD"
description:
- What version of the repository to check out. This can be the
the literal string C(HEAD), a branch name, a tag name.
It can also be a I(SHA-1) hash, in which case C(refspec) needs
to be specified if the given revision is not already available.
accept_hostkey:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.5"
description:
- if C(yes), adds the hostkey for the repo url if not already
added. If ssh_opts contains "-o StrictHostKeyChecking=no",
this parameter is ignored.
ssh_opts:
required: false
default: None
version_added: "1.5"
description:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
key_file:
required: false
default: None
version_added: "1.5"
description:
- Specify an optional private key file to use for the checkout.
reference:
required: false
default: null
version_added: "1.4"
description:
- Reference repository (see "git clone --reference ...")
remote:
required: false
default: "origin"
description:
- Name of the remote.
refspec:
required: false
default: null
version_added: "1.9"
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the 'git fetch' command.
An example value could be "refs/meta/config".
force:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.7"
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled. Prior to 1.9, the default was
`yes`
depth:
required: false
default: null
version_added: "1.2"
description:
- Create a shallow clone with a history truncated to the specified
number or revisions. The minimum possible value is C(1), otherwise
ignored. Needs I(git>=1.9.1) to work correctly.
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.9"
description:
- If C(no), do not clone the repository if it does not exist locally
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.2"
description:
- If C(no), do not retrieve new revisions from the origin repository
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
bare:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.4"
description:
- if C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
umask:
required: false
default: null
version_added: "2.2"
description:
- The umask to set before doing any checkouts, or any other
repository maintenance.
recursive:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- if C(no), repository will be cloned without the --recursive
option, skipping sub-modules.
track_submodules:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "1.8"
description:
- if C(yes), submodules will track the latest commit on their
master branch (or other branch specified in .gitmodules). If
C(no), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the --remote flag
to git submodule update.
verify_commit:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
description:
- if C(yes), when cloning or checking out a C(version) verify the
signature of a GPG signed commit. This requires C(git) version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be trusted in the GPG trustdb.
requirements:
- git>=1.7.1 (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
- git:
repo: git://foosball.example.org/path/to/repo.git
dest: /srv/checkout
version: release-0.22
# Example read-write git checkout from github
- git:
repo: ssh://git@github.com/mylogin/hello.git
dest: /home/mylogin/hello
# Example just ensuring the repo checkout exists
- git:
repo: git://foosball.example.org/path/to/repo.git
dest: /srv/checkout
update: no
# Example just get information about the repository whether or not it has
# already been cloned locally.
- git:
repo: git://foosball.example.org/path/to/repo.git
dest: /srv/checkout
clone: no
update: no
# Example checkout a github repo and use refspec to fetch all pull requests
- git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
refspec: '+refs/pull/*:refs/heads/*'
'''
RETURN = '''
after:
description: last commit revision of the repository retrived during the update
returned: success
type: string
sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
before:
description: commit revision before the repository was updated, "null" for new repository
returned: success
type: string
sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
remote_url_changed:
description: Contains True or False whether or not the remote URL was changed.
returned: success
type: boolean
sample: True
warnings:
description: List of warnings if requested features were not available due to a too old git version.
returned: error
type: string
sample: Your git version is too old to fully support the depth argument. Falling back to full checkouts.
'''
import os
import re
import shlex
import stat
import sys
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, get_module_path
from ansible.module_utils.known_hosts import add_git_host_key
from ansible.module_utils.six import b, string_types
from ansible.module_utils._text import to_native
def head_splitter(headfile, remote, module=None, fail_on_error=False):
    """Extract the branch name from a git HEAD file for the given remote.

    Reads the first line of *headfile* (e.g. ``ref: refs/remotes/origin/master``),
    strips the ``refs/remotes/<remote>`` prefix and returns the trailing branch
    name, or None when the file is missing or unparseable.  When
    *fail_on_error* is set and *module* is provided, errors fail the module.
    """
    # https://github.com/ansible/ansible-modules-core/pull/907
    res = None
    if os.path.exists(headfile):
        rawdata = None
        try:
            # 'with' guarantees the file is closed even if readline() raises
            # (the original open/readline/close leaked the fd on error).
            with open(headfile, 'r') as f:
                rawdata = f.readline()
        except (IOError, OSError):
            # was a bare 'except:' which also swallowed SystemExit and
            # KeyboardInterrupt; only I/O errors are expected here
            if fail_on_error and module:
                module.fail_json(msg="Unable to read %s" % headfile)
        if rawdata:
            try:
                rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
                refparts = rawdata.split(' ')
                newref = refparts[-1]
                nrefparts = newref.split('/', 2)
                res = nrefparts[-1].rstrip('\n')
            except (AttributeError, IndexError):
                if fail_on_error and module:
                    module.fail_json(msg="Unable to split head from '%s'" % rawdata)
    return res
def unfrackgitpath(path):
    """Canonicalize *path*: expand env vars and ~, resolve symlinks, normalize.

    None passes through unchanged so optional path parameters keep working.
    """
    if path is None:
        return None

    # copied from ansible.utils.path
    expanded = os.path.expandvars(path)
    expanded = os.path.expanduser(expanded)
    return os.path.normpath(os.path.realpath(expanded))
def get_submodule_update_params(module, git_path, cwd):
    """Discover the flags supported by ``git submodule update``.

    Runs ``git submodule update --help`` and scrapes the usage line from its
    stderr, returning the option names without leading dashes.
    """
    #or: git submodule [--quiet] update [--init] [-N|--no-fetch]
    #[-f|--force] [--rebase] [--reference <repository>] [--merge]
    #[--recursive] [--] [<path>...]
    params = []

    # run a bad submodule command to get valid params
    cmd = "%s submodule update --help" % (git_path)
    rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
    usage_line = None
    for line in stderr.split('\n'):
        if 'git submodule [--quiet] update ' in line:
            usage_line = line

    if usage_line:
        cleaned = usage_line.replace('[', '').replace(']', '').replace('|', ' ')
        for token in shlex.split(cleaned):
            if token.startswith('--'):
                params.append(token.replace('--', ''))

    return params
def write_ssh_wrapper():
    """Create a temporary GIT_SSH wrapper script and return its path.

    The script execs ssh, adding ``-i "$GIT_KEY"`` and ``$GIT_SSH_OPTS`` when
    those environment variables are set (see set_git_ssh()).
    """
    module_dir = get_module_path()
    try:
        # make sure we have full permission to the module_dir, which
        # may not be the case if we're sudo'ing to a non-root user
        if os.access(module_dir, os.W_OK|os.R_OK|os.X_OK):
            fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/')
        else:
            raise OSError
    except (IOError, OSError):
        # fall back to the system temp directory
        fd, wrapper_path = tempfile.mkstemp()
    fh = os.fdopen(fd, 'w+b')
    template = b("""#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
    BASEOPTS=""
else
    BASEOPTS=$GIT_SSH_OPTS
fi

if [ -z "$GIT_KEY" ]; then
    ssh $BASEOPTS "$@"
else
    ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
fi
""")
    fh.write(template)
    fh.close()
    # make the wrapper executable by its owner
    st = os.stat(wrapper_path)
    os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
    return wrapper_path
def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
    """Point git at our SSH wrapper via environment variables.

    GIT_SSH always ends up set to *ssh_wrapper*; GIT_KEY and GIT_SSH_OPTS are
    set only when a key file / extra options were supplied.  Stale values from
    a previous invocation are cleared first.
    """
    for name in ("GIT_SSH", "GIT_KEY", "GIT_SSH_OPTS"):
        if os.environ.get(name):
            del os.environ[name]

    os.environ["GIT_SSH"] = ssh_wrapper
    if key_file:
        os.environ["GIT_KEY"] = key_file
    if ssh_opts:
        os.environ["GIT_SSH_OPTS"] = ssh_opts
def get_version(module, git_path, dest, ref="HEAD"):
    ''' samples the version of the git repo '''
    rc, stdout, stderr = module.run_command(
        "%s rev-parse %s" % (git_path, ref), cwd=dest)
    return to_native(stdout).rstrip('\n')
def get_submodule_versions(git_path, module, dest, version='HEAD'):
    """Return a dict mapping submodule path -> SHA-1 of *version* within it.

    Parses the interleaved "Entering '<path>'" / bare-hash output produced by
    ``git submodule foreach git rev-parse <version>``.
    """
    cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg='Unable to determine hashes of submodules', stdout=out, stderr=err, rc=rc)
    submodules = {}
    subm_name = None
    for line in out.splitlines():
        if line.startswith("Entering '"):
            # strip the "Entering '" prefix and the trailing quote
            subm_name = line[10:-1]
        elif len(line.strip()) == 40:
            # a bare 40-char line is the SHA-1 for the submodule announced above;
            # a hash with no preceding "Entering" line is a protocol violation
            if subm_name is None:
                module.fail_json()
            submodules[subm_name] = line.strip()
            subm_name = None
        else:
            module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
    if subm_name is not None:
        module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)

    return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
        reference, refspec, verify_commit):
    ''' makes a new git repo if it does not already exist '''
    dest_dirname = os.path.dirname(dest)
    try:
        os.makedirs(dest_dirname)
    except:
        # parent directory already exists; any real problem will be
        # surfaced by the clone command below
        pass
    cmd = [ git_path, 'clone' ]

    if bare:
        cmd.append('--bare')
    else:
        cmd.extend([ '--origin', remote ])
    if depth:
        # --depth only makes sense for refs git can fetch directly:
        # HEAD, a branch, a tag, or an explicit refspec.
        if version == 'HEAD' or refspec:
            cmd.extend([ '--depth', str(depth) ])
        elif is_remote_branch(git_path, module, dest, repo, version) \
                or is_remote_tag(git_path, module, dest, repo, version):
            cmd.extend([ '--depth', str(depth) ])
            cmd.extend(['--branch', version])
        else:
            # only use depth if the remote object is branch or tag (i.e. fetchable)
            module.warn("Ignoring depth argument. "
                        "Shallow clones are only available for "
                        "HEAD, branches, tags or in combination with refspec.")
    if reference:
        cmd.extend([ '--reference', str(reference) ])
    cmd.extend([ repo, dest ])
    module.run_command(cmd, check_rc=True, cwd=dest_dirname)
    if bare:
        # --origin is invalid with --bare, so add the remote manually
        if remote != 'origin':
            module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)

    if refspec:
        # fetch the extra refspec (e.g. refs/meta/config or PR refs)
        cmd = [git_path, 'fetch']
        if depth:
            cmd.extend([ '--depth', str(depth) ])
        cmd.extend([remote, refspec])
        module.run_command(cmd, check_rc=True, cwd=dest)

    if verify_commit:
        verify_commit_sign(git_path, module, dest, version)
def has_local_mods(module, git_path, dest, bare):
    """Return True when the working tree has changes to tracked files.

    Untracked files ('??' porcelain status) do not count as modifications;
    bare repositories have no working tree and therefore never do.
    """
    if bare:
        return False

    rc, stdout, stderr = module.run_command(
        "%s status --porcelain" % (git_path), cwd=dest)
    changed = [line for line in stdout.splitlines()
               if not re.search('^\\?\\?.*$', line)]
    return len(changed) > 0
def reset(git_path, module, dest):
    '''
    Resets the index and working tree to HEAD.
    Discards any changes to tracked files in working
    tree since that commit.
    '''
    return module.run_command(
        "%s reset --hard HEAD" % (git_path,), check_rc=True, cwd=dest)
def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
    ''' Return the difference between 2 versions '''
    if before is None:
        # fresh clone: there is no previous revision to diff against
        return { 'prepared': '>> Newly checked out %s' % after }
    elif before != after:
        # Ensure we have the object we are referring to during git diff !
        git_version_used = git_version(git_path, module)
        fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
        cmd = '%s diff %s %s' % (git_path, before, after)
        (rc, out, err) = module.run_command(cmd, cwd=dest)
        if rc == 0 and out:
            return { 'prepared': out }
        elif rc == 0:
            return { 'prepared': '>> No visual differences between %s and %s' % (before, after) }
        elif err:
            return { 'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err) }
        else:
            return { 'prepared': '>> Failed to get proper diff between %s and %s' % (before, after) }
    # before == after: nothing changed, nothing to report
    return {}
def get_remote_head(git_path, module, dest, version, remote, bare):
    """Resolve *version* to a SHA1 on *remote* via ``git ls-remote``.

    When no local clone exists yet, callers pass the repository URL as
    *remote* (detected by comparing against module.params['repo']) and the
    command runs without a working directory.  If *version* is neither a
    remote branch nor a tag it is assumed to already be a SHA1 and is
    returned unchanged.
    """
    cloning = False
    cwd = None
    tag = False
    if remote == module.params['repo']:
        # *remote* is the repo URL itself: we are about to clone, so there
        # is no local repo directory to run the command in.
        cloning = True
    else:
        cwd = dest
    if version == 'HEAD':
        if cloning:
            # cloning the repo, just get the remote's HEAD version
            cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
        else:
            head_branch = get_head_branch(git_path, module, dest, remote, bare)
            cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
    elif is_remote_branch(git_path, module, dest, remote, version):
        cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
    elif is_remote_tag(git_path, module, dest, remote, version):
        tag = True
        # Trailing '*' also matches the dereferenced '<tag>^{}' entry of an
        # annotated tag.
        cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
    else:
        # appears to be a sha1.  return as-is since it appears
        # cannot check for a specific sha1 on remote
        return version
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
    if len(out) < 1:
        module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
    out = to_native(out)
    if tag:
        # Find the dereferenced tag if this is an annotated tag.
        # NOTE(review): the loop variable shadows the boolean `tag` flag;
        # harmless here because the flag is not read again afterwards.
        for tag in out.split('\n'):
            if tag.endswith(version + '^{}'):
                # Annotated tag: prefer the commit it points at.
                out = tag
                break
            elif tag.endswith(version):
                out = tag
    # First whitespace-separated field of an ls-remote line is the SHA1.
    rev = out.split()[0]
    return rev
def is_remote_tag(git_path, module, dest, remote, version):
    """Return True if *version* names a tag on *remote*."""
    cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
    # ls-remote prints nothing when the ref does not exist, so a plain
    # containment test is sufficient; return it directly instead of the
    # redundant `if ...: return True / else: return False` form.
    return to_native(version, errors='surrogate_or_strict') in out
def get_branches(git_path, module, dest):
    """Return every local and remote-tracking branch line for *dest*.

    Output lines keep the '* ' marker on the current branch; blank lines
    are dropped and surrounding whitespace is stripped.
    """
    cmd = '%s branch --no-color -a' % (git_path,)
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
    # One branch per non-blank output line.
    return [line.strip() for line in out.split('\n') if line.strip()]
def get_tags(git_path, module, dest):
    """Return the list of tag names in the repository at *dest*."""
    cmd = '%s tag' % (git_path,)
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
    # 'git tag' emits one tag per line; skip blank lines.
    return [line.strip() for line in to_native(out).split('\n') if line.strip()]
def is_remote_branch(git_path, module, dest, remote, version):
    """Return True if *version* names a branch head on *remote*."""
    cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
    # ls-remote prints nothing for a missing ref; return the containment
    # test directly instead of `if ...: return True / else: return False`.
    return to_native(version, errors='surrogate_or_strict') in out
def is_local_branch(git_path, module, dest, branch):
    """Return True if *branch* exists as a local branch in *dest*.

    ``git branch -a`` prefixes the currently checked-out branch with
    '* ', so both the plain and the starred forms are accepted.  (The
    original also built a pointless ``'%s' % branch`` copy.)
    """
    branches = get_branches(git_path, module, dest)
    return branch in branches or '* %s' % branch in branches
def is_not_a_branch(git_path, module, dest):
    """Return True when HEAD is detached (not on any named branch).

    A detached HEAD shows up in ``git branch`` output as the current
    ('* ') entry containing 'no branch' or 'detached from'.
    """
    return any(
        entry.startswith('* ') and
        ('no branch' in entry or 'detached from' in entry)
        for entry in get_branches(git_path, module, dest))
def get_head_branch(git_path, module, dest, remote, bare=False):
    '''
    Determine what branch HEAD is associated with.  This is partly
    taken from lib/ansible/utils/__init__.py.  It finds the correct
    path to .git/HEAD and reads from that file the branch that HEAD is
    associated with.  In the case of a detached HEAD, this will look
    up the branch in .git/refs/remotes/<remote>/HEAD.

    Returns '' when the repository path cannot be determined.
    '''
    if bare:
        repo_path = dest
    else:
        repo_path = os.path.join(dest, '.git')
    # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
    if os.path.isfile(repo_path):
        try:
            git_conf = open(repo_path, 'rb')
            for line in git_conf:
                # A submodule's .git file contains 'gitdir: <path>'.
                config_val = line.split(b(':'), 1)
                if config_val[0].strip() == b('gitdir'):
                    gitdir = to_native(config_val[1].strip(), errors='surrogate_or_strict')
                    break
            else:
                # for/else: the loop finished without finding a gitdir line.
                # No repo path found
                return ''
            # There is a possibility the .git file to have an absolute path.
            if os.path.isabs(gitdir):
                repo_path = gitdir
            else:
                # Relative gitdir: resolve it against the superproject.
                repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
        except (IOError, AttributeError):
            # No repo path found
            return ''
    # Read .git/HEAD for the name of the branch.
    # If we're in a detached HEAD state, look up the branch associated with
    # the remote HEAD in .git/refs/remotes/<remote>/HEAD
    headfile = os.path.join(repo_path, "HEAD")
    if is_not_a_branch(git_path, module, dest):
        headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
    branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
    return branch
def get_remote_url(git_path, module, dest, remote):
    '''Return the URL configured for *remote*, or None if it cannot be read.'''
    rc, out, err = module.run_command(
        [git_path, 'ls-remote', '--get-url', remote], cwd=dest)
    if rc != 0:
        # Most likely 'ls-remote --get-url' is not available in this
        # version of git; treat the URL as unknown.
        return None
    return to_native(out).rstrip('\n')
def set_remote_url(git_path, module, repo, dest, remote):
    '''Point *remote* at the URL *repo*, if it is not already.

    Returns True when the URL was actually changed, False otherwise.
    '''
    # Return if remote URL isn't changing.
    remote_url = get_remote_url(git_path, module, dest, remote)
    if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
        return False
    command = [git_path, 'remote', 'set-url', remote, repo]
    (rc, out, err) = module.run_command(command, cwd=dest)
    if rc != 0:
        label = "set a new url %s for %s" % (repo, remote)
        module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
    # Return False if remote_url is None to maintain previous behavior
    # for Git versions prior to 1.7.5 that lack required functionality.
    return remote_url is not None
def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used):
    '''Download objects and refs from *remote* into the repo at *dest*.

    With *depth* set, tries to compute the minimal refspec needed to make
    the requested *version* checkoutable; otherwise (or when *version* is
    an opaque SHA1) falls back to a full fetch.  Fails the module on any
    git error.
    '''
    set_remote_url(git_path, module, repo, dest, remote)
    commands = []
    fetch_str = 'download remote objects and refs'
    fetch_cmd = [git_path, 'fetch']

    refspecs = []
    if depth:
        # try to find the minimal set of refs we need to fetch to get a
        # successful checkout
        currenthead = get_head_branch(git_path, module, dest, remote)
        if refspec:
            refspecs.append(refspec)
        elif version == 'HEAD':
            refspecs.append(currenthead)
        elif is_remote_branch(git_path, module, dest, repo, version):
            if currenthead != version:
                # this workaround is only needed for older git versions
                # 1.8.3 is broken, 1.9.x works
                # ensure that remote branch is available as both local and remote ref
                refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
                refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
            else:
                refspecs.append(version)
        elif is_remote_tag(git_path, module, dest, repo, version):
            refspecs.append('+refs/tags/'+version+':refs/tags/'+version)
        if refspecs:
            # if refspecs is empty, i.e. version is neither heads nor tags
            # assume it is a version hash
            # fall back to a full clone, otherwise we might not be able to checkout
            # version
            fetch_cmd.extend(['--depth', str(depth)])

    if not depth or not refspecs:
        # don't try to be minimalistic but do a full clone
        # also do this if depth is given, but version is something that can't be fetched directly
        if bare:
            refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
        else:
            # ensure all tags are fetched
            if git_version_used >= LooseVersion('1.9'):
                fetch_cmd.append('--tags')
            else:
                # old git versions have a bug in --tags that prevents updating existing tags
                commands.append((fetch_str, fetch_cmd + [remote]))
                refspecs = ['+refs/tags/*:refs/tags/*']
        if refspec:
            refspecs.append(refspec)

    fetch_cmd.extend([remote])

    commands.append((fetch_str, fetch_cmd + refspecs))

    for (label,command) in commands:
        (rc,out,err) = module.run_command(command, cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
def submodules_fetch(git_path, module, remote, track_submodules, dest):
    '''Fetch submodule updates and report whether any submodule changed.

    Returns True when a new submodule appeared or an existing one is out
    of date (relative to its tracked branch when *track_submodules*, or to
    the superproject's recorded commit otherwise).
    '''
    changed = False

    if not os.path.exists(os.path.join(dest, '.gitmodules')):
        # no submodules
        return changed

    gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
    for line in gitmodules_file:
        # Check for new submodules
        if not changed and line.strip().startswith('path'):
            path = line.split('=', 1)[1].strip()
            # Check that dest/path/.git exists
            if not os.path.exists(os.path.join(dest, path, '.git')):
                changed = True

        # add the submodule repo's hostkey
        if line.strip().startswith('url'):
            repo = line.split('=', 1)[1].strip()
            if module.params['ssh_opts'] is not None:
                if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
                    add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
            else:
                add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])

    # Check for updates to existing modules
    if not changed:
        # Fetch updates
        begin = get_submodule_versions(git_path, module, dest)

        cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
        (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to fetch submodules: %s" % out + err)

        if track_submodules:
            # Compare against submodule HEAD
            ### FIXME: determine this from .gitmodules
            version = 'master'
            after = get_submodule_versions(git_path, module, dest, '%s/%s'
                    % (remote, version))
            if begin != after:
                changed = True
        else:
            # Compare against the superproject's expectation
            cmd = [git_path, 'submodule', 'status']
            (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
            if rc != 0:
                module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
            for line in out.splitlines():
                # A leading space means the submodule is at the recorded
                # commit; anything else ('+', '-', 'U') means a difference.
                if line[0] != ' ':
                    changed = True
                    break
    return changed
def submodule_update(git_path, module, dest, track_submodules, force=False):
    ''' init and update any submodules

    Runs 'git submodule sync' followed by 'git submodule update
    --init --recursive' (adding --remote when tracking branches is
    requested and supported, and --force when *force* is set).
    Returns the (rc, out, err) of the update command.
    '''
    # get the valid submodule params
    params = get_submodule_update_params(module, git_path, dest)

    # skip submodule commands if .gitmodules is not present
    if not os.path.exists(os.path.join(dest, '.gitmodules')):
        return (0, '', '')
    cmd = [ git_path, 'submodule', 'sync' ]
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
    if 'remote' in params and track_submodules:
        cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ]
    else:
        cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ]
    if force:
        cmd.append('--force')
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
    return (rc, out, err)
def set_remote_branch(git_path, module, dest, remote, version, depth):
    """Fetch a remote branch so that it exists as local and tracking refs.

    Assumes the branch does not yet exist locally (and is therefore not
    checked out).  'git remote set-branches' would be the natural tool,
    but it is not available in git 1.7.1 (CentOS 6), so an explicit
    refspec fetch is used instead.
    """
    local_ref = "+refs/heads/%s:refs/heads/%s" % (version, version)
    tracking_ref = '+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
    cmd = "%s fetch --depth=%s %s %s %s" % (git_path, depth, remote, local_ref, tracking_ref)
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
def switch_version(git_path, module, dest, remote, version, verify_commit, depth):
    """Check out the requested *version* (branch, tag or SHA1) in *dest*.

    For branches the working tree is additionally hard-reset to the
    remote-tracking ref so local history matches the remote.  Optionally
    verifies the GPG signature of the resulting commit/tag.
    Returns the (rc, out, err) of the final git command.
    """
    cmd = ''
    if version == 'HEAD':
        branch = get_head_branch(git_path, module, dest, remote)
        (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to checkout branch %s" % branch,
                             stdout=out, stderr=err, rc=rc)
        cmd = "%s reset --hard %s" % (git_path, remote)
    else:
        # FIXME check for local_branch first, should have been fetched already
        if is_remote_branch(git_path, module, dest, remote, version):
            if depth and not is_local_branch(git_path, module, dest, version):
                # git clone --depth implies --single-branch, which makes
                # the checkout fail if the version changes
                # fetch the remote branch, to be able to check it out next
                set_remote_branch(git_path, module, dest, remote, version, depth)
            if not is_local_branch(git_path, module, dest, version):
                # First checkout of this branch: create it tracking the remote.
                cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
            else:
                (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
                if rc != 0:
                    module.fail_json(msg="Failed to checkout branch %s" % version,
                                     stdout=out, stderr=err, rc=rc)
                cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
        else:
            # Tag or SHA1: a plain forced checkout (detached HEAD).
            cmd = "%s checkout --force %s" % (git_path, version)
    (rc, out1, err1) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        if version != 'HEAD':
            module.fail_json(msg="Failed to checkout %s" % (version),
                             stdout=out1, stderr=err1, rc=rc, cmd=cmd)
        else:
            module.fail_json(msg="Failed to checkout branch %s" % (branch),
                             stdout=out1, stderr=err1, rc=rc, cmd=cmd)
    if verify_commit:
        verify_commit_sign(git_path, module, dest, version)
    return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version):
    """Verify the GPG signature of *version*.

    Uses 'git verify-tag' when *version* is a tag, otherwise
    'git verify-commit'; fails the module when verification fails.
    Returns the (rc, out, err) of the verify command.
    """
    if version in get_tags(git_path, module, dest):
        git_sub = "verify-tag"
    else:
        git_sub = "verify-commit"
    cmd = "%s %s %s" % (git_path, git_sub, version)
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
    return (rc, out, err)
def git_version(git_path, module):
    """Return the installed git version as a LooseVersion, or None.

    None is returned when 'git --version' fails or its output cannot be
    parsed, so callers must handle the missing-version case.
    """
    cmd = "%s --version" % git_path
    (rc, out, err) = module.run_command(cmd)
    if rc != 0:
        # one could fail_json here, but the version info is not that important, so let's try to fail only on actual git commands
        return None
    rematch = re.search('git version (.*)$', to_native(out))
    if not rematch:
        return None
    return LooseVersion(rematch.groups()[0])
# ===========================================
def main():
    """Entry point of the git module: clone/fetch/checkout as requested."""
    module = AnsibleModule(
        argument_spec = dict(
            dest=dict(type='path'),
            repo=dict(required=True, aliases=['name']),
            version=dict(default='HEAD'),
            remote=dict(default='origin'),
            refspec=dict(default=None),
            reference=dict(default=None),
            force=dict(default='no', type='bool'),
            depth=dict(default=None, type='int'),
            clone=dict(default='yes', type='bool'),
            update=dict(default='yes', type='bool'),
            verify_commit=dict(default='no', type='bool'),
            accept_hostkey=dict(default='no', type='bool'),
            key_file=dict(default=None, type='path', required=False),
            ssh_opts=dict(default=None, required=False),
            executable=dict(default=None, type='path'),
            bare=dict(default='no', type='bool'),
            recursive=dict(default='yes', type='bool'),
            track_submodules=dict(default='no', type='bool'),
            umask=dict(default=None, type='raw'),
        ),
        supports_check_mode=True
    )

    dest = module.params['dest']
    repo = module.params['repo']
    version = module.params['version']
    remote = module.params['remote']
    refspec = module.params['refspec']
    force = module.params['force']
    depth = module.params['depth']
    update = module.params['update']
    allow_clone = module.params['clone']
    bare = module.params['bare']
    verify_commit = module.params['verify_commit']
    reference = module.params['reference']
    git_path = module.params['executable'] or module.get_bin_path('git', True)
    key_file = module.params['key_file']
    ssh_opts = module.params['ssh_opts']
    umask = module.params['umask']

    result = dict(changed=False, warnings=list())

    # evaluate and set the umask before doing anything else
    if umask is not None:
        if not isinstance(umask, string_types):
            module.fail_json(msg="umask must be defined as a quoted octal integer")
        try:
            umask = int(umask, 8)
        except:
            # NOTE(review): bare except; int() here raises ValueError —
            # narrowing this would be safer but is left unchanged.
            module.fail_json(msg="umask must be an octal integer",
                             details=str(sys.exc_info()[1]))
        os.umask(umask)

    # Certain features such as depth require a file:/// protocol for path based urls
    # so force a protocol here ...
    if repo.startswith('/'):
        repo = 'file://' + repo

    # We screenscrape a huge amount of git commands so use C locale anytime we
    # call run_command()
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    gitconfig = None
    if not dest and allow_clone:
        module.fail_json(msg="the destination directory must be specified unless clone=no")
    elif dest:
        dest = os.path.abspath(dest)
        # The presence of this config file is what tells us whether a
        # repository already exists at dest.
        if bare:
            gitconfig = os.path.join(dest, 'config')
        else:
            gitconfig = os.path.join(dest, '.git', 'config')

    # create a wrapper script and export
    # GIT_SSH=<path> as an environment variable
    # for git to use the wrapper script
    ssh_wrapper = None
    if key_file or ssh_opts:
        ssh_wrapper = write_ssh_wrapper()
        set_git_ssh(ssh_wrapper, key_file, ssh_opts)
        module.add_cleanup_file(path=ssh_wrapper)

    # add the git repo's hostkey
    if module.params['ssh_opts'] is not None:
        if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
            add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
    else:
        add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])

    git_version_used = git_version(git_path, module)

    if depth is not None and git_version_used < LooseVersion('1.9.1'):
        result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.")
        depth = None

    recursive = module.params['recursive']
    track_submodules = module.params['track_submodules']

    result.update(before=None)

    local_mods = False
    need_fetch = True
    if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
        # if there is no git configuration, do a clone operation unless:
        # * the user requested no clone (they just want info)
        # * we're doing a check mode test
        # In those cases we do an ls-remote
        if module.check_mode or not allow_clone:
            remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
            result.update(changed=True, after=remote_head)
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        # there's no git config, so clone
        clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
        need_fetch = False
    elif not update:
        # Just return having found a repo already in the dest path
        # this does no checking that the repo is the actual repo
        # requested.
        result['before'] = get_version(module, git_path, dest)
        result.update(after=result['before'])
        module.exit_json(**result)
    else:
        # else do a pull
        local_mods = has_local_mods(module, git_path, dest, bare)
        result['before'] = get_version(module, git_path, dest)
        if local_mods:
            # failure should happen regardless of check mode
            if not force:
                module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
            # if force and in non-check mode, do a reset
            if not module.check_mode:
                reset(git_path, module, dest)
                result.update(changed=True, msg='Local modifications exist.')

        # exit if already at desired sha version
        if module.check_mode:
            remote_url = get_remote_url(git_path, module, dest, remote)
            remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
        else:
            remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
        result.update(remote_url_changed=remote_url_changed)

        if module.check_mode:
            remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
            result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
            # FIXME: This diff should fail since the new remote_head is not fetched yet?!
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        else:
            fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used)

        result['after'] = get_version(module, git_path, dest)

    # switch to version specified regardless of whether
    # we got new revisions from the repository
    if not bare:
        switch_version(git_path, module, dest, remote, version, verify_commit, depth)

    # Deal with submodules
    submodules_updated = False
    if recursive and not bare:
        submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
        if submodules_updated:
            result.update(submodules_changed=submodules_updated)

        if module.check_mode:
            result.update(changed=True, after=remote_head)
            module.exit_json(**result)

        # Switch to version specified
        submodule_update(git_path, module, dest, track_submodules, force=force)

    # determine if we changed anything
    result['after'] = get_version(module, git_path, dest)

    # NOTE(review): on the clone path `remote_url_changed` is never
    # assigned; this only works because `result['before']` is None there,
    # so the `or` chain short-circuits before reaching it.
    if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
        result.update(changed=True)
        if module._diff:
            diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
            if diff:
                result['diff'] = diff

    # cleanup the wrapper script
    if ssh_wrapper:
        try:
            os.remove(ssh_wrapper)
        except OSError:
            # No need to fail if the file already doesn't exist
            pass

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
beaufortfrancois/chromium-dashboard | server.py | 2 | 8809 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ericbidelman@chromium.org (Eric Bidelman)'
import json
import logging
import os
import webapp2
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import users
import common
import models
import settings
import http2push.http2push as http2push
def normalized_name(val):
  """Lower-case *val* and drop spaces and slashes (used for URL-safe slugs)."""
  return ''.join(ch for ch in val.lower() if ch not in ' /')
def first_of_milestone(feature_list, milestone, start=0):
  """Return the index of the first feature matching *milestone*, or -1.

  A feature matches when its 'shipped_milestone' (stringified) or its
  'impl_status_chrome' equals str(milestone).  Scanning begins at index
  *start*.
  """
  # range() instead of the Python-2-only xrange(): identical behavior on
  # Python 2 and keeps the code runnable on Python 3.
  for i in range(start, len(feature_list)):
    f = feature_list[i]
    if (str(f['shipped_milestone']) == str(milestone) or
        f['impl_status_chrome'] == str(milestone)):
      return i
  return -1
class MainHandler(http2push.PushHandler, common.ContentHandler, common.JSONHandler):
  """Catch-all request handler: serves the features pages, single-feature
  pages, metrics timelines, samples, and JSON/Atom variants of each."""

  def __get_omaha_data(self):
    """Fetch Chrome release data from omahaproxy, memcached for 24h."""
    omaha_data = memcache.get('omaha_data')
    if omaha_data is None:
      result = urlfetch.fetch('https://omahaproxy.appspot.com/all.json')
      if result.status_code == 200:
        omaha_data = json.loads(result.content)
        memcache.set('omaha_data', omaha_data, time=86400) # cache for 24hrs.
    return omaha_data

  def __annotate_first_of_milestones(self, feature_list):
    """Mark the first feature of each milestone/status bucket with
    'first_of_milestone' so the template can render section headers.
    Any failure is logged and swallowed (annotation is best-effort)."""
    try:
      omaha_data = self.__get_omaha_data()
      win_versions = omaha_data[0]['versions']

      # Latest major version comes from the first Windows channel entry.
      for v in win_versions:
        s = v.get('version') or v.get('prev_version')
        LATEST_VERSION = int(s.split('.')[0])
        break

      # TODO(ericbidelman) - memcache this calculation as part of models.py
      milestones = range(1, LATEST_VERSION + 1)
      milestones.reverse()
      versions = [
        models.IMPLEMENTATION_STATUS[models.NO_ACTIVE_DEV],
        models.IMPLEMENTATION_STATUS[models.PROPOSED],
        models.IMPLEMENTATION_STATUS[models.IN_DEVELOPMENT],
      ]
      versions.extend(milestones)
      versions.append(models.IMPLEMENTATION_STATUS[models.NO_LONGER_PURSUING])

      # feature_list is assumed sorted in the same bucket order, so each
      # search can resume where the previous one matched.
      last_good_idx = 0
      for i, version in enumerate(versions):
        idx = first_of_milestone(feature_list, version, start=last_good_idx)
        if idx != -1:
          feature_list[idx]['first_of_milestone'] = True
          last_good_idx = idx
    except Exception as e:
      logging.error(e)

  def __get_feature_list(self):
    """Return the chronological feature list, annotated for display."""
    feature_list = models.Feature.get_chronological() # Memcached
    self.__annotate_first_of_milestones(feature_list)
    return feature_list

  def get(self, path, feature_id=None):
    """Dispatch on the URL path; renders HTML, JSON or Atom."""
    # Default to features page.
    # TODO: remove later when we want an index.html
    if not path:
      return self.redirect('/features')

    # Default /metrics to CSS ranking.
    # TODO: remove later when we want /metrics/index.html
    if path == 'metrics' or path == 'metrics/css':
      return self.redirect('/metrics/css/popularity')

    # Remove trailing slash from URL and redirect. e.g. /metrics/ -> /metrics
    if feature_id == '':
      return self.redirect(self.request.path.rstrip('/'))

    template_data = {}

    push_urls = [] # URLs to push in this response.

    if path.startswith('features'):
      if path.endswith('.json'): # JSON request.
        feature_list = self.__get_feature_list()
        return common.JSONHandler.get(self, feature_list, formatted=True)
      elif path.endswith('.xml'): # Atom feed request.
        status = self.request.get('status', None)
        if status:
          feature_list = models.Feature.get_all_with_statuses(status.split(','))
        else:
          filterby = None
          category = self.request.get('category', None)

          # Support setting larger-than-default Atom feed sizes so that web
          # crawlers can use this as a full site feed.
          try:
            max_items = int(self.request.get('max-items',
                                             settings.RSS_FEED_LIMIT))
          except TypeError:
            max_items = settings.RSS_FEED_LIMIT

          if category is not None:
            for k,v in models.FEATURE_CATEGORIES.iteritems():
              normalized = normalized_name(v)
              if category == normalized:
                filterby = ('category =', k)
                break

          feature_list = models.Feature.get_all( # Memcached
              limit=max_items,
              filterby=filterby,
              order='-updated')

        return self.render_atom_feed('Features', feature_list)
      else:
        # if settings.PROD:
        #   feature_list = self.__get_feature_list()
        # else:
        #   result = urlfetch.fetch(
        #       self.request.scheme + '://' + self.request.host +
        #       '/static/js/mockdata.json')
        #   feature_list = json.loads(result.content)
        # template_data['features'] = json.dumps(
        #     feature_list, separators=(',',':'))

        template_data['categories'] = [
          (v, normalized_name(v)) for k,v in
          models.FEATURE_CATEGORIES.iteritems()]
        template_data['IMPLEMENTATION_STATUSES'] = json.dumps([
          {'key': k, 'val': v} for k,v in
          models.IMPLEMENTATION_STATUS.iteritems()])
        template_data['VENDOR_VIEWS'] = json.dumps([
          {'key': k, 'val': v} for k,v in
          models.VENDOR_VIEWS.iteritems()])
        template_data['WEB_DEV_VIEWS'] = json.dumps([
          {'key': k, 'val': v} for k,v in
          models.WEB_DEV_VIEWS.iteritems()])
        template_data['STANDARDS_VALS'] = json.dumps([
          {'key': k, 'val': v} for k,v in
          models.STANDARDIZATION.iteritems()])

        push_urls = http2push.use_push_manifest('push_manifest_features.json')

    elif path.startswith('feature'):
      feature = None
      try:
        feature = models.Feature.get_feature(int(feature_id))
      except TypeError:
        pass
      if feature is None:
        self.abort(404)
      template_data['feature'] = feature

    elif path.startswith('metrics/css/timeline'):
      properties = sorted(
          models.CssPropertyHistogram.get_all().iteritems(), key=lambda x:x[1])
      template_data['CSS_PROPERTY_BUCKETS'] = json.dumps(
          properties, separators=(',',':'))
    elif path.startswith('metrics/feature/timeline'):
      properties = sorted(
          models.FeatureObserverHistogram.get_all().iteritems(), key=lambda x:x[1])
      template_data['FEATUREOBSERVER_BUCKETS'] = json.dumps(
          properties, separators=(',',':'))
    elif path.startswith('omaha_data'):
      omaha_data = self.__get_omaha_data()
      return common.JSONHandler.get(self, omaha_data, formatted=True)
    elif path.startswith('samples'):
      feature_list = models.Feature.get_shipping_samples() # Memcached
      if path.endswith('.json'): # JSON request.
        return common.JSONHandler.get(self, feature_list, formatted=True)
      elif path.endswith('.xml'): # Atom feed request.
        # Support setting larger-than-default Atom feed sizes so that web
        # crawlers can use this as a full site feed.
        try:
          max_items = int(self.request.get('max-items',
                                           settings.RSS_FEED_LIMIT))
        except TypeError:
          max_items = settings.RSS_FEED_LIMIT

        return self.render_atom_feed('Samples', feature_list)
      else:
        template_data['FEATURES'] = json.dumps(feature_list, separators=(',',':'))
        template_data['CATEGORIES'] = [
          (v, normalized_name(v)) for k,v in
          models.FEATURE_CATEGORIES.iteritems()]
        template_data['categories'] = dict([
          (v, normalized_name(v)) for k,v in
          models.FEATURE_CATEGORIES.iteritems()])

    if path.startswith('metrics/'):
      push_urls = http2push.use_push_manifest('push_manifest_metrics.json')

    # Add Link rel=preload header for h2 push on .html file requests.
    if push_urls:
      self.response.headers.add_header(
          'Link', self._generate_link_preload_headers(push_urls))

    self.render(data=template_data, template_path=os.path.join(path + '.html'))
# Main URL routes.  Both patterns map to MainHandler; the first captures an
# optional numeric id (e.g. /feature/123), the second a bare path.
routes = [
  ('/(.*)/([0-9]*)', MainHandler),
  ('/(.*)', MainHandler),
]

app = webapp2.WSGIApplication(routes, debug=settings.DEBUG)
app.error_handlers[404] = common.handle_404
# Only install the friendly 500 page in production with debug off, so stack
# traces remain visible during development.
if settings.PROD and not settings.DEBUG:
  app.error_handlers[500] = common.handle_500
| bsd-3-clause |
willingc/oh-mainline | vendor/packages/twisted/twisted/internet/_win32stdio.py | 96 | 3115 | # -*- test-case-name: twisted.test.test_stdio -*-
"""
Windows-specific implementation of the L{twisted.internet.stdio} interface.
"""
import win32api
import os, msvcrt
from zope.interface import implements
from twisted.internet.interfaces import IHalfCloseableProtocol, ITransport, IAddress
from twisted.internet.interfaces import IConsumer, IPushProducer
from twisted.internet import _pollingfile, main
from twisted.python.failure import Failure
class Win32PipeAddress(object):
    """Marker address returned by StandardIO.getPeer()/getHost() for the
    stdio 'transport' on Windows; carries no address data."""
    implements(IAddress)
class StandardIO(_pollingfile._PollingTimer):
    """An ITransport over the process's stdin/stdout on Windows, backed by
    polled pipe wrappers.  The connection is considered lost once both the
    read and the write side have closed.
    """

    implements(ITransport,
               IConsumer,
               IPushProducer)

    disconnecting = False
    disconnected = False

    def __init__(self, proto):
        """
        Start talking to standard IO with the given protocol.

        Also, put it stdin/stdout/stderr into binary mode.
        """
        from twisted.internet import reactor

        # Bug fix: this previously iterated range(0, 1, 2), which yields
        # only [0], so stdout (1) and stderr (2) were never switched to
        # binary mode despite the docstring.
        for stdfd in (0, 1, 2):
            msvcrt.setmode(stdfd, os.O_BINARY)

        _pollingfile._PollingTimer.__init__(self, reactor)
        self.proto = proto

        hstdin = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
        hstdout = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)

        self.stdin = _pollingfile._PollableReadPipe(
            hstdin, self.dataReceived, self.readConnectionLost)

        self.stdout = _pollingfile._PollableWritePipe(
            hstdout, self.writeConnectionLost)

        self._addPollableResource(self.stdin)
        self._addPollableResource(self.stdout)

        self.proto.makeConnection(self)

    def dataReceived(self, data):
        self.proto.dataReceived(data)

    def readConnectionLost(self):
        if IHalfCloseableProtocol.providedBy(self.proto):
            self.proto.readConnectionLost()
        self.checkConnLost()

    def writeConnectionLost(self):
        if IHalfCloseableProtocol.providedBy(self.proto):
            self.proto.writeConnectionLost()
        self.checkConnLost()

    # Number of directions (read/write) that have been lost so far.
    connsLost = 0

    def checkConnLost(self):
        # Only report connectionLost once both halves are gone.
        self.connsLost += 1
        if self.connsLost >= 2:
            self.disconnecting = True
            self.disconnected = True
            self.proto.connectionLost(Failure(main.CONNECTION_DONE))

    # ITransport

    def write(self, data):
        self.stdout.write(data)

    def writeSequence(self, seq):
        self.stdout.write(''.join(seq))

    def loseConnection(self):
        self.disconnecting = True
        self.stdin.close()
        self.stdout.close()

    def getPeer(self):
        return Win32PipeAddress()

    def getHost(self):
        return Win32PipeAddress()

    # IConsumer

    def registerProducer(self, producer, streaming):
        return self.stdout.registerProducer(producer, streaming)

    def unregisterProducer(self):
        return self.stdout.unregisterProducer()

    # def write() above

    # IProducer

    def stopProducing(self):
        self.stdin.stopProducing()

    # IPushProducer

    def pauseProducing(self):
        self.stdin.pauseProducing()

    def resumeProducing(self):
        self.stdin.resumeProducing()
| agpl-3.0 |
encukou/samba | python/samba/tests/samba3.py | 35 | 8434 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.samba3."""
from samba.samba3 import (
Registry,
WinsDatabase,
IdmapDatabase,
)
from samba.samba3 import passdb
from samba.samba3 import param as s3param
from samba.tests import TestCase, TestCaseInTempDir
from samba.dcerpc.security import dom_sid
import os
for p in [ "../../../../../testdata/samba3", "../../../../testdata/samba3" ]:
DATADIR = os.path.join(os.path.dirname(__file__), p)
if os.path.exists(DATADIR):
break
class RegistryTestCase(TestCase):
    """Tests for samba.samba3.Registry, driven by the checked-in registry
    snapshot under DATADIR/registry."""

    def setUp(self):
        super(RegistryTestCase, self).setUp()
        # Open the registry database shipped in the test data directory.
        self.registry = Registry(os.path.join(DATADIR, "registry"))

    def tearDown(self):
        self.registry.close()
        super(RegistryTestCase, self).tearDown()

    def test_length(self):
        # The snapshot contains exactly 28 keys.
        self.assertEquals(28, len(self.registry))

    def test_keys(self):
        self.assertTrue("HKLM" in self.registry.keys())

    def test_subkeys(self):
        self.assertEquals(["SOFTWARE", "SYSTEM"], self.registry.subkeys("HKLM"))

    def test_values(self):
        # Values are (type, raw data) tuples; DisplayName is UTF-16-LE bytes.
        self.assertEquals({'DisplayName': (1L, 'E\x00v\x00e\x00n\x00t\x00 \x00L\x00o\x00g\x00\x00\x00'),
                           'ErrorControl': (4L, '\x01\x00\x00\x00')},
                          self.registry.values("HKLM/SYSTEM/CURRENTCONTROLSET/SERVICES/EVENTLOG"))
class PassdbTestCase(TestCaseInTempDir):
    """Tests for the samba3 passdb wrapper (tdbsam backend).

    A copy of the samba3 test data is made in a temp dir so the database
    files can be opened (and possibly touched) without modifying DATADIR.
    """

    def setUp(self):
        super(PassdbTestCase, self).setUp()
        # Work on a private copy of the test data; removed again in tearDown.
        os.system("cp -r %s %s" % (DATADIR, self.tempdir))
        datadir = os.path.join(self.tempdir, "samba3")

        self.lp = s3param.get_context()
        self.lp.load(os.path.join(datadir, "smb.conf"))
        # Point every state-carrying directory at the private copy.
        self.lp.set("private dir", datadir)
        self.lp.set("state directory", datadir)
        self.lp.set("lock directory", datadir)
        self.lp.set("cache directory", datadir)
        passdb.set_secrets_dir(datadir)
        self.pdb = passdb.PDB("tdbsam")

    def tearDown(self):
        # Drop references before deleting the files underneath them.
        self.lp = []
        self.pdb = []
        os.system("rm -rf %s" % os.path.join(self.tempdir, "samba3"))
        super(PassdbTestCase, self).tearDown()

    def test_param(self):
        # Values come straight from the smb.conf in the test data.
        self.assertEquals("BEDWYR", self.lp.get("netbios name"))
        self.assertEquals("SAMBA", self.lp.get("workgroup"))
        self.assertEquals("USER", self.lp.get("security"))

    def test_policy(self):
        policy = self.pdb.get_account_policy()
        self.assertEquals(0, policy['bad lockout attempt'])
        self.assertEquals(-1, policy['disconnect time'])
        self.assertEquals(0, policy['lockout duration'])
        self.assertEquals(999999999, policy['maximum password age'])
        self.assertEquals(0, policy['minimum password age'])
        self.assertEquals(5, policy['min password length'])
        self.assertEquals(0, policy['password history'])
        self.assertEquals(0, policy['refuse machine password change'])
        self.assertEquals(0, policy['reset count minutes'])
        self.assertEquals(0, policy['user must logon to change password'])

    def test_get_sid(self):
        domain_sid = passdb.get_global_sam_sid()
        self.assertEquals(dom_sid("S-1-5-21-2470180966-3899876309-2637894779"), domain_sid)

    def test_usernames(self):
        userlist = self.pdb.search_users(0)
        self.assertEquals(3, len(userlist))

    def test_getuser(self):
        # Exhaustive check of every field of the stored "root" account.
        user = self.pdb.getsampwnam("root")
        self.assertEquals(16, user.acct_ctrl)
        self.assertEquals("", user.acct_desc)
        self.assertEquals(0, user.bad_password_count)
        self.assertEquals(0, user.bad_password_time)
        self.assertEquals(0, user.code_page)
        self.assertEquals(0, user.country_code)
        self.assertEquals("", user.dir_drive)
        self.assertEquals("BEDWYR", user.domain)
        self.assertEquals("root", user.full_name)
        self.assertEquals(dom_sid('S-1-5-21-2470180966-3899876309-2637894779-513'), user.group_sid)
        self.assertEquals("\\\\BEDWYR\\root", user.home_dir)
        self.assertEquals([-1 for i in range(21)], user.hours)
        self.assertEquals(21, user.hours_len)
        self.assertEquals(9223372036854775807, user.kickoff_time)
        self.assertEquals(None, user.lanman_passwd)
        self.assertEquals(9223372036854775807, user.logoff_time)
        self.assertEquals(0, user.logon_count)
        self.assertEquals(168, user.logon_divs)
        self.assertEquals("", user.logon_script)
        self.assertEquals(0, user.logon_time)
        self.assertEquals("", user.munged_dial)
        self.assertEquals('\x87\x8d\x80\x14`l\xda)gzD\xef\xa15?\xc7', user.nt_passwd)
        self.assertEquals("", user.nt_username)
        self.assertEquals(1125418267, user.pass_can_change_time)
        self.assertEquals(1125418267, user.pass_last_set_time)
        self.assertEquals(2125418266, user.pass_must_change_time)
        self.assertEquals(None, user.plaintext_passwd)
        self.assertEquals("\\\\BEDWYR\\root\\profile", user.profile_path)
        self.assertEquals(None, user.pw_history)
        self.assertEquals(dom_sid("S-1-5-21-2470180966-3899876309-2637894779-1000"), user.user_sid)
        self.assertEquals("root", user.username)
        self.assertEquals("", user.workstations)

    def test_group_length(self):
        grouplist = self.pdb.enum_group_mapping()
        self.assertEquals(13, len(grouplist))

    def test_get_group(self):
        group = self.pdb.getgrsid(dom_sid("S-1-5-32-544"))
        self.assertEquals("Administrators", group.nt_name)
        self.assertEquals(-1, group.gid)
        self.assertEquals(5, group.sid_name_use)

    def test_groupsids(self):
        # All well-known builtin group SIDs must be present in the mapping.
        grouplist = self.pdb.enum_group_mapping()
        sids = []
        for g in grouplist:
            sids.append(str(g.sid))
        self.assertTrue("S-1-5-32-544" in sids)
        self.assertTrue("S-1-5-32-545" in sids)
        self.assertTrue("S-1-5-32-546" in sids)
        self.assertTrue("S-1-5-32-548" in sids)
        self.assertTrue("S-1-5-32-549" in sids)
        self.assertTrue("S-1-5-32-550" in sids)
        self.assertTrue("S-1-5-32-551" in sids)

    def test_alias_length(self):
        aliaslist = self.pdb.search_aliases()
        self.assertEquals(1, len(aliaslist))
        self.assertEquals("Jelmers NT Group", aliaslist[0]['account_name'])
class WinsDatabaseTestCase(TestCase):
    """Tests for samba.samba3.WinsDatabase using the wins.dat snapshot."""

    def setUp(self):
        super(WinsDatabaseTestCase, self).setUp()
        self.winsdb = WinsDatabase(os.path.join(DATADIR, "wins.dat"))

    def test_length(self):
        self.assertEquals(22, len(self.winsdb))

    def test_first_entry(self):
        # Entries map "NAME#TYPE" -> (expiry timestamp, [addresses], flags).
        self.assertEqual((1124185120, ["192.168.1.5"], 0x64), self.winsdb["ADMINISTRATOR#03"])

    def tearDown(self):
        self.winsdb.close()
        super(WinsDatabaseTestCase, self).tearDown()
class IdmapDbTestCase(TestCase):
    """Tests for samba.samba3.IdmapDatabase (winbindd_idmap snapshot)."""

    def setUp(self):
        super(IdmapDbTestCase, self).setUp()
        self.idmapdb = IdmapDatabase(os.path.join(DATADIR,
                                                  "winbindd_idmap"))

    def test_user_hwm(self):
        # High-water mark: next uid to be allocated.
        self.assertEquals(10000, self.idmapdb.get_user_hwm())

    def test_group_hwm(self):
        self.assertEquals(10002, self.idmapdb.get_group_hwm())

    def test_uids(self):
        self.assertEquals(1, len(list(self.idmapdb.uids())))

    def test_gids(self):
        self.assertEquals(3, len(list(self.idmapdb.gids())))

    def test_get_user_sid(self):
        self.assertEquals("S-1-5-21-58189338-3053988021-627566699-501", self.idmapdb.get_user_sid(65534))

    def test_get_group_sid(self):
        self.assertEquals("S-1-5-21-2447931902-1787058256-3961074038-3007", self.idmapdb.get_group_sid(10001))

    def tearDown(self):
        self.idmapdb.close()
        super(IdmapDbTestCase, self).tearDown()
| gpl-3.0 |
nolanliou/tensorflow | tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py | 133 | 2763 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv3DBackpropFilterV2GradTest(test.TestCase):
  """Numerical gradient check for conv3d_backprop_filter_v2."""

  def testGradient(self):
    with self.test_session():
      # Sweep both padding modes and two stride settings.
      for padding in ["SAME", "VALID"]:
        for stride in [1, 2]:
          np.random.seed(1)
          in_shape = [2, 4, 3, 3, 2]
          in_val = constant_op.constant(
              2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
          filter_shape = [3, 3, 3, 2, 3]
          strides = [1, stride, stride, stride, 1]
          # Make a convolution op with the current settings, just to easily get
          # the shape of the output.
          conv_out = nn_ops.conv3d(in_val,
                                   array_ops.zeros(filter_shape), strides,
                                   padding)
          out_backprop_shape = conv_out.get_shape().as_list()
          out_backprop_val = constant_op.constant(
              2 * np.random.random_sample(out_backprop_shape) - 1,
              dtype=dtypes.float32)
          output = nn_ops.conv3d_backprop_filter_v2(in_val, filter_shape,
                                                    out_backprop_val, strides,
                                                    padding)
          # Compare the analytic gradient against a finite-difference estimate.
          err = gradient_checker.compute_gradient_error(
              [in_val, out_backprop_val], [in_shape, out_backprop_shape],
              output, filter_shape)
          print("conv3d_backprop_filter gradient err = %g " % err)
          err_tolerance = 1e-3
          self.assertLess(err, err_tolerance)


if __name__ == "__main__":
  test.main()
| apache-2.0 |
PredictiveScienceLab/GPy | GPy/testing/mpi_tests.py | 15 | 2682 | # Copyright (c) 2013-2014, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
# The MPI tests are only defined when mpi4py is importable; each test shells
# out to `mpirun -n 4` and compares the parallel objective against the
# serial one computed by rank 0.
try:
    from mpi4py import MPI
    import subprocess

    class MPITests(unittest.TestCase):
        """Check that MPI-distributed inference matches serial inference."""

        def test_BayesianGPLVM_MPI(self):
            # Script run under mpirun: prints the objective twice, first with
            # the MPI communicator attached, then after detaching it.
            code = """
import numpy as np
import GPy
from mpi4py import MPI
np.random.seed(123456)
comm = MPI.COMM_WORLD
N = 100
x = np.linspace(-6., 6., N)
y = np.sin(x) + np.random.randn(N) * 0.05
comm.Bcast(y)
data = np.vstack([x,y])
infr = GPy.inference.latent_function_inference.VarDTC_minibatch(mpi_comm=comm)
m = GPy.models.BayesianGPLVM(data.T,1,mpi_comm=comm)
m.optimize(max_iters=10)
if comm.rank==0:
    print float(m.objective_function())
    m.inference_method.mpi_comm=None
    m.mpi_comm=None
    m._trigger_params_changed()
    print float(m.objective_function())
"""
            with open('mpi_test__.py','w') as f:
                f.write(code)
                f.close()  # NOTE(review): redundant inside `with`
            p = subprocess.Popen('mpirun -n 4 python mpi_test__.py',stdout=subprocess.PIPE,shell=True)
            (stdout, stderr) = p.communicate()
            # The last two printed lines are the two objective values.
            L1 = float(stdout.splitlines()[-2])
            L2 = float(stdout.splitlines()[-1])
            self.assertTrue(np.allclose(L1,L2))
            import os
            os.remove('mpi_test__.py')

        def test_SparseGPRegression_MPI(self):
            # Same structure as above, but for SparseGPRegression.
            code = """
import numpy as np
import GPy
from mpi4py import MPI
np.random.seed(123456)
comm = MPI.COMM_WORLD
N = 100
x = np.linspace(-6., 6., N)
y = np.sin(x) + np.random.randn(N) * 0.05
comm.Bcast(y)
data = np.vstack([x,y])
#infr = GPy.inference.latent_function_inference.VarDTC_minibatch(mpi_comm=comm)
m = GPy.models.SparseGPRegression(data[:1].T,data[1:2].T,mpi_comm=comm)
m.optimize(max_iters=10)
if comm.rank==0:
    print float(m.objective_function())
    m.inference_method.mpi_comm=None
    m.mpi_comm=None
    m._trigger_params_changed()
    print float(m.objective_function())
"""
            with open('mpi_test__.py','w') as f:
                f.write(code)
                f.close()  # NOTE(review): redundant inside `with`
            p = subprocess.Popen('mpirun -n 4 python mpi_test__.py',stdout=subprocess.PIPE,shell=True)
            (stdout, stderr) = p.communicate()
            L1 = float(stdout.splitlines()[-2])
            L2 = float(stdout.splitlines()[-1])
            self.assertTrue(np.allclose(L1,L2))
            import os
            os.remove('mpi_test__.py')

except:
    # NOTE(review): bare except is presumably meant to skip these tests when
    # mpi4py is missing, but it also hides unrelated errors in the class
    # body — consider narrowing to `except ImportError`.
    pass
if __name__ == "__main__":
    print("Running unit tests, please be (very) patient...")
    try:
        # Only attempt to run when mpi4py is available.
        import mpi4py
        unittest.main()
    except:
        # NOTE(review): this bare except also swallows the SystemExit raised
        # by unittest.main(), so the process exit code will not reflect test
        # failures — confirm that is intended.
        pass
| bsd-3-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/test/test_copy_reg.py | 19 | 3456 | import copy_reg
import unittest
from test import test_support
from test.pickletester import ExtensionSaver
class C:
    # Dummy classic class handed to copy_reg.pickle in test_class to verify
    # that registering a class (rather than a type) raises TypeError.
    pass
class CopyRegTestCase(unittest.TestCase):
    """Tests for the copy_reg module (pickle support registry)."""

    def test_class(self):
        # Classic classes cannot be registered.
        self.assertRaises(TypeError, copy_reg.pickle,
                          C, None, None)

    def test_noncallable_reduce(self):
        self.assertRaises(TypeError, copy_reg.pickle,
                          type(1), "not a callable")

    def test_noncallable_constructor(self):
        self.assertRaises(TypeError, copy_reg.pickle,
                          type(1), int, "not a callable")

    def test_bool(self):
        import copy
        self.assertEquals(True, copy.copy(True))

    def test_extension_registry(self):
        """Exercise add_extension/remove_extension bookkeeping and limits."""
        mod, func, code = 'junk1 ', ' junk2', 0xabcd
        # ExtensionSaver preserves/restores any pre-existing binding for code.
        e = ExtensionSaver(code)
        try:
            # Shouldn't be in registry now.
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod, func, code)
            copy_reg.add_extension(mod, func, code)
            # Should be in the registry.
            self.assert_(copy_reg._extension_registry[mod, func] == code)
            self.assert_(copy_reg._inverted_registry[code] == (mod, func))
            # Shouldn't be in the cache.
            self.assert_(code not in copy_reg._extension_cache)
            # Redundant registration should be OK.
            copy_reg.add_extension(mod, func, code)  # shouldn't blow up
            # Conflicting code.
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod, func, code + 1)
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod, func, code + 1)
            # Conflicting module name.
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod[1:], func, code )
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod[1:], func, code )
            # Conflicting function name.
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod, func[1:], code)
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod, func[1:], code)
            # Can't remove one that isn't registered at all.
            if code + 1 not in copy_reg._inverted_registry:
                self.assertRaises(ValueError, copy_reg.remove_extension,
                                  mod[1:], func[1:], code + 1)

        finally:
            e.restore()

        # Shouldn't be there anymore.
        self.assert_((mod, func) not in copy_reg._extension_registry)
        # The code *may* be in copy_reg._extension_registry, though, if
        # we happened to pick on a registered code.  So don't check for
        # that.

        # Check valid codes at the limits.
        for code in 1, 0x7fffffff:
            e = ExtensionSaver(code)
            try:
                copy_reg.add_extension(mod, func, code)
                copy_reg.remove_extension(mod, func, code)
            finally:
                e.restore()

        # Ensure invalid codes blow up.
        for code in -1, 0, 0x80000000L:
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod, func, code)
def test_main():
    """Entry point used by the regression-test driver."""
    test_support.run_unittest(CopyRegTestCase)


if __name__ == "__main__":
    test_main()
| mit |
alanquillin/ryu | ryu/contrib/ncclient/operations/flowmon.py | 82 | 1213 | # Copyright 2h009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'Power-control operations'
from ncclient.xml_ import *
from rpc import RPC
PC_URN = "urn:liberouter:params:xml:ns:netconf:power-control:1.0"
class PoweroffMachine(RPC):
    """*poweroff-machine* RPC (flowmon).

    Sends a qualified <poweroff-machine/> element in the liberouter
    power-control namespace.
    """

    # Fixed "param" -> "params": the capability URN must match the
    # "urn:liberouter:params:..." form used by PC_URN above and by
    # RebootMachine below; otherwise the dependency check can never match
    # the capability the server actually advertises.
    DEPENDS = ["urn:liberouter:params:netconf:capability:power-control:1.0"]

    def request(self):
        """Issue the poweroff-machine RPC and return the reply."""
        return self._request(new_ele(qualify("poweroff-machine", PC_URN)))
class RebootMachine(RPC):
    """*reboot-machine* RPC (flowmon).

    Sends a qualified <reboot-machine/> element in the liberouter
    power-control namespace.
    """

    DEPENDS = ["urn:liberouter:params:netconf:capability:power-control:1.0"]

    def request(self):
        """Issue the reboot-machine RPC and return the reply."""
        # Build the namespaced element first, then hand it to the RPC layer.
        reboot_ele = new_ele(qualify("reboot-machine", PC_URN))
        return self._request(reboot_ele)
| apache-2.0 |
bugobliterator/ardupilot-chibios | Tools/ardupilotwaf/boards.py | 2 | 25496 | #!/usr/bin/env python
# encoding: utf-8
from collections import OrderedDict
import sys
import waflib
from waflib.Configure import conf
_board_classes = {}
_board = None
class BoardMeta(type):
    """Metaclass that auto-registers every concrete Board subclass.

    Classes with `abstract = True` in their own __dict__ are skipped; all
    others are added to the module-level _board_classes registry keyed by
    their `name` attribute (falling back to the class name). Duplicate
    names are a programming error and raise immediately.
    """

    def __init__(cls, name, bases, dct):
        super(BoardMeta, cls).__init__(name, bases, dct)

        # Only a class that declares `abstract` itself is abstract;
        # subclasses of an abstract board are concrete by default.
        if 'abstract' not in cls.__dict__:
            cls.abstract = False
        if cls.abstract:
            return

        if not hasattr(cls, 'toolchain'):
            cls.toolchain = 'native'

        board_name = getattr(cls, 'name', name)
        if board_name in _board_classes:
            raise Exception('board named %s already exists' % board_name)
        _board_classes[board_name] = cls
class Board:
    """Base class for all build targets.

    Subclasses override configure_env() to add their toolchain flags and
    HAL defines; BoardMeta (applied below) registers each concrete
    subclass so `waf configure --board=<name>` can find it.
    """
    abstract = True

    def __init__(self):
        # UAVCAN support is opt-in per board.
        self.with_uavcan = False

    def configure(self, cfg):
        """Configure the waf context: load the toolchain, collect the
        board environment and merge it into cfg.env."""
        cfg.env.TOOLCHAIN = self.toolchain
        cfg.load('toolchain')
        cfg.load('cxx_checks')

        # Collect board settings into a scratch env first, then merge.
        env = waflib.ConfigSet.ConfigSet()
        self.configure_env(cfg, env)

        d = env.get_merged_dict()
        # Always prepend so that arguments passed in the command line get
        # the priority.
        for k, val in d.items():
            # Dictionaries (like 'DEFINES') are converted to lists to
            # conform to waf conventions.
            if isinstance(val, dict):
                keys = list(val.keys())
                if not isinstance(val, OrderedDict):
                    keys.sort()
                val = ['%s=%s' % (vk, val[vk]) for vk in keys]

            if k in cfg.env and isinstance(cfg.env[k], list):
                cfg.env.prepend_value(k, val)
            else:
                cfg.env[k] = val

        cfg.ap_common_checks()

        cfg.env.prepend_value('INCLUDES', [
            cfg.srcnode.find_dir('libraries/AP_Common/missing').abspath()
        ])

    def configure_env(self, cfg, env):
        """Populate `env` with the common compiler/linker settings;
        subclasses call this via super() and then add their own."""
        # Use a dictionary instead of the conventional list for definitions to
        # make it easy to override them. Converted back to a list before
        # consumption (see configure()).
        env.DEFINES = {}

        env.CFLAGS += [
            '-ffunction-sections',
            '-fdata-sections',
            '-fsigned-char',

            '-Wall',
            '-Wextra',
            '-Wformat',
            '-Wshadow',
            '-Wpointer-arith',
            '-Wcast-align',
            '-Wundef',
            '-Wno-missing-field-initializers',
            '-Wno-unused-parameter',
            '-Wno-redundant-decls',
            '-Wno-unknown-pragmas',
            '-Wno-trigraphs',
        ]

        # Clang needs a few extra suppressions that GCC does not know.
        if 'clang' in cfg.env.COMPILER_CC:
            env.CFLAGS += [
                '-fcolor-diagnostics',

                '-Wno-gnu-designator',
                '-Wno-inconsistent-missing-override',
                '-Wno-mismatched-tags',
                '-Wno-gnu-variable-sized-type-not-at-end',
                '-Wno-c++11-narrowing'
            ]

        if cfg.env.DEBUG:
            env.CFLAGS += [
                '-g',
                '-O0',
                '-Wno-trigraphs',
            ]

        env.CXXFLAGS += [
            '-std=gnu++11',

            '-fdata-sections',
            '-ffunction-sections',
            '-fno-exceptions',
            '-fsigned-char',

            '-Wall',
            '-Wextra',
            '-Wformat',
            '-Wshadow',
            '-Wpointer-arith',
            '-Wcast-align',
            '-Wundef',
            '-Wno-unused-parameter',
            '-Wno-missing-field-initializers',
            '-Wno-reorder',
            '-Wno-redundant-decls',
            '-Wno-unknown-pragmas',
            '-Werror=format-security',
            '-Werror=array-bounds',
            '-Werror=uninitialized',
            '-Werror=init-self',
            '-Werror=switch',
            '-Wfatal-errors',
            '-Wno-trigraphs',
        ]

        if 'clang++' in cfg.env.COMPILER_CXX:
            env.CXXFLAGS += [
                '-fcolor-diagnostics',

                '-Wno-gnu-designator',
                '-Wno-inconsistent-missing-override',
                '-Wno-mismatched-tags',
                '-Wno-gnu-variable-sized-type-not-at-end',
                '-Wno-c++11-narrowing'
            ]
        else:
            env.CXXFLAGS += [
                '-Werror=unused-but-set-variable'
            ]

        if cfg.env.DEBUG:
            env.CXXFLAGS += [
                '-g',
                '-O0',
            ]

        if cfg.env.DEST_OS == 'darwin':
            # The Darwin linker spells dead-code stripping differently.
            env.LINKFLAGS += [
                '-Wl,-dead_strip',
            ]
        else:
            env.LINKFLAGS += [
                '-Wl,--gc-sections',
            ]

        if self.with_uavcan:
            env.AP_LIBRARIES += [
                'AP_UAVCAN',
                'modules/uavcan/libuavcan/src/**/*.cpp'
            ]

            env.CXXFLAGS += [
                '-Wno-error=cast-align',
            ]

            env.DEFINES.update(
                UAVCAN_CPP_VERSION = 'UAVCAN_CPP03',
                UAVCAN_NO_ASSERTIONS = 1,
                UAVCAN_NULLPTR = 'nullptr'
            )

            env.INCLUDES += [
                cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath()
            ]

        # We always want to use PRI format macros
        cfg.define('__STDC_FORMAT_MACROS', 1)

    def build(self, bld):
        """Embed the git hash and build date into the firmware version."""
        bld.ap_version_append_str('GIT_VERSION', bld.git_head_hash(short=True))
        import time
        ltime = time.localtime()
        bld.ap_version_append_int('BUILD_DATE_YEAR', ltime.tm_year)
        bld.ap_version_append_int('BUILD_DATE_MONTH', ltime.tm_mon)
        bld.ap_version_append_int('BUILD_DATE_DAY', ltime.tm_mday)


# Apply the metaclass in a way that works on both Python 2 and 3.
Board = BoardMeta('Board', Board.__bases__, dict(Board.__dict__))
def get_boards_names():
    """Return the names of all registered boards, sorted alphabetically."""
    # Iterating the registry dict yields its keys directly.
    return sorted(_board_classes)
@conf
def get_board(ctx):
    """Return the singleton Board instance selected by ctx.env.BOARD.

    Instantiated lazily on first call; ctx.env.BOARD must already be set
    (done at configure time) or this is a fatal error.
    """
    global _board
    if not _board:
        if not ctx.env.BOARD:
            ctx.fatal('BOARD environment variable must be set before first call to get_board()')
        _board = _board_classes[ctx.env.BOARD]()
    return _board
# NOTE: Keeping all the board definitions together so we can easily
# identify opportunities to simplify common flags. In the future might
# be worthy to keep board definitions in files of their own.
class sitl(Board):
    """Software-in-the-loop simulator board (native toolchain)."""

    def configure_env(self, cfg, env):
        super(sitl, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD = 'HAL_BOARD_SITL',
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_NONE',
        )
        if not cfg.env.DEBUG:
            env.CXXFLAGS += [
                '-O3',
            ]

        env.LIB += [
            'm',
        ]
        # librt is only needed on some platforms; probed, not assumed.
        cfg.check_librt(env)

        env.LINKFLAGS += ['-pthread',]
        env.AP_LIBRARIES += [
            'AP_HAL_SITL',
            'SITL',
        ]

        if sys.platform == 'cygwin':
            # Needed for the multimedia timer API used on Windows.
            env.LIB += [
                'winmm',
            ]
class chibios(Board):
    """Base for all ChibiOS (STM32, Cortex-M4) targets."""
    toolchain = 'arm-none-eabi'

    def configure_env(self, cfg, env):
        super(chibios, self).configure_env(cfg, env)

        env.BOARD = self.name
        env.DEFINES.update(
            CONFIG_HAL_BOARD = 'HAL_BOARD_CHIBIOS',
            HAVE_OCLOEXEC = 0,
            HAVE_STD_NULLPTR_T = 0,
        )
        if self.with_uavcan:
            # STM32-specific UAVCAN CAN driver sources/headers.
            env.AP_LIBRARIES += [
                'modules/uavcan/libuavcan_drivers/stm32/driver/src/*.cpp'
            ]
            env.INCLUDES += [
                cfg.srcnode.find_dir('modules/uavcan/libuavcan_drivers/stm32/driver/include').abspath()
            ]
        env.AP_LIBRARIES += [
            'AP_HAL_ChibiOS',
        ]

        # make board name available for USB IDs
        env.CHIBIOS_BOARD_NAME = 'HAL_BOARD_NAME="%s"' % self.name

        # Cortex-M4F flags: hard-float ABI, size-optimized, no
        # exceptions/RTTI, and section GC to keep the image small.
        env.CXXFLAGS += [
            '-Wlogical-op',
            '-Wframe-larger-than=1300',
            '-fsingle-precision-constant',
            '-Wno-attributes',
            '-Wno-error=double-promotion',
            '-Wno-error=missing-declarations',
            '-Wno-error=float-equal',
            '-Wno-error=undef',
            '-Wno-error=cpp',
            '-Wno-cast-align',
            '-fno-exceptions',
            '-fno-rtti',
            '-fno-threadsafe-statics',
            '-Wall',
            '-Wextra',
            '-Wno-sign-compare',
            '-Wfloat-equal',
            '-Wpointer-arith',
            '-Wmissing-declarations',
            '-Wno-unused-parameter',
            '-Werror=array-bounds',
            '-Wfatal-errors',
            '-Werror=unused-variable',
            '-Werror=uninitialized',
            '-Werror=init-self',
            '-Wframe-larger-than=1024',
            '-Werror=unused-but-set-variable',
            '-Wno-missing-field-initializers',
            '-Wno-trigraphs',
            '-Os',
            '-g',
            '-fno-strict-aliasing',
            '-fomit-frame-pointer',
            '-falign-functions=16',
            '-ffunction-sections',
            '-fdata-sections',
            '-fno-strength-reduce',
            '-fno-builtin-printf',
            '-fno-builtin-fprintf',
            '-fno-builtin-vprintf',
            '-fno-builtin-vfprintf',
            '-fno-builtin-puts',
            '-mcpu=cortex-m4',
            '-mno-thumb-interwork',
            '-mthumb',
            '-mfpu=fpv4-sp-d16',
            '-mfloat-abi=hard'
        ]

        bldnode = cfg.bldnode.make_node(self.name)
        env.BUILDROOT = bldnode.make_node('').abspath()

        # Bare-metal link: no startup files, custom ldscript generated under
        # BUILDROOT, ChibiOS symbols forced in via -u.
        env.LINKFLAGS = [
            '-mcpu=cortex-m4',
            '-Os',
            '-g',
            '-fomit-frame-pointer',
            '-falign-functions=16',
            '-ffunction-sections',
            '-fdata-sections',
            '-u_port_lock',
            '-u_port_unlock',
            '-u_exit',
            '-u_kill',
            '-u_getpid',
            '-u_errno',
            '-uchThdExit',
            '-u_printf_float',
            '-fno-common',
            '-nostartfiles',
            '-mfloat-abi=hard',
            '-mfpu=fpv4-sp-d16',
            '-mno-thumb-interwork',
            '-mthumb',
            '-L%s' % cfg.srcnode.make_node('modules/ChibiOS/os/common/startup/ARMCMx/compilers/GCC/ld/').abspath(),
            '-L%s' % cfg.srcnode.make_node('libraries/AP_HAL_ChibiOS/hwdef/common/').abspath(),
            '-Wl,--gc-sections,--no-warn-mismatch,--library-path=/ld,--script=%s/ldscript.ld,--defsym=__process_stack_size__=0x400,--defsym=__main_stack_size__=0x400' % env.BUILDROOT,
        ]

        env.LIB += ['gcc', 'm']

        if self.with_uavcan:
            env.CFLAGS += ['-DUAVCAN_STM32_CHIBIOS=1',
                           '-DUAVCAN_STM32_NUM_IFACES=2']
            env.CXXFLAGS += ['-DUAVCAN_STM32_CHIBIOS=1',
                             '-DUAVCAN_STM32_NUM_IFACES=2']
        env.GIT_SUBMODULES += [
            'ChibiOS',
        ]
        cfg.load('chibios')
        # Boards without an SD card override this with USE_FATFS=no.
        env.CHIBIOS_FATFS_FLAG = 'USE_FATFS=yes'

    def build(self, bld):
        super(chibios, self).build(bld)
        bld.load('chibios')
class skyviper_f412(chibios):
    """SkyViper toy drone on an STM32F412 (no SD card)."""
    name = 'skyviper-f412'

    def configure_env(self, cfg, env):
        super(skyviper_f412, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_SKYVIPER_F412',
        )
        env.CHIBIOS_FATFS_FLAG = 'USE_FATFS=no'
        env.DEFAULT_PARAMETERS = '../../Tools/Frame_params/SkyViper-F412/defaults.parm'
class skyviper_f412_rev1(skyviper_f412):
    """Rev-1 variant of skyviper-f412; currently identical configuration,
    distinguished only by its board name (and hwdef)."""
    name = 'skyviper-f412-rev1'

    def configure_env(self, cfg, env):
        # NOTE(review): redundant override — delegates unchanged to the base;
        # kept as an extension point.
        super(skyviper_f412_rev1, self).configure_env(cfg, env)
class fmuv3(chibios):
    """Pixhawk FMUv3 (STM32F427) with UAVCAN enabled."""
    name = 'fmuv3'

    def __init__(self):
        super(fmuv3, self).__init__()
        self.with_uavcan = True

    def configure_env(self, cfg, env):
        super(fmuv3, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_FMUV3',
        )
class skyviper_v2450(fmuv3):
    """SkyViper 2450GPS: an fmuv3 derivative without UAVCAN or FATFS,
    shipping frame-specific default parameters."""
    name = 'skyviper-v2450'

    def __init__(self):
        super(skyviper_v2450, self).__init__()
        # Override the UAVCAN default inherited from fmuv3.
        self.with_uavcan = False

    def configure_env(self, cfg, env):
        super(skyviper_v2450, self).configure_env(cfg, env)
        env.DEFAULT_PARAMETERS = '../../Tools/Frame_params/SkyViper-2450GPS/defaults.parm'
        env.CHIBIOS_FATFS_FLAG = 'USE_FATFS=no'
class fmuv4(chibios):
    """Pixracer / FMUv4 (STM32F427)."""
    name = 'fmuv4'

    def configure_env(self, cfg, env):
        super(fmuv4, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_FMUV4',
        )


class mindpx_v2(chibios):
    """MindPX v2 flight controller."""
    name = 'mindpx-v2'

    def configure_env(self, cfg, env):
        super(mindpx_v2, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_MINDPXV2',
        )


class sparky2(chibios):
    """TauLabs Sparky2 (no SD card, so FATFS disabled)."""
    name = 'sparky2'

    def configure_env(self, cfg, env):
        super(sparky2, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_SPARKY2',
        )
        env.CHIBIOS_FATFS_FLAG = 'USE_FATFS=no'


class revo_mini(chibios):
    """OpenPilot Revolution Mini (no SD card)."""
    name = 'revo-mini'

    def configure_env(self, cfg, env):
        super(revo_mini, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_REVOMINI',
        )
        env.CHIBIOS_FATFS_FLAG = 'USE_FATFS=no'


class crazyflie2(chibios):
    """Bitcraze Crazyflie 2 (no SD card)."""
    name = 'crazyflie2'

    def configure_env(self, cfg, env):
        super(crazyflie2, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_CRAZYFLIE2',
        )
        env.CHIBIOS_FATFS_FLAG = 'USE_FATFS=no'


class mini_pix(chibios):
    """Radiolink Mini Pix."""
    name = 'mini-pix'

    def configure_env(self, cfg, env):
        super(mini_pix, self).configure_env(cfg, env)
        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_CHIBIOS_MINIPIX',
        )
class linux(Board):
    """Base class for all Linux HAL targets."""

    def configure_env(self, cfg, env):
        super(linux, self).configure_env(cfg, env)

        cfg.find_toolchain_program('pkg-config', var='PKGCONFIG')
        env.DEFINES.update(
            CONFIG_HAL_BOARD = 'HAL_BOARD_LINUX',
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NONE',
        )
        if not cfg.env.DEBUG:
            env.CXXFLAGS += [
                '-O3',
            ]

        env.LIB += [
            'm',
        ]
        # Optional libraries: each check degrades gracefully if absent.
        cfg.check_librt(env)
        cfg.check_lttng(env)
        cfg.check_libdl(env)
        cfg.check_libiio(env)

        env.LINKFLAGS += ['-pthread',]
        env.AP_LIBRARIES += [
            'AP_HAL_Linux',
        ]

        if self.with_uavcan:
            cfg.define('UAVCAN_EXCEPTIONS', 0)

    def build(self, bld):
        super(linux, self).build(bld)
        if bld.options.upload:
            # Upload to the target board over rsync after building.
            waflib.Options.commands.append('rsync')
            # Avoid infinite recursion
            bld.options.upload = False
class minlure(linux):
    """Minlure board (native toolchain)."""

    def configure_env(self, cfg, env):
        super(minlure, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_MINLURE',
        )


class erleboard(linux):
    """Erle-Board (ARM hard-float cross toolchain)."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(erleboard, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBOARD',
        )


class navio(linux):
    """Emlid Navio (Raspberry Pi HAT)."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(navio, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO',
        )


class navio2(linux):
    """Emlid Navio2 (Raspberry Pi HAT)."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(navio2, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO2',
        )
class edge(linux):
    """Emlid Edge (ARM hard-float, UAVCAN enabled)."""
    toolchain = 'arm-linux-gnueabihf'

    def __init__(self):
        # Fixed: call the base constructor first, as every other board with
        # an __init__ does (e.g. skyviper_v2450, px4_v2). Previously it was
        # skipped, so any state initialized by Board.__init__ beyond
        # with_uavcan would silently be missing on this board.
        super(edge, self).__init__()
        self.with_uavcan = True

    def configure_env(self, cfg, env):
        super(edge, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_EDGE',
        )
class zynq(linux):
    """Xilinx Zynq SoC."""
    toolchain = 'arm-xilinx-linux-gnueabi'

    def configure_env(self, cfg, env):
        super(zynq, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ZYNQ',
        )


class ocpoc_zynq(linux):
    """Aerotenna OcPoC-Zynq."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(ocpoc_zynq, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_OCPOC_ZYNQ',
        )


class bbbmini(linux):
    """BeagleBone Black + BBBmini cape."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(bbbmini, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BBBMINI',
        )


class blue(linux):
    """BeagleBone Blue."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(blue, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BLUE',
        )


class pocket(linux):
    """PocketBeagle."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(pocket, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_POCKET',
        )


class pxf(linux):
    """PXF cape."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(pxf, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXF',
        )


class bebop(linux):
    """Parrot Bebop."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(bebop, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BEBOP',
        )


class disco(linux):
    """Parrot Disco."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(disco, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DISCO',
        )


class erlebrain2(linux):
    """Erle-Brain 2."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(erlebrain2, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBRAIN2',
        )


class bhat(linux):
    """BH hat for Raspberry Pi."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(bhat, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BH',
        )


class dark(linux):
    """Dark board."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(dark, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DARK',
        )


class pxfmini(linux):
    """PXFmini shield for Raspberry Pi."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(pxfmini, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXFMINI',
        )


class aero(linux):
    """Intel Aero (native toolchain)."""

    def configure_env(self, cfg, env):
        super(aero, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_AERO',
        )


class rst_zynq(linux):
    """RST Zynq board."""
    toolchain = 'arm-linux-gnueabihf'

    def configure_env(self, cfg, env):
        super(rst_zynq, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_RST_ZYNQ',
        )
class px4(Board):
    """Abstract base for NuttX/PX4 targets.

    Concrete subclasses must set bootloader_name and board_name; the other
    attributes customize what gets installed into the ROMFS image.
    """
    abstract = True
    toolchain = 'arm-none-eabi'

    def __init__(self):
        # bootloader name: a file with that name will be used and installed
        # on ROMFS
        super(px4, self).__init__()
        self.bootloader_name = None

        # board name: it's the name of this board that's also used as path
        # in ROMFS: don't add spaces
        self.board_name = None

        # px4io binary name: this is the name of the IO binary to be installed
        # in ROMFS
        self.px4io_name = None

        # board-specific init script: if True a file with `board_name` name will
        # be searched for in sources and installed in ROMFS as rc.board. This
        # init script is used to change the init behavior among different boards.
        self.board_rc = False

        # Path relative to the ROMFS directory where to find a file with default
        # parameters. If set this file will be copied to /etc/defaults.parm
        # inside the ROMFS
        self.param_defaults = None

        # ROMFS entries to leave out of the image (see romfs_exclude()).
        self.ROMFS_EXCLUDE = []

    def configure(self, cfg):
        """Validate required attributes before the common configure step."""
        if not self.bootloader_name:
            cfg.fatal('configure: px4: bootloader name is required')
        if not self.board_name:
            cfg.fatal('configure: px4: board name is required')

        super(px4, self).configure(cfg)
        cfg.load('px4')

    def configure_env(self, cfg, env):
        super(px4, self).configure_env(cfg, env)

        env.DEFINES.update(
            CONFIG_HAL_BOARD = 'HAL_BOARD_PX4',
            HAVE_OCLOEXEC = 0,
            HAVE_STD_NULLPTR_T = 0,
        )
        env.CXXFLAGS += [
            '-Wlogical-op',
            '-Wframe-larger-than=1300',
            '-fsingle-precision-constant',
            '-Wno-attributes',
            '-Wno-error=double-promotion',
            '-Wno-error=missing-declarations',
            '-Wno-error=float-equal',
            '-Wno-error=undef',
            '-Wno-error=cpp',
        ]
        env.AP_LIBRARIES += [
            'AP_HAL_PX4',
        ]
        env.GIT_SUBMODULES += [
            'PX4Firmware',
            'PX4NuttX',
            'uavcan',
        ]

        # Export the per-board ROMFS customization for the px4 waf tool.
        env.ROMFS_EXCLUDE = self.ROMFS_EXCLUDE

        env.PX4_BOOTLOADER_NAME = self.bootloader_name
        env.PX4_BOARD_NAME = self.board_name
        env.PX4_BOARD_RC = self.board_rc
        env.PX4_PX4IO_NAME = self.px4io_name
        env.PX4_PARAM_DEFAULTS = self.param_defaults
        env.PX4_RC_S_SCRIPT = 'init.d/rcS'

        env.AP_PROGRAM_AS_STLIB = True

    def build(self, bld):
        """Record submodule versions and hand off to the px4 waf tool."""
        super(px4, self).build(bld)
        bld.ap_version_append_str('NUTTX_GIT_VERSION', bld.git_submodule_head_hash('PX4NuttX', short=True))
        bld.ap_version_append_str('PX4_GIT_VERSION', bld.git_submodule_head_hash('PX4Firmware', short=True))
        bld.load('px4')

    def romfs_exclude(self, exclude):
        """Add entries (file names) to leave out of the ROMFS image."""
        self.ROMFS_EXCLUDE += exclude
class px4_v1(px4):
    """PX4 FMUv1 board definition."""
    name = 'px4-v1'

    def __init__(self):
        super(px4_v1, self).__init__()
        # FMUv1 uses the original bootloader and the v1 IO coprocessor firmware.
        self.board_name = 'px4fmu-v1'
        self.bootloader_name = 'px4fmu_bl.bin'
        self.px4io_name = 'px4io-v1'
        # The oreoled firmware blob is not used on this board.
        self.romfs_exclude(['oreoled.bin'])
class px4_v2(px4):
    """PX4 FMUv2 board definition (UAVCAN-capable)."""
    name = 'px4-v2'

    def __init__(self):
        super(px4_v2, self).__init__()
        # FMUv2 pairs the v2 bootloader with the v2 IO coprocessor firmware.
        self.board_name = 'px4fmu-v2'
        self.bootloader_name = 'px4fmuv2_bl.bin'
        self.px4io_name = 'px4io-v2'
        self.with_uavcan = True
        # The oreoled firmware blob is not used on this board.
        self.romfs_exclude(['oreoled.bin'])
class px4_v3(px4):
    """PX4 FMUv3 board definition (UAVCAN-capable)."""
    name = 'px4-v3'

    def __init__(self):
        super(px4_v3, self).__init__()
        # FMUv3 reuses the v2 bootloader and v2 IO coprocessor firmware.
        self.board_name = 'px4fmu-v3'
        self.bootloader_name = 'px4fmuv2_bl.bin'
        self.px4io_name = 'px4io-v2'
        self.with_uavcan = True
class skyviper_v2450_px4(px4_v3):
    """SkyViper V2450 drone: an FMUv3 build without the IO coprocessor."""
    name = 'skyviper-v2450-px4'

    def __init__(self):
        super(skyviper_v2450_px4, self).__init__()
        # No px4io coprocessor on this frame.
        self.px4io_name = None
        # Ship the SkyViper frame parameters as /etc/defaults.parm in ROMFS.
        self.param_defaults = '../../../Tools/Frame_params/SkyViper-2450GPS/defaults.parm'

    def configure_env(self, cfg, env):
        """Add SkyViper-specific defines and the microSD-less init script."""
        super(skyviper_v2450_px4, self).configure_env(cfg, env)
        env.DEFINES.update(
            TOY_MODE_ENABLED = 'ENABLED',
            USE_FLASH_STORAGE = 1,
            ARMING_DELAY_SEC = 0,
            LAND_START_ALT = 700,
            HAL_RCINPUT_WITH_AP_RADIO = 1,
            LAND_DETECTOR_ACCEL_MAX = 2
        )
        # This frame has no microSD slot, so use the alternate rcS script.
        env.PX4_RC_S_SCRIPT = 'init.d/rcS_no_microSD'
        env.BUILD_ABIN = True
class px4_v4(px4):
    """PX4 FMUv4 board definition (no IO coprocessor)."""
    name = 'px4-v4'

    def __init__(self):
        super(px4_v4, self).__init__()
        # FMUv4 has no IO coprocessor, so px4io_name stays unset.
        self.board_name = 'px4fmu-v4'
        self.bootloader_name = 'px4fmuv4_bl.bin'
        self.with_uavcan = True
        # The oreoled firmware blob is not used on this board.
        self.romfs_exclude(['oreoled.bin'])
class px4_v4pro(px4):
    """PX4 FMUv4 Pro board definition (UAVCAN-capable, v2 IO firmware)."""
    name = 'px4-v4pro'

    def __init__(self):
        super(px4_v4pro, self).__init__()
        self.board_name = 'px4fmu-v4pro'
        self.bootloader_name = 'px4fmuv4pro_bl.bin'
        self.px4io_name = 'px4io-v2'
        self.with_uavcan = True
        # The oreoled firmware blob is not used on this board.
        self.romfs_exclude(['oreoled.bin'])
class aerofc_v1(px4):
    """Intel Aero flight controller board definition."""
    name = 'aerofc-v1'

    def __init__(self):
        super(aerofc_v1, self).__init__()
        self.board_name = 'aerofc-v1'
        self.bootloader_name = 'aerofcv1_bl.bin'
        # The Aero RTF ships a board-specific rc.board script and parameter set.
        self.board_rc = True
        self.param_defaults = '../../../Tools/Frame_params/intel-aero-rtf.param'
        # The oreoled firmware blob is not used on this board.
        self.romfs_exclude(['oreoled.bin'])
| gpl-3.0 |
OmkarB/pecan | FlaskApp/API/main.py | 1 | 1473 | import Wolfram
import sp
import synopsis
import tt
import api_key
import json
import twitter_streaming as ts
from rosette.api import API, DocumentParameters
user_key = api_key.user_key
ros_url = api_key.ros_url
def handle_query(query):
    """Fan the query out to each knowledge source and bundle the answers.

    Returns a dict with keys "syn" (Wikipedia summary), "tt" (topic
    lookup) and "wol" (Wolfram result). The "sp" and "twi" (Twitter)
    sources are currently disabled.
    """
    return {
        "syn": synopsis.wikipedia_summary(query),
        "tt": tt.look_up(query),
        "wol": Wolfram.main(query),
    }
def get_synopsis(syn):
    """Return the synopsis text unchanged (HTML wrapping is disabled)."""
    return syn
def get_tt(tt):
    """Return the topic-lookup result unchanged (HTML list rendering is disabled)."""
    return tt
def get_wol(wol, key=user_key, alt_url=ros_url):
    """Filter a Wolfram result down to entity-bearing lines, as an HTML list.

    The Wolfram output is split on "|", sorted shortest-first, and each
    fragment is run through the Rosette entity extractor; the first four
    fragments that contain a named entity are rendered as an HTML <ul>.

    :param wol: raw Wolfram result string, fields separated by "|"
    :param key: Rosette API user key (defaults to the module-level key)
    :param alt_url: Rosette service URL
    :return: HTML "<ul>...</ul>" string of the kept fragments
    """
    lst = wol.split("|")
    lst.sort(key=len)
    # Fix: the `key` parameter was previously ignored (global user_key used).
    api = API(user_key=key, service_url=alt_url)
    params = DocumentParameters()
    params['language'] = 'eng'
    rtotal = []
    for line in lst:
        if len(rtotal) >= 4:
            # We already have enough fragments; skip further API calls.
            break
        params['content'] = line
        # api.entities already returns a parsed structure; the previous
        # json.dumps/json.loads round-trip was a no-op.
        json_obj = api.entities(params, True)
        if json_obj['entities']:
            rtotal.append(line)
    # Fix: render the entity-filtered fragments. The original built the list
    # from `lst`, silently discarding the rtotal filtering computed above.
    result = "<ul> "
    for x in rtotal:
        result += "<li>" + x + "</li>"
    result += "</ul>"
    return result
| gpl-2.0 |
agentxan/plugin.video.emby | resources/lib/connect/credentials.py | 1 | 4824 | # -*- coding: utf-8 -*-
#################################################################################################
import json
import logging
import os
import time
from datetime import datetime
#################################################################################################
log = logging.getLogger("EMBY."+__name__.split('.')[-1])
#################################################################################################
class Credentials(object):
    """Persist Emby server/user credentials as JSON in <path>/data.txt.

    Uses the Borg pattern: every instance shares ``_shared_state``, so the
    loaded credentials and save path are process-wide.
    """
    _shared_state = {}  # Borg
    credentials = None
    path = ""

    def __init__(self):
        self.__dict__ = self._shared_state

    def setPath(self, path):
        # Path to save persistant data.txt
        self.path = path

    def _ensure(self):
        """Lazily load credentials from disk; fall back to an empty dict."""
        if self.credentials is None:
            try:
                with open(os.path.join(self.path, 'data.txt')) as infile:
                    self.credentials = json.load(infile)

                if not isinstance(self.credentials, dict):
                    raise ValueError("invalid credentials format")

            except Exception as e:  # File is either empty or missing
                log.warn(e)
                self.credentials = {}

            log.info("credentials initialized with: %s" % self.credentials)
            # Guarantee the 'Servers' key exists for the merge helpers below.
            self.credentials['Servers'] = self.credentials.setdefault('Servers', [])

    def _get(self):
        self._ensure()
        return self.credentials

    def _set(self, data):
        """Replace the stored credentials; falsy data clears the store."""
        if data:
            self.credentials = data
            # Set credentials to file
            with open(os.path.join(self.path, 'data.txt'), 'w') as outfile:
                json.dump(data, outfile, ensure_ascii=True)
        else:
            self._clear()

        # Fixed log message: was "credentialsupdated".
        log.info("credentials updated")

    def _clear(self):
        self.credentials = None
        # Remove credentials from file by truncating it.
        with open(os.path.join(self.path, 'data.txt'), 'w'):
            pass

    def getCredentials(self, data=None):
        """Return the stored credentials, optionally saving *data* first."""
        if data is not None:
            self._set(data)

        return self._get()

    def addOrUpdateServer(self, list_, server):
        """Merge *server* into *list_* keyed on 'Id'; return the stored entry.

        Raises KeyError when the server has no 'Id'. On a match, only the
        fields present in *server* overwrite the stored entry, and
        DateLastAccessed only moves forward in time.
        """
        if server.get('Id') is None:
            raise KeyError("Server['Id'] cannot be null or empty")

        # Add default DateLastAccessed if doesn't exist.
        server.setdefault('DateLastAccessed', "2001-01-01T00:00:00Z")

        for existing in list_:
            if existing['Id'] == server['Id']:
                # Merge the data
                if server.get('DateLastAccessed'):
                    if self._dateObject(server['DateLastAccessed']) > self._dateObject(existing['DateLastAccessed']):
                        existing['DateLastAccessed'] = server['DateLastAccessed']

                if server.get('UserLinkType'):
                    existing['UserLinkType'] = server['UserLinkType']

                if server.get('AccessToken'):
                    existing['AccessToken'] = server['AccessToken']
                    existing['UserId'] = server['UserId']

                if server.get('ExchangeToken'):
                    existing['ExchangeToken'] = server['ExchangeToken']

                if server.get('RemoteAddress'):
                    existing['RemoteAddress'] = server['RemoteAddress']

                if server.get('ManualAddress'):
                    existing['ManualAddress'] = server['ManualAddress']

                if server.get('LocalAddress'):
                    existing['LocalAddress'] = server['LocalAddress']

                if server.get('Name'):
                    existing['Name'] = server['Name']

                if server.get('WakeOnLanInfos'):
                    existing['WakeOnLanInfos'] = server['WakeOnLanInfos']

                # LastConnectionMode may legitimately be 0, so test against None.
                if server.get('LastConnectionMode') is not None:
                    existing['LastConnectionMode'] = server['LastConnectionMode']

                if server.get('ConnectServerId'):
                    existing['ConnectServerId'] = server['ConnectServerId']

                return existing
        else:
            # No match found: store the new server as-is.
            list_.append(server)
            return server

    def addOrUpdateUser(self, server, user):
        """Record *user* under server['Users'], flagging repeats as offline-capable."""
        for existing in server.setdefault('Users', []):
            if existing['Id'] == user['Id']:
                # Merge the data
                existing['IsSignedInOffline'] = True
                break
        else:
            server['Users'].append(user)

    def _dateObject(self, date):
        # Convert string to date
        try:
            date_obj = time.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
        except (ImportError, TypeError):
            # TypeError: attribute of type 'NoneType' is not callable
            # Known Kodi/python error
            date_obj = datetime(*(time.strptime(date, "%Y-%m-%dT%H:%M:%SZ")[0:6]))
        # NOTE(review): the success path yields a time.struct_time while the
        # fallback yields a datetime; comparisons in addOrUpdateServer only
        # work while both operands take the same path — confirm upstream.
        return date_obj
tudorbarascu/QGIS | python/plugins/processing/algs/qgis/ExportGeometryInfo.py | 15 | 8479 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExportGeometryInfo.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import math
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (NULL,
QgsApplication,
QgsCoordinateTransform,
QgsField,
QgsFields,
QgsWkbTypes,
QgsPointXY,
QgsFeatureSink,
QgsDistanceArea,
QgsProcessingUtils,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ExportGeometryInfo(QgisAlgorithm):
    """Processing algorithm that copies the input layer and appends
    per-feature geometry measurements as new attribute columns:
    area/perimeter for polygons, length (plus straight distance and
    sinuosity for single lines) for lines, and x/y(/z/m) coordinates
    or part counts for points."""

    INPUT = 'INPUT'
    METHOD = 'CALC_METHOD'
    OUTPUT = 'OUTPUT'

    def icon(self):
        return QgsApplication.getThemeIcon("/algorithms/mAlgorithmAddGeometryAttributes.svg")

    def svgIconPath(self):
        return QgsApplication.iconPath("/algorithms/mAlgorithmAddGeometryAttributes.svg")

    def tags(self):
        return self.tr('export,add,information,measurements,areas,lengths,perimeters,latitudes,longitudes,x,y,z,extract,points,lines,polygons,sinuosity,fields').split(',')

    def group(self):
        return self.tr('Vector geometry')

    def groupId(self):
        return 'vectorgeometry'

    def __init__(self):
        super().__init__()
        # Whether input geometries carry Z/M values; set during processAlgorithm.
        self.export_z = False
        self.export_m = False
        self.distance_area = None
        # Labels for the measurement-method enum parameter (indices 0/1/2).
        self.calc_methods = [self.tr('Layer CRS'),
                             self.tr('Project CRS'),
                             self.tr('Ellipsoidal')]

    def initAlgorithm(self, config=None):
        """Declare the input layer, measurement-method enum and output sink."""
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterEnum(self.METHOD,
                                                     self.tr('Calculate using'), options=self.calc_methods, defaultValue=0))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Added geom info')))

    def name(self):
        return 'exportaddgeometrycolumns'

    def displayName(self):
        return self.tr('Add geometry attributes')

    def processAlgorithm(self, parameters, context, feedback):
        """Copy each input feature, appending measurement columns chosen by
        the layer's geometry type and the selected calculation method."""
        source = self.parameterAsSource(parameters, self.INPUT, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))

        method = self.parameterAsEnum(parameters, self.METHOD, context)

        wkb_type = source.wkbType()

        fields = source.fields()

        # Decide which measurement columns to add based on geometry type.
        new_fields = QgsFields()
        if QgsWkbTypes.geometryType(wkb_type) == QgsWkbTypes.PolygonGeometry:
            new_fields.append(QgsField('area', QVariant.Double))
            new_fields.append(QgsField('perimeter', QVariant.Double))
        elif QgsWkbTypes.geometryType(wkb_type) == QgsWkbTypes.LineGeometry:
            new_fields.append(QgsField('length', QVariant.Double))
            if not QgsWkbTypes.isMultiType(source.wkbType()):
                new_fields.append(QgsField('straightdis', QVariant.Double))
                new_fields.append(QgsField('sinuosity', QVariant.Double))
        else:
            if QgsWkbTypes.isMultiType(source.wkbType()):
                new_fields.append(QgsField('numparts', QVariant.Int))
            else:
                new_fields.append(QgsField('xcoord', QVariant.Double))
                new_fields.append(QgsField('ycoord', QVariant.Double))
                if QgsWkbTypes.hasZ(source.wkbType()):
                    self.export_z = True
                    new_fields.append(QgsField('zcoord', QVariant.Double))
                if QgsWkbTypes.hasM(source.wkbType()):
                    self.export_m = True
                    new_fields.append(QgsField('mvalue', QVariant.Double))

        fields = QgsProcessingUtils.combineFields(fields, new_fields)
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               fields, wkb_type, source.sourceCrs())
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))

        coordTransform = None

        # Calculate with:
        # 0 - layer CRS
        # 1 - project CRS
        # 2 - ellipsoidal
        self.distance_area = QgsDistanceArea()
        if method == 2:
            self.distance_area.setSourceCrs(source.sourceCrs(), context.transformContext())
            self.distance_area.setEllipsoid(context.project().ellipsoid())
        elif method == 1:
            # Reproject geometries into the project CRS before measuring.
            coordTransform = QgsCoordinateTransform(source.sourceCrs(), context.project().crs(), context.project())

        features = source.getFeatures()
        total = 100.0 / source.featureCount() if source.featureCount() else 0
        for current, f in enumerate(features):
            if feedback.isCanceled():
                break

            outFeat = f
            attrs = f.attributes()
            inGeom = f.geometry()
            if inGeom:
                if coordTransform is not None:
                    inGeom.transform(coordTransform)

                if inGeom.type() == QgsWkbTypes.PointGeometry:
                    attrs.extend(self.point_attributes(inGeom))
                elif inGeom.type() == QgsWkbTypes.PolygonGeometry:
                    attrs.extend(self.polygon_attributes(inGeom))
                else:
                    attrs.extend(self.line_attributes(inGeom))

            # ensure consistent count of attributes - otherwise null
            # geometry features will have incorrect attribute length
            # and provider may reject them
            if len(attrs) < len(fields):
                attrs += [NULL] * (len(fields) - len(attrs))

            outFeat.setAttributes(attrs)
            sink.addFeature(outFeat, QgsFeatureSink.FastInsert)

            feedback.setProgress(int(current * total))

        return {self.OUTPUT: dest_id}

    def point_attributes(self, geometry):
        """Return [x, y(, z)(, m)] for single points, or [numparts] for multipoints."""
        attrs = []
        if not geometry.isMultipart():
            pt = geometry.constGet()
            attrs.append(pt.x())
            attrs.append(pt.y())
            # add point z/m
            if self.export_z:
                attrs.append(pt.z())
            if self.export_m:
                attrs.append(pt.m())
        else:
            attrs = [geometry.constGet().numGeometries()]
        return attrs

    def line_attributes(self, geometry):
        """Return [length] for multilines, or
        [length, straight distance, sinuosity] for single lines."""
        if geometry.isMultipart():
            return [self.distance_area.measureLength(geometry)]
        else:
            curve = geometry.constGet()
            p1 = curve.startPoint()
            p2 = curve.endPoint()
            straight_distance = self.distance_area.measureLine(QgsPointXY(p1), QgsPointXY(p2))
            sinuosity = curve.sinuosity()
            # sinuosity is NaN for degenerate curves; store NULL instead.
            if math.isnan(sinuosity):
                sinuosity = NULL
            return [self.distance_area.measureLength(geometry), straight_distance, sinuosity]

    def polygon_attributes(self, geometry):
        """Return [area, perimeter] measured with the configured QgsDistanceArea."""
        area = self.distance_area.measureArea(geometry)
        perimeter = self.distance_area.measurePerimeter(geometry)
        return [area, perimeter]
| gpl-2.0 |
eLBati/server-tools | __unported__/auth_admin_passkey/model/res_config.py | 61 | 3206 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Admin Passkey module for OpenERP
# Copyright (C) 2013-2014 GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv.orm import TransientModel
from openerp.tools.safe_eval import safe_eval
class base_config_settings(TransientModel):
    """Settings wizard glue for the auth_admin_passkey module.

    Both flags are persisted as ir.config_parameter entries, stored via
    repr() and read back with safe_eval (defaulting to True when unset).
    """
    _inherit = 'base.config.settings'

    # Getter / Setter Section
    def get_default_auth_admin_passkey_send_to_admin(
            self, cr, uid, ids, context=None):
        """Read 'auth_admin_passkey.send_to_admin' for the settings form."""
        icp = self.pool['ir.config_parameter']
        return {
            'auth_admin_passkey_send_to_admin': safe_eval(icp.get_param(
                cr, uid, 'auth_admin_passkey.send_to_admin', 'True')),
        }

    def set_auth_admin_passkey_send_to_admin(self, cr, uid, ids, context=None):
        """Persist the form value back to ir.config_parameter."""
        config = self.browse(cr, uid, ids[0], context=context)
        icp = self.pool['ir.config_parameter']
        icp.set_param(
            cr, uid, 'auth_admin_passkey.send_to_admin',
            repr(config.auth_admin_passkey_send_to_admin))

    def get_default_auth_admin_passkey_send_to_user(
            self, cr, uid, ids, context=None):
        """Read 'auth_admin_passkey.send_to_user' for the settings form."""
        icp = self.pool['ir.config_parameter']
        return {
            'auth_admin_passkey_send_to_user': safe_eval(icp.get_param(
                cr, uid, 'auth_admin_passkey.send_to_user', 'True')),
        }

    def set_auth_admin_passkey_send_to_user(self, cr, uid, ids, context=None):
        """Persist the form value back to ir.config_parameter."""
        config = self.browse(cr, uid, ids[0], context=context)
        icp = self.pool['ir.config_parameter']
        icp.set_param(
            cr, uid, 'auth_admin_passkey.send_to_user',
            repr(config.auth_admin_passkey_send_to_user))

    # Columns Section
    _columns = {
        'auth_admin_passkey_send_to_admin': fields.boolean(
            'Send email to admin user.',
            help="""When the administrator use his password to login in """
                 """with a different account, OpenERP will send an email """
                 """to the admin user.""",
        ),
        'auth_admin_passkey_send_to_user': fields.boolean(
            string='Send email to user.',
            help="""When the administrator use his password to login in """
                 """with a different account, OpenERP will send an email """
                 """to the account user.""",
        ),
    }
| agpl-3.0 |
danmit/django-calaccess-raw-data | calaccess_raw/models/common.py | 16 | 28129 | from __future__ import unicode_literals
from calaccess_raw import fields
from django.utils.encoding import python_2_unicode_compatible
from django.template.defaultfilters import floatformat
from django.contrib.humanize.templatetags.humanize import intcomma
from .base import CalAccessBaseModel
@python_2_unicode_compatible
class FilernameCd(CalAccessBaseModel):
    """
    A combination of CAL-ACCESS tables to provide the analyst with
    filer information.

    Full name of all PACs, firms, and employers are in the last
    name field.

    Major donors can be split between first and last name fields, but usually
    are contained in the last name field only. Individual names of lobbyists,
    candidates/officeholders, treasurers/responsible officers, and major donors
    (when they are only an individual's name) use both the first and last name
    fields in conjunction.
    """
    # Identifier fields
    xref_filer_id = fields.CharField(
        verbose_name='crossreference filer ID',
        max_length=15,
        db_column='XREF_FILER_ID',
        db_index=True,
        help_text="Alternative filer ID found on many forms"
    )
    filer_id = fields.IntegerField(
        verbose_name='filer ID',
        db_column='FILER_ID',
        db_index=True,
        null=True,
        help_text="Filer's unique identification number"
    )
    FILER_TYPE_CHOICES = (
        (' NOT DEFINED', 'Undefined'),
        ('ALL FILERS', 'All filers'),
        ('CANDIDATE/OFFICEHOLDER', 'Candidate/officeholder'),
        ('CLIENT', 'Client'),
        ('EMPLOYER', 'Employer'),
        ('FIRM', 'Firm'),
        ('INDIVIDUAL', 'Individual'),
        ('INITIATIVE', 'Initiative'),
        ('LOBBYIST', 'Lobbyist'),
        (
            'MAJOR DONOR/INDEPENDENT EXPENDITURE COMMITTEE',
            'Major donor or indenpendent expenditure committee'
        ),
        ('PAYMENT TO INFLUENCE', 'Payment to influence'),
        ('PREPAID ACCOUNT', 'Prepaid account'),
        ('PROPONENT', 'Proponent'),
        ('PROPOSITION', 'Proposition'),
        ('RECIPIENT COMMITTEE', 'Recipient committee'),
        ('SLATE MAILER ORGANIZATIONS', 'Slate mailer organization'),
        (
            'TREASURER/RESPONSIBLE OFFICER',
            'Treasurer/responsible officer'
        )
    )
    filer_type = fields.CharField(
        max_length=45,
        db_column='FILER_TYPE',
        db_index=True,
        choices=FILER_TYPE_CHOICES,
        help_text='The type of filer'
    )
    # NOTE(review): the single-letter status codes below have no documented
    # meaning in the source data dictionary, hence the empty labels.
    STATUS_CHOICES = (
        ('', 'Undefined'),
        ('A', ''),
        ('ACTIVE', ''),
        ('INACTIVE', ''),
        ('P', ''),
        ('R', ''),
        ('S', ''),
        ('TERMINATED', ''),
        ('W', ''),
    )
    status = fields.CharField(
        max_length=10,
        db_column='STATUS',
        db_index=True,
        choices=STATUS_CHOICES,
        blank=True,
        help_text='The status of the filer'
    )
    effect_dt = fields.DateField(
        db_column='EFFECT_DT',
        help_text="Effective date for status",
        null=True,
    )
    # Name fields
    naml = fields.CharField(
        max_length=200, db_column='NAML',
        help_text="Last name, sometimes full name"
    )
    namf = fields.CharField(
        max_length=55, db_column='NAMF', blank=True,
        help_text="First name"
    )
    namt = fields.CharField(
        max_length=70, db_column='NAMT', blank=True,
        help_text="Name prefix or title"
    )
    nams = fields.CharField(
        max_length=32, db_column='NAMS', blank=True,
        help_text="Name suffix"
    )
    # Address and contact fields
    adr1 = fields.CharField(
        max_length=200,
        db_column='ADR1',
        blank=True,
        help_text="First line of street address"
    )
    adr2 = fields.CharField(
        max_length=200,
        db_column='ADR2',
        blank=True,
        help_text="Second line of street address"
    )
    city = fields.CharField(
        max_length=55,
        db_column='CITY',
        blank=True,
        help_text="City address"
    )
    st = fields.CharField(
        max_length=4,
        db_column='ST',
        blank=True,
        verbose_name="State"
    )
    zip4 = fields.CharField(
        max_length=10,
        db_column='ZIP4',
        blank=True,
        help_text="ZIP Code"
    )
    phon = fields.CharField(
        max_length=60,
        db_column='PHON',
        blank=True,
        verbose_name="Phone",
        help_text="Phone number"
    )
    fax = fields.CharField(
        max_length=60,
        db_column='FAX',
        blank=True,
        help_text="Fax number"
    )
    email = fields.CharField(
        max_length=60,
        db_column='EMAIL',
        blank=True,
        help_text="Email address"
    )

    class Meta:
        app_label = 'calaccess_raw'
        db_table = 'FILERNAME_CD'
        verbose_name = 'FILERNAME_CD'
        verbose_name_plural = 'FILERNAME_CD'
        ordering = ("naml", "namf",)

    def __str__(self):
        return str(self.filer_id)
@python_2_unicode_compatible
class FilerFilingsCd(CalAccessBaseModel):
    """
    Key table that links filers to their paper, key data entry, legacy,
    and electronic filings. This table is used as an index to locate
    filing information.
    """
    filer_id = fields.IntegerField(
        verbose_name='filer ID',
        db_column='FILER_ID',
        db_index=True,
        null=True,
        help_text="Filer's unique identification number"
    )
    filing_id = fields.IntegerField(
        db_column='FILING_ID',
        db_index=True,
        verbose_name='filing ID',
        help_text="Unique filing identificiation number"
    )
    period_id = fields.IntegerField(
        null=True,
        db_column='PERIOD_ID',
        blank=True,
        help_text="Identifies the period when the filing was recieved."
    )
    # Codes for the CAL-ACCESS form on which the filing was made; the
    # source data dictionary provides no labels for these values.
    FORM_ID_CHOICES = (
        ('E530', ''),
        ('F111', ''),
        ('F400', ''),
        ('F401', ''),
        ('F402', ''),
        ('F405', ''),
        ('F410', ''),
        ('F410 AT', ''),
        ('F410ATR', ''),
        ('F415', ''),
        ('F416', ''),
        ('F419', ''),
        ('F420', ''),
        ('F421', ''),
        ('F425', ''),
        ('F430', ''),
        ('F440', ''),
        ('F450', ''),
        ('F460', ''),
        ('F461', ''),
        ('F465', ''),
        ('F470', ''),
        ('F470S', ''),
        ('F480', ''),
        ('F490', ''),
        ('F495', ''),
        ('F496', ''),
        ('F497', ''),
        ('F498', ''),
        ('F500', ''),
        ('F501', ''),
        ('F501502', ''),
        ('F502', ''),
        ('F555', ''),
        ('F601', ''),
        ('F602', ''),
        ('F603', ''),
        ('F604', ''),
        ('F605', ''),
        ('F606', ''),
        ('F607', ''),
        ('F615', ''),
        ('F625', ''),
        ('F635', ''),
        ('F645', ''),
        ('F666', ''),
        ('F690', ''),
        ('F700', ''),
        ('F777', ''),
        ('F888', ''),
        ('F900', ''),
        ('F999', ''),
    )
    form_id = fields.CharField(
        max_length=7,
        db_column='FORM_ID',
        db_index=True,
        verbose_name='form type',
        choices=FORM_ID_CHOICES,
        help_text="Form identification code"
    )
    filing_sequence = fields.IntegerField(
        db_column='FILING_SEQUENCE',
        db_index=True,
        help_text="Amendment number where 0 is an original filing and 1 to \
999 are amendments"
    )
    filing_date = fields.DateField(
        db_column='FILING_DATE',
        help_text="Date the filing entered into the system",
        null=True
    )
    STATEMENT_TYPE_CHOICES = (
        (0, ''),
        (10001, ''),
        (10002, ''),
        (10003, ''),
        (10004, ''),
        (10005, ''),
        (10006, ''),
        (10007, ''),
    )
    stmnt_type = fields.IntegerField(
        db_column='STMNT_TYPE',
        verbose_name="statement type",
        db_index=True,
        choices=STATEMENT_TYPE_CHOICES,
        help_text="Type of statement"
    )
    STATEMENT_STATUS_CHOICES = (
        (0, ''),
        (11001, ''),
        (11002, ''),
        (11003, ''),
    )
    stmnt_status = fields.IntegerField(
        db_column='STMNT_STATUS',
        db_index=True,
        help_text="The status of the statement. If the filing has been \
reviewed or not reviewed.",
        verbose_name='statement status',
        choices=STATEMENT_STATUS_CHOICES,
    )
    session_id = fields.IntegerField(
        verbose_name='session ID',
        db_column='SESSION_ID',
        help_text='Legislative session identification number',
        null=True,
    )
    user_id = fields.CharField(
        max_length=12,
        db_column='USER_ID',
        verbose_name="User ID",
        help_text="User identifier of the PRD user who logged the filing"
    )
    special_audit = fields.IntegerField(
        null=True,
        db_column='SPECIAL_AUDIT',
        blank=True,
        help_text="Denotes whether the filing has been audited for money \
laundering or other special condition."
    )
    fine_audit = fields.IntegerField(
        null=True,
        db_column='FINE_AUDIT',
        blank=True,
        help_text="Indicates whether a filing has been audited for a fine"
    )
    # Reporting period covered by the filing
    rpt_start = fields.DateField(
        null=True,
        db_column='RPT_START',
        blank=True,
        help_text="Starting date for the period the filing represents",
    )
    rpt_end = fields.DateField(
        null=True,
        db_column='RPT_END',
        blank=True,
        help_text="Ending date for the period the filing represents",
    )
    rpt_date = fields.DateField(
        null=True,
        db_column='RPT_DATE',
        blank=True,
        help_text="Date filing received",
    )
    FILING_TYPE_CHOICES = (
        (0, '0 (Unknown)'),
        (22001, 'Electronic'),
        (22006, 'Cal Online'),
    )
    filing_type = fields.IntegerField(
        db_column='FILING_TYPE',
        null=True,
        blank=True,
        choices=FILING_TYPE_CHOICES,
        help_text="The type of filing"
    )

    class Meta:
        app_label = 'calaccess_raw'
        db_table = 'FILER_FILINGS_CD'
        verbose_name = 'FILER_FILINGS_CD'
        verbose_name_plural = 'FILER_FILINGS_CD'

    def __str__(self):
        return str("%s %s" % (self.filer_id, self.filing_id))
@python_2_unicode_compatible
class FilingsCd(CalAccessBaseModel):
    """
    This table is the parent table from which all links and association to
    a filing are derived.
    """
    filing_id = fields.IntegerField(
        db_column='FILING_ID',
        db_index=True,
        verbose_name='filing ID',
        help_text="Unique filing identificiation number"
    )
    # How the filing entered the system (electronic, key entry, legacy, etc.)
    FILING_TYPE_CHOICES = (
        (22001, 'Electronic'),
        (22002, 'Key data entry'),
        (22003, 'Historical lobby'),
        (22004, 'Historical campaign'),
        (22005, 'AMS'),
        (22006, 'Cal Online'),
    )
    filing_type = fields.IntegerField(
        db_column='FILING_TYPE',
        db_index=True,
        choices=FILING_TYPE_CHOICES,
        help_text="The type of filing"
    )

    class Meta:
        app_label = 'calaccess_raw'
        db_table = 'FILINGS_CD'
        verbose_name = 'FILINGS_CD'
        verbose_name_plural = 'FILINGS_CD'

    def __str__(self):
        return str("%s %s" % (self.filing_id, self.filing_type))
@python_2_unicode_compatible
class SmryCd(CalAccessBaseModel):
    """
    Summary totals from filings.
    """
    # Columns forming the natural key of a summary row.
    UNIQUE_KEY = (
        "FILING_ID",
        "AMEND_ID",
        "LINE_ITEM",
        "REC_TYPE",
        "FORM_TYPE",
    )
    filing_id = fields.IntegerField(
        db_column='FILING_ID',
        db_index=True,
        verbose_name='filing ID',
        help_text="Unique filing identificiation number"
    )
    amend_id = fields.IntegerField(
        db_column='AMEND_ID',
        db_index=True,
        help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
        verbose_name="amendment ID"
    )
    line_item = fields.CharField(
        max_length=8,
        db_column='LINE_ITEM',
        db_index=True,
        help_text="Line item number of this record"
    )
    REC_TYPE_CHOICES = (
        ('SMRY', 'SMRY'),
    )
    rec_type = fields.CharField(
        max_length=4,
        db_column='REC_TYPE',
        db_index=True,
        choices=REC_TYPE_CHOICES,
        verbose_name='record type',
    )
    # Source form or schedule the summary line was taken from.
    FORM_TYPE_CHOICES = (
        ('401A', 'Form 401 (Slate mailer organization campaign statement): \
Schedule A, payments received'),
        ('401B', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B, payments made'),
        ('401B-1', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B1, payments made by agent or independent contractor'),
        ('A', 'Form 460 (Recipient committee campaign statement): \
Schedule A, '),
        ('B1', 'Form 460 (Recipient committee campaign statement): \
Schedule B1, '),
        ('B2', 'Form 460 (Recipient committee campaign statement): \
Schedule B2, '),
        ('B3', 'Form 460 (Recipient committee campaign statement): \
Schedule B3, '),
        ('C', 'Form 460 (Recipient committee campaign statement): \
Schedule C, '),
        ('D', 'Form 460 (Recipient committee campaign statement): \
Schedule D, '),
        ('E', 'Form 460 (Recipient committee campaign statement): \
Schedule E, '),
        ('F', 'Form 460 (Recipient committee campaign statement): \
Schedule F, '),
        ('G', 'Form 460 (Recipient committee campaign statement): \
Schedule G, '),
        ('H', 'Form 460 (Recipient committee campaign statement): \
Schedule H, '),
        ('H1', 'Form 460 (Recipient committee campaign statement): \
Schedule H1, '),
        ('H2', 'Form 460 (Recipient committee campaign statement): \
Schedule H2, '),
        ('H3', 'Form 460 (Recipient committee campaign statement): \
Schedule H3, '),
        ('I', 'Form 460 (Recipient committee campaign statement): \
Schedule I, '),
        ('F401', 'Form 401 (Slate mailer organization campaign statement)'),
        ('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
        ('F460', 'Form 460 (Recipient committee campaign statement)'),
        ('F461', 'Form 461 (Independent expenditure and major donor \
committee campaign statement)'),
        ('F465', 'Form 465 ()'),
        ('F625', 'Form 625 (Report of lobbying firm)'),
        ('F625P2', 'Form 625 (Report of lobbying firm): \
Part 2, payments received in connection with lobbying activity'),
        ('F625P3A', 'Form 625 (Report of lobbying firm): \
Part 3A, payments for activity expenses made in connection with \
lobbying activities'),
        ('F625P3B', 'Form 625 (Report of lobbying firm): \
Part 3B, payments to other lobbying firms made in connection with \
lobbying activities'),
        ('F635', 'Form 635 (Report of lobbyist employer and lobbying \
coalition)'),
        ('F635P3A', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3A, payments in in-house employee lobbyists'),
        ('F635P3B', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3B, payments to lobbying firms'),
        ('F635P3C', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3C, activity expenses'),
        ('F635P3D', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3D, other payments to influence legislative or \
administrative action'),
        ('F635P3E', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3E, payments in connection with administrative testimony \
in ratemaking proceedings before the California Public Utilities Commission'),
        ('F645', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action)'),
        ('F645P2A', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action): Part 2A, activity expenses'),
        ('F645P2B', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action): Part 2B, \
other payments to influence legislative or administrative action'),
        ('F645P2C', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action): Part 2C, \
payments in connection with administrative testimony in ratemaking \
proceedings before the California Public Utilities Commission'),
        ('F900', 'Form 900 (Form 900 (Public Employee\'s Retirement Board \
Candidate Campaign Statement)'),
        ('S640', 'Form 640 (Governmental agencies reporting ther payments to \
influence legislative or administrative action attachment)'),
    )
    form_type = fields.CharField(
        max_length=8,
        db_column='FORM_TYPE',
        db_index=True,
        choices=FORM_TYPE_CHOICES,
        help_text='Name of the source filing form or schedule'
    )
    # Summary amounts from the form's columns A/B/C.
    amount_a = fields.DecimalField(
        decimal_places=2,
        null=True,
        max_digits=14,
        db_column='AMOUNT_A',
        blank=True,
        help_text='Summary amount from column A',
        verbose_name='amount A'
    )
    amount_b = fields.DecimalField(
        decimal_places=2,
        null=True,
        max_digits=14,
        db_column='AMOUNT_B',
        blank=True,
        help_text='Summary amount from column B',
        verbose_name='amount B'
    )
    amount_c = fields.DecimalField(
        decimal_places=2,
        null=True,
        max_digits=14,
        db_column='AMOUNT_C',
        blank=True,
        help_text='Summary amount from column C',
        verbose_name='amount C'
    )
    elec_dt = fields.DateField(
        db_column='ELEC_DT',
        null=True,
        blank=True,
        verbose_name='election date'
    )

    class Meta:
        app_label = 'calaccess_raw'
        db_table = 'SMRY_CD'
        verbose_name = 'SMRY_CD'
        verbose_name_plural = 'SMRY_CD'
        ordering = ("filing_id", "-amend_id", 'form_type', "line_item")

    def __str__(self):
        return str(self.filing_id)

    # Dollar-formatted versions of the amounts, for the Django admin.
    def pretty_amount_a(self):
        if self.amount_a is None:
            return None
        return "$%s" % intcomma(floatformat(self.amount_a, 0))
    pretty_amount_a.short_description = 'amount A'

    def pretty_amount_b(self):
        if self.amount_b is None:
            return None
        return "$%s" % intcomma(floatformat(self.amount_b, 0))
    pretty_amount_b.short_description = 'amount B'

    def pretty_amount_c(self):
        if self.amount_c is None:
            return None
        return "$%s" % intcomma(floatformat(self.amount_c, 0))
    pretty_amount_c.short_description = 'amount C'
@python_2_unicode_compatible
class CvrE530Cd(CalAccessBaseModel):
    """
    This table method is undocumented.
    Judging by FORM_TYPE_CHOICES below, rows appear to hold cover-page
    data for Form 530 (issue advocacy report) filings -- confirm against
    the raw data before relying on that.
    """
    filing_id = fields.IntegerField(
        db_column='FILING_ID',
        db_index=True,
        verbose_name='filing ID',
        help_text="Unique filing identificiation number"
    )
    amend_id = fields.IntegerField(
        db_column='AMEND_ID',
        db_index=True,
        help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
        verbose_name="amendment ID"
    )
    REC_TYPE_CHOICES = (
        ("CVR", "CVR"),
    )
    rec_type = fields.CharField(
        verbose_name='record type',
        db_column='REC_TYPE',
        max_length=4,
        db_index=True,
        choices=REC_TYPE_CHOICES,
    )
    FORM_TYPE_CHOICES = (
        ('E530', 'Form 530 (Issue advocacy report)'),
    )
    form_type = fields.CharField(
        db_column='FORM_TYPE',
        max_length=4,
        db_index=True,
        help_text='Name of the source filing form or schedule',
        choices=FORM_TYPE_CHOICES,
    )
    ENTITY_CODE_CHOICES = (
        # Defined here:
        # http://www.documentcloud.org/documents/1308003-cal-access-cal-\
        # format.html#document/p9
        ('', 'Unknown'),
    )
    entity_cd = fields.CharField(
        db_column='ENTITY_CD',
        max_length=32,
        blank=True,
        verbose_name='entity code',
        choices=ENTITY_CODE_CHOICES
    )
    # Identity of the entity that filed the report.
    filer_naml = fields.CharField(
        db_column='FILER_NAML',
        max_length=200,
        help_text="Filer last name"
    )
    filer_namf = fields.CharField(
        db_column='FILER_NAMF',
        max_length=4,
        blank=True,
        help_text="Filer first name"
    )
    filer_namt = fields.CharField(
        db_column='FILER_NAMT',
        max_length=32,
        blank=True,
        help_text="Filer title or prefix"
    )
    filer_nams = fields.CharField(
        db_column='FILER_NAMS',
        max_length=32,
        blank=True,
        help_text="Filer suffix"
    )
    report_num = fields.CharField(
        db_column='REPORT_NUM',
        max_length=32,
        blank=True,
        help_text="This field is undocumented"
    )
    rpt_date = fields.DateField(
        db_column='RPT_DATE',
        null=True,
        help_text="This field is undocumented"
    )
    filer_city = fields.CharField(
        db_column='FILER_CITY',
        max_length=16,
        blank=True,
        help_text='Filer city'
    )
    filer_st = fields.CharField(
        db_column='FILER_ST',
        max_length=4,
        blank=True,
        verbose_name='Filer state'
    )
    filer_zip4 = fields.CharField(
        db_column='FILER_ZIP4',
        max_length=10,
        blank=True,
        help_text='Filer ZIP Code'
    )
    occupation = fields.CharField(
        db_column='OCCUPATION',
        max_length=15,
        blank=True,
        help_text="This field is undocumented"
    )
    employer = fields.CharField(
        db_column='EMPLOYER',
        max_length=13,
        blank=True,
        help_text="This field is undocumented"
    )
    # Identity of the candidate the advocacy concerns.
    cand_naml = fields.CharField(
        db_column='CAND_NAML',
        max_length=46,
        help_text="Candidate last name"
    )
    cand_namf = fields.CharField(
        db_column='CAND_NAMF',
        max_length=21,
        blank=True,
        help_text="Candidate first name"
    )
    cand_namt = fields.CharField(
        db_column='CAND_NAMT',
        max_length=32,
        blank=True,
        help_text="Candidate title or prefix"
    )
    cand_nams = fields.CharField(
        db_column='CAND_NAMS',
        max_length=32,
        blank=True,
        help_text="Candidate suffix"
    )
    district_cd = fields.IntegerField(
        db_column='DISTRICT_CD',
        help_text="This field is undocumented"
    )
    office_cd = fields.IntegerField(
        db_column='OFFICE_CD',
        help_text="This field is undocumented"
    )
    pmnt_dt = fields.DateField(
        db_column='PMNT_DT',
        null=True,
        help_text="This field is undocumented"
    )
    pmnt_amount = fields.FloatField(
        db_column='PMNT_AMOUNT',
        help_text="This field is undocumented"
    )
    # The type_* columns look like per-medium flags for the advocacy
    # (literature, print ads, radio, TV, etc.) -- undocumented upstream.
    type_literature = fields.IntegerField(
        db_column='TYPE_LITERATURE',
        help_text="This field is undocumented"
    )
    type_printads = fields.IntegerField(
        db_column='TYPE_PRINTADS',
        help_text="This field is undocumented"
    )
    type_radio = fields.IntegerField(
        db_column='TYPE_RADIO',
        help_text="This field is undocumented"
    )
    type_tv = fields.IntegerField(
        db_column='TYPE_TV',
        help_text="This field is undocumented"
    )
    type_it = fields.IntegerField(
        db_column='TYPE_IT',
        help_text="This field is undocumented"
    )
    type_billboards = fields.IntegerField(
        db_column='TYPE_BILLBOARDS',
        help_text="This field is undocumented"
    )
    type_other = fields.IntegerField(
        db_column='TYPE_OTHER',
        help_text="This field is undocumented"
    )
    other_desc = fields.CharField(
        db_column='OTHER_DESC',
        max_length=49,
        help_text="This field is undocumented"
    )
    class Meta:
        # Map the model onto the raw CAL-ACCESS table name verbatim.
        app_label = 'calaccess_raw'
        db_table = 'CVR_E530_CD'
        verbose_name = 'CVR_E530_CD'
        verbose_name_plural = 'CVR_E530_CD'
    def __str__(self):
        # Identify a row by the filing it came from.
        return str(self.filing_id)
@python_2_unicode_compatible
class TextMemoCd(CalAccessBaseModel):
    """
    Text memos attached to electronic filings

    Each row is one free-text memo, linked back to a record in another
    table via (filing_id, amend_id, ref_no).
    """
    filing_id = fields.IntegerField(
        db_column='FILING_ID',
        db_index=True,
        verbose_name='filing ID',
        help_text="Unique filing identificiation number"
    )
    amend_id = fields.IntegerField(
        db_column='AMEND_ID',
        db_index=True,
        help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
        verbose_name="amendment ID"
    )
    line_item = fields.IntegerField(
        db_column='LINE_ITEM',
        help_text="Line item number of this record",
        db_index=True,
    )
    REC_TYPE_CHOICES = (
        ('i', 'i'),
        ('MEMO', 'MEMO'),
        ('TEXT', 'TEXT'),
        ('trun', 'trun'),
        ('Unde', 'Unde'),
    )
    rec_type = fields.CharField(
        db_column='REC_TYPE',
        max_length=4,
        db_index=True,
        choices=REC_TYPE_CHOICES,
        verbose_name='record type'
    )
    # Note: the human-readable descriptions for these choices are left
    # blank; the values are form/schedule identifiers from the raw data.
    FORM_TYPE_CHOICES = (
        (' E', ''),
        ('410', ''),
        ('460', ''),
        ('461', ''),
        ('465', ''),
        ('496', ''),
        ('497', ''),
        ('497P1', ''),
        ('497P2', ''),
        ('A', ''),
        ('A4', ''),
        ('A6', ''),
        ('B', ''),
        ('B1', ''),
        ('B2', ''),
        ('B3', ''),
        ('C', ''),
        ('COMMENTS', ''),
        ('CVR', ''),
        ('D', ''),
        ('DEBTF', ''),
        ('E', ''),
        ('EXPNT', ''),
        ('F', ''),
        ('F401', ''),
        ('F401A', ''),
        ('F401B', ''),
        ('F401B-1', ''),
        ('F405', ''),
        ('F410', ''),
        ('F425', ''),
        ('F450', ''),
        ('F450P5', ''),
        ('F460', ''),
        ('F461', ''),
        ('F461P1', ''),
        ('F461P2', ''),
        ('F461P5', ''),
        ('F465', ''),
        ('F465P3', ''),
        ('F496', ''),
        ('F496P3', ''),
        ('F497', ''),
        ('F497P1', ''),
        ('F497P2', ''),
        ('F498-A', ''),
        ('F498-R', ''),
        ('F601', ''),
        ('F601P2A', ''),
        ('F601P2B', ''),
        ('F602', ''),
        ('F603', ''),
        ('F604', ''),
        ('F605', ''),
        ('F606', ''),
        ('F607', ''),
        ('F615', ''),
        ('F615P1', ''),
        ('F615P2', ''),
        ('F625', ''),
        ('F625P2', ''),
        ('F625P3A', ''),
        ('F625P3B', ''),
        ('F625P4B', ''),
        ('F635', ''),
        ('F635P3B', ''),
        ('F635P3C', ''),
        ('F635P4B', ''),
        ('F645', ''),
        ('F645P2A', ''),
        ('F645P3B', ''),
        ('G', ''),
        ('H', ''),
        ('H1', ''),
        ('H2', ''),
        ('H3', ''),
        ('I', ''),
        ('PT5', ''),
        ('RCPTB1', ''),
        ('RCPTC', ''),
        ('RCPTI', ''),
        ('S497', ''),
        ('S630', ''),
        ('S635-C', ''),
        ('S635C', ''),
        ('S640', ''),
        ('SCH A', ''),
        ('SF', ''),
        ('SMRY', ''),
        ('SPLT', ''),
        ('SUM', ''),
        ('SUMMARY', ''),
    )
    form_type = fields.CharField(
        db_column='FORM_TYPE',
        max_length=8,
        help_text='Name of the source filing form or schedule',
        db_index=True,
        choices=FORM_TYPE_CHOICES
    )
    ref_no = fields.CharField(
        db_column='REF_NO',
        max_length=20,
        blank=True,
        help_text='Links text memo to a specific record',
        verbose_name='reference number'
    )
    text4000 = fields.CharField(
        db_column='TEXT4000',
        max_length=4000,
        blank=True,
        help_text='Contents of the text memo',
        verbose_name='text'
    )
    class Meta:
        # Map the model onto the raw CAL-ACCESS table name verbatim.
        app_label = 'calaccess_raw'
        db_table = 'TEXT_MEMO_CD'
        verbose_name = 'TEXT_MEMO_CD'
        verbose_name_plural = 'TEXT_MEMO_CD'
    def __str__(self):
        # Identify a memo by the filing it came from.
        return str(self.filing_id)
| mit |
yfdyh000/kuma | kuma/wiki/views/edit.py | 3 | 13019 | # -*- coding: utf-8 -*-
from urllib import urlencode
import newrelic.agent
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_http_methods
from ratelimit.decorators import ratelimit
import kuma.wiki.content
from kuma.attachments.forms import AttachmentRevisionForm
from kuma.core.decorators import block_user_agents, login_required, never_cache
from kuma.core.urlresolvers import reverse
from kuma.core.utils import limit_banned_ip_to_0, urlparams
from ..decorators import (check_readonly, prevent_indexing,
process_document_path)
from ..forms import DocumentForm, RevisionForm
from ..models import Document, Revision
from .translate import translate
from .utils import document_form_initial, split_slug
@xframe_options_sameorigin
def _edit_document_collision(request, orig_rev, curr_rev, is_async_submit,
                             is_raw, rev_form, doc_form, section_id, rev, doc):
    """
    Handle when a mid-air collision is detected upon submission

    Re-renders the edit page with both the just-submitted content and the
    now-current content so the user can resolve the conflict.  For the raw
    content API (``is_raw``) it instead returns a bare 409 response so the
    client-side can drive its own conflict-resolution UI.

    NOTE(review): the ``is_async_submit`` parameter is accepted but never
    referenced in this function.
    """
    # Process the content as if it were about to be saved, so that the
    # html_diff is close as possible.
    content = (kuma.wiki.content.parse(request.POST['content'])
               .injectSectionIDs()
               .serialize())
    # Process the original content for a diff, extracting a section if we're
    # editing one.
    if doc.is_template:
        # Templates are stored raw; no section processing applies.
        curr_content = curr_rev.content
    else:
        parsed_content = kuma.wiki.content.parse(curr_rev.content)
        parsed_content.injectSectionIDs()
        if section_id:
            parsed_content.extractSection(section_id)
        curr_content = parsed_content.serialize()
    if is_raw:
        # When dealing with the raw content API, we need to signal the conflict
        # differently so the client-side can escape out to a conflict
        # resolution UI.
        response = HttpResponse('CONFLICT')
        response.status_code = 409
        return response
    # Make this response iframe-friendly so we can hack around the
    # save-and-edit iframe button
    context = {
        'collision': True,
        'revision_form': rev_form,
        'document_form': doc_form,
        'content': content,
        'current_content': curr_content,
        'section_id': section_id,
        'original_revision': orig_rev,
        'current_revision': curr_rev,
        'revision': rev,
        'document': doc,
    }
    return render(request, 'wiki/edit.html', context)
@newrelic.agent.function_trace()
@block_user_agents
@require_http_methods(['GET', 'POST'])
@login_required  # TODO: Stop repeating this knowledge here and in Document.allows_editing_by.
@ratelimit(key='user', rate=limit_banned_ip_to_0, block=True)
@process_document_path
@check_readonly
@prevent_indexing
@never_cache
def edit(request, document_slug, document_locale, revision_id=None):
    """
    Create a new revision of a wiki document, or edit document metadata.

    GET renders the edit page.  POST dispatches on the hidden 'form-type'
    field: 'doc' saves document metadata, 'rev' saves a new revision
    (with mid-air-collision detection against ``current_rev``).
    Documents that have a parent are translations and are delegated to
    the translate view.
    """
    doc = get_object_or_404(Document,
                            locale=document_locale,
                            slug=document_slug)
    # If this document has a parent, then the edit is handled by the
    # translate view. Pass it on.
    if doc.parent and doc.parent.id != doc.id:
        return translate(request, doc.parent.slug, doc.locale, revision_id,
                         bypass_process_document_path=True)
    if revision_id:
        rev = get_object_or_404(Revision, pk=revision_id, document=doc)
    else:
        # Fall back to the newest revision when the doc has no current one.
        rev = doc.current_revision or doc.revisions.order_by('-created',
                                                             '-id')[0]
    # Keep hold of the full post slug
    slug_dict = split_slug(document_slug)
    # Update the slug, removing the parent path, and
    # *only* using the last piece.
    # This is only for the edit form.
    rev.slug = slug_dict['specific']
    section_id = request.GET.get('section', None)
    if section_id and not request.is_ajax():
        return HttpResponse(ugettext("Sections may only be edited inline."))
    disclose_description = bool(request.GET.get('opendescription'))
    # Build only the forms this user is allowed to submit.
    doc_form = rev_form = None
    if doc.allows_revision_by(request.user):
        rev_form = RevisionForm(request=request,
                                instance=rev,
                                initial={'based_on': rev.id,
                                         'current_rev': rev.id,
                                         'comment': ''},
                                section_id=section_id)
    if doc.allows_editing_by(request.user):
        doc_form = DocumentForm(initial=document_form_initial(doc))
    # Need to make check *here* to see if this could have a translation parent
    show_translation_parent_block = (
        (document_locale != settings.WIKI_DEFAULT_LANGUAGE) and
        (not doc.parent_id))
    if request.method == 'GET':
        if not (rev_form or doc_form):
            # You can't do anything on this page, so get lost.
            raise PermissionDenied
    else:  # POST
        is_async_submit = request.is_ajax()
        is_raw = request.GET.get('raw', False)
        need_edit_links = request.GET.get('edit_links', False)
        parent_id = request.POST.get('parent_id', '')
        # Attempt to set a parent
        if show_translation_parent_block and parent_id:
            try:
                parent_doc = get_object_or_404(Document, id=parent_id)
                doc.parent = parent_doc
            except Document.DoesNotExist:
                pass
        # Comparing against localized names for the Save button bothers me, so
        # I embedded a hidden input:
        which_form = request.POST.get('form-type')
        if which_form == 'doc':
            if doc.allows_editing_by(request.user):
                post_data = request.POST.copy()
                post_data.update({'locale': document_locale})
                doc_form = DocumentForm(post_data, instance=doc)
                if doc_form.is_valid():
                    # if must be here for section edits
                    if 'slug' in post_data:
                        # Re-attach the parent path stripped off above.
                        post_data['slug'] = u'/'.join([slug_dict['parent'],
                                                       post_data['slug']])
                    # Get the possibly new slug for the imminent redirection:
                    doc = doc_form.save(parent=None)
                    return redirect(urlparams(doc.get_edit_url(),
                                              opendescription=1))
                disclose_description = True
            else:
                raise PermissionDenied
        elif which_form == 'rev':
            if not doc.allows_revision_by(request.user):
                raise PermissionDenied
            else:
                post_data = request.POST.copy()
                rev_form = RevisionForm(request=request,
                                        data=post_data,
                                        is_async_submit=is_async_submit,
                                        section_id=section_id)
                rev_form.instance.document = doc  # for rev_form.clean()
                # Come up with the original revision to which these changes
                # would be applied.
                orig_rev_id = request.POST.get('current_rev', False)
                if orig_rev_id is False:
                    orig_rev = None
                else:
                    orig_rev = Revision.objects.get(pk=orig_rev_id)
                # Get the document's actual current revision.
                curr_rev = doc.current_revision
                if not rev_form.is_valid():
                    # If this was an Ajax POST, then return a JsonResponse
                    if is_async_submit:
                        # Was there a mid-air collision?
                        if 'current_rev' in rev_form._errors:
                            # Make the error message safe so the '<' and '>' don't
                            # get turned into '&lt;' and '&gt;', respectively
                            rev_form.errors['current_rev'][0] = mark_safe(
                                rev_form.errors['current_rev'][0])
                        errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]
                        data = {
                            "error": True,
                            "error_message": errors,
                            "new_revision_id": curr_rev.id,
                        }
                        return JsonResponse(data=data)
                    # Jump out to a function to escape indentation hell
                    return _edit_document_collision(
                        request, orig_rev, curr_rev, is_async_submit,
                        is_raw, rev_form, doc_form, section_id,
                        rev, doc)
                # Was this an Ajax submission that was marked as spam?
                if is_async_submit and '__all__' in rev_form._errors:
                    # Return a JsonResponse
                    data = {
                        "error": True,
                        "error_message": mark_safe(rev_form.errors['__all__'][0]),
                        "new_revision_id": curr_rev.id,
                    }
                    return JsonResponse(data=data)
                if rev_form.is_valid():
                    rev_form.save(doc)
                    if (is_raw and orig_rev is not None and
                            curr_rev.id != orig_rev.id):
                        # If this is the raw view, and there was an original
                        # revision, but the original revision differed from the
                        # current revision at start of editing, we should tell
                        # the client to refresh the page.
                        response = HttpResponse('RESET')
                        response['X-Frame-Options'] = 'SAMEORIGIN'
                        response.status_code = 205
                        return response
                    # Is this an Ajax POST?
                    if is_async_submit:
                        # This is the most recent revision id
                        new_rev_id = rev.document.revisions.order_by('-id').first().id
                        data = {
                            "error": False,
                            "new_revision_id": new_rev_id
                        }
                        return JsonResponse(data)
                    if rev_form.instance.is_approved:
                        view = 'wiki.document'
                    else:
                        view = 'wiki.document_revisions'
                    # Construct the redirect URL, adding any needed parameters
                    url = reverse(view, args=[doc.slug], locale=doc.locale)
                    params = {}
                    if is_raw:
                        params['raw'] = 'true'
                        if need_edit_links:
                            # Only need to carry over ?edit_links with ?raw,
                            # because they're on by default in the normal UI
                            params['edit_links'] = 'true'
                        if section_id:
                            # If a section was edited, and we're using the raw
                            # content API, constrain to that section.
                            params['section'] = section_id
                    # Parameter for the document saved, so that we can delete the cached draft on load
                    params['document_saved'] = 'true'
                    url = '%s?%s' % (url, urlencode(params))
                    if not is_raw and section_id:
                        # If a section was edited, jump to the section anchor
                        # if we're not getting raw content.
                        url = '%s#%s' % (url, section_id)
                    return redirect(url)
    # GET, or an invalid/unhandled POST: re-render the edit page.
    parent_path = parent_slug = ''
    if slug_dict['parent']:
        parent_slug = slug_dict['parent']
    if doc.parent_topic_id:
        parent_doc = Document.objects.get(pk=doc.parent_topic_id)
        parent_path = parent_doc.get_absolute_url()
        parent_slug = parent_doc.slug
    context = {
        'revision_form': rev_form,
        'document_form': doc_form,
        'section_id': section_id,
        'disclose_description': disclose_description,
        'parent_slug': parent_slug,
        'parent_path': parent_path,
        'revision': rev,
        'document': doc,
        'attachment_form': AttachmentRevisionForm(),
    }
    return render(request, 'wiki/edit.html', context)
| mpl-2.0 |
tmtowtdi/MontyLacuna | t/bldgs/embassy.py | 1 | 5895 |
import os, sys
# Make the checkout's lib/ directory importable when this script is run
# directly from the t/bldgs/ test directory.
bindir = os.path.abspath(os.path.dirname(sys.argv[0]))
libdir = bindir + "/../../lib"
sys.path.append(libdir)
import lacuna as lac
###
### This creates two clients. One must be the leader of an alliance, and the
### other should be un-allied.
###
### guild leader
# Log in as the alliance leader and grab the Embassy on their planet.
leader_client = lac.clients.Member(
    config_file = bindir + "/../../etc/lacuna.cfg",
    config_section = 'play_test',
)
leader_planet = leader_client.get_body_byname( 'bmots rof 1.4' )
leader_emb = leader_planet.get_building_coords( -4, 0 )
### guild invitee
# Second, un-allied account used as the invitation target, plus its Embassy.
invitee_client = lac.clients.Member(
    config_file = bindir + "/../../etc/lacuna.cfg",
    config_section = 'play_test_two',
)
invitee_planet = invitee_client.get_body_byname( 'Evolme' )
invitee_emb = invitee_planet.get_building_coords( 5, 0 )
# Empire stats service, used by some of the commented-out examples below.
stats = leader_client.get_stats()
### Create new alliance
###
#ally = leader_emb.create_alliance( 'TestTwo Alliance' )
#print( "I created a new alliance named {}. The leader's ID is {}, and it was created on {}."
# .format(ally.name, ally.leader_id, ally.date_created)
#)
### Leave current alliance
###
#invitee_emb.leave_alliance( )
### Dissolve existing alliance
### Note this is using the invitee's embassy. I suggest you have your invitee
### whip up a quick alliance and dissolve that, instead of dissolving your
### leader's alliance.
###
#invitee_emb.dissolve_alliance()
### Get alliance status
###
#ally = leader_emb.get_alliance_status( )
#print( "My alliance is named {}. The leader's ID is {}, and it was created on {}."
# .format(ally.name, ally.leader_id, ally.date_created)
#)
### Send alliance invite
###
#player = stats.find_empire_rank( '', 'tmtowtdi_test' )[0]
#print( "{} has ID {}.".format(player.empire_name, player.empire_id) )
#leader_emb.send_invite( player.empire_id, "Come join my alliance!" )
### Get list of invites your alliance has sent out to potential members, but
### which have not been accepted or rejected yet.
###
#invites = leader_emb.get_pending_invites()
#for i in invites:
# print( "Invite ID {} has been sent out to empire {}, whose ID is {}."
# .format(i.id, i.name, i.empire_id)
# )
### Withdraw an already-sent alliance invite
###
#invites = leader_emb.get_pending_invites()
#leader_emb.withdraw_invite( invites[0].id, "Your invite is being withdrawn." )
### Get list of invitations sent to your empire from alliances you're not
### currently a member of
###
#invites = invitee_emb.get_my_invites()
#for i in invites:
# print( "I have been invited to join {} (ID {}). The invite ID is {}."
# .format(i.name, i.alliance_id, i.id)
# )
### Accept an invitation to an alliance
###
#invites = invitee_emb.get_my_invites()
#ally = invitee_emb.accept_invite( invites[0].id )
#print( "I accepted an invite to {}. The leader's ID is {}, and it was created on {}."
# .format(ally.name, ally.leader_id, ally.date_created)
#)
### Reject an invitation to an alliance
###
#invites = invitee_emb.get_my_invites()
#invitee_emb.reject_invite( invites[0].id )
### Set a new alliance leader
###
#my_ally = leader_client.get_my_alliance()
#if my_ally:
# print( "{}'s current leader's ID is {}.".format(my_ally.name, my_ally.leader_id) )
# new_leader = ''
# for i in my_ally.members:
# if i.id != my_ally.leader_id:
# print( "My new leader will be {} (ID {}).".format(i.name, i.id) )
# new_leader = i
# if not new_leader:
# raise KeyError("I was unable to find a new leader.")
# my_ally_now = leader_emb.assign_alliance_leader( new_leader.id )
# print( "The new leader of {} has an ID of {}."
# .format(my_ally_now.name, my_ally_now.leader_id)
# )
#else:
# print( "You are not in an alliance." )
###
### At this point, your invitee is the leader of the alliance. Go reassign
### leadership to the original leader again.
###
### Update alliance settings
###
#ally_settings = {
# 'forum_uri': 'http://www.example.com',
# 'description': 'This is a new shiny public description',
# 'announcements': 'This is only visible to alliance members but it was definitely set by our test script. Blarg.',
#}
#ally = leader_emb.update_alliance( ally_settings )
#print( "After update, our forum is at {}, our description is {}, and our announcements are {}."
# .format(ally.forum_uri, ally.description, ally.announcements)
#)
### View the contents of the alliance stash
###
#ally_stash = invitee_emb.view_stash()
#print( "I can exchange {} more times today, up to {} units per exchange."
# .format(ally_stash.exchanges_remaining_today, ally_stash.max_exchange_size)
#)
#print( "Some items already in the stash:" )
#cnt = 0
#for i in ally_stash.stash:
# cnt += 1
# if cnt > 3:
# break
# print( "\tThere are {:,} of {} in the stash.".format(int(ally_stash.stash[i]), i) )
#print( "Some items I have stored that I can exchange with the stash:" )
#cnt = 0
#for i in ally_stash.stored:
# cnt += 1
# if cnt > 3:
# break
# print( "\tI have {:,} of {} that can be added to the stash.".format(int(ally_stash.stored[i]), i) )
### Donate to the stash
### It's likely your leader's alliance's stash is already full, and you can't
### donate to a full stash. Have your invitee drop ally, then create his own
### brand new alliance to test this.
###
#donation = { 'apple': 1, }
#ally_stash = invitee_emb.donate_to_stash(donation)
#print( "Currently in the stash:" )
#for i in ally_stash.stash:
# print( "\tThere are {:,} of {} in the stash.".format(int(ally_stash.stash[i]), i) )
### Exchange with the stash
###
#donation = { 'fungus': 1, }
#request = { 'apple': 1 }
#ally_stash = invitee_emb.exchange_with_stash(donation, request)
#print( "Currently in the stash:" )
#for i in ally_stash.stash:
# print( "\tThere are {:,} of {} in the stash.".format(int(ally_stash.stash[i]), i) )
| mit |
KarelJakubec/pip | tests/scripts/test_all_pip.py | 48 | 3783 | import os
import re
import sys
import subprocess
from os.path import dirname, abspath
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import rmtree
# Root of the pip source checkout (two directories above this script).
src_folder = dirname(dirname(abspath(__file__)))
# Name of a virtualenv's scripts directory differs per platform.
if sys.platform == 'win32':
    bin_dir = 'Scripts'
else:
    bin_dir = 'bin'
def all_projects():
    """
    Return the names of all projects listed on the PyPI simple index.

    Scrapes the anchor text of every <a> tag on the index page.
    """
    data = urllib_request.urlopen('http://pypi.python.org/simple/').read()
    # urlopen().read() returns bytes on Python 3; decode before matching a
    # str pattern (on Python 2 this yields unicode, which also matches).
    if not isinstance(data, str):
        data = data.decode('utf-8')
    projects = [m.group(1) for m in re.finditer(r'<a.*?>(.+)</a>', data)]
    return projects
def main(args=None):
    """
    Entry point: make sure the output directory and the pending.txt list
    exist, then test packages until the pending list is empty.
    """
    args = sys.argv[1:] if args is None else args
    if not args:
        print('Usage: test_all_pip.py <output-dir>')
        sys.exit(1)
    output = os.path.abspath(args[0])
    if not os.path.exists(output):
        print('Creating %s' % output)
        os.makedirs(output)
    pending_fn = os.path.join(output, 'pending.txt')
    if not os.path.exists(pending_fn):
        # First run: seed the pending list with every project on PyPI.
        print('Downloading pending list')
        projects = all_projects()
        print('Found %s projects' % len(projects))
        with open(pending_fn, 'w') as f:
            f.writelines(name + '\n' for name in projects)
    print('Starting testing...')
    # Each call pops one package off the end of pending.txt.
    while os.stat(pending_fn).st_size:
        _test_packages(output, pending_fn)
    print('Finished all pending!')
def _test_packages(output, pending_fn):
    """
    Pop the next package off the pending list, try to install it into a
    fresh virtualenv with the development pip, and record the outcome in
    success.txt / failure.txt / easy-failure.txt under *output*.
    """
    package = get_last_item(pending_fn)
    print('Testing package %s' % package)
    dest_dir = os.path.join(output, package)
    print('Creating virtualenv in %s' % dest_dir)
    create_venv(dest_dir)
    print('Uninstalling actual pip')
    # Setup steps must succeed; check_call raises CalledProcessError
    # (aborting the run) if they do not.
    subprocess.check_call([
        os.path.join(dest_dir, bin_dir, 'pip'),
        'uninstall',
        '-y',
        'pip',
    ])
    print('Installing development pip')
    subprocess.check_call(
        [
            os.path.join(dest_dir, bin_dir, 'python'),
            'setup.py',
            'install'
        ],
        cwd=src_folder,
    )
    print('Trying installation of %s' % dest_dir)
    # Use call(), not check_call(): a failed install is an expected,
    # recorded outcome.  With check_call() the non-zero case raises and the
    # failure-handling branch below could never run.
    code = subprocess.call([
        os.path.join(dest_dir, bin_dir, 'pip'),
        'install',
        package,
    ])
    if code:
        print('Installation of %s failed' % package)
        print('Now checking easy_install...')
        # Retry with easy_install in a clean virtualenv to see whether the
        # failure is pip-specific.
        create_venv(dest_dir)
        code = subprocess.call([
            os.path.join(dest_dir, bin_dir, 'easy_install'),
            package,
        ])
        if code:
            print('easy_install also failed')
            add_package(os.path.join(output, 'easy-failure.txt'), package)
        else:
            print('easy_install succeeded')
        add_package(os.path.join(output, 'failure.txt'), package)
        pop_last_item(pending_fn, package)
    else:
        print('Installation of %s succeeded' % package)
        add_package(os.path.join(output, 'success.txt'), package)
        pop_last_item(pending_fn, package)
        # Only successful environments are cleaned up; failures are kept
        # around for inspection.
        rmtree(dest_dir)
def create_venv(dest_dir):
    """
    Create a fresh --no-site-packages virtualenv at *dest_dir*, removing
    any previous one first.
    """
    if os.path.exists(dest_dir):
        rmtree(dest_dir)
    print('Creating virtualenv in %s' % dest_dir)
    # check_call raises CalledProcessError on a non-zero exit, so a broken
    # virtualenv invocation aborts the run.  (The old ``assert not code``
    # after check_call was dead code: check_call either returns 0 or raises.)
    subprocess.check_call([
        'virtualenv',
        '--no-site-packages',
        dest_dir,
    ])
def get_last_item(fn):
    """
    Return the last line of file *fn*, stripped of surrounding whitespace.

    Raises IndexError if the file is empty.
    """
    # ``with`` guarantees the file handle is closed even if readlines()
    # raises (the old version leaked the handle on error).
    with open(fn, 'r') as f:
        lines = f.readlines()
    return lines[-1].strip()
def pop_last_item(fn, line=None):
    """
    Remove the last line from file *fn*.

    If *line* is given, assert that the removed line matches it (a sanity
    check that the pending list was not modified concurrently).
    """
    # ``with`` guarantees both handles are closed even on error.
    with open(fn, 'r') as f:
        lines = f.readlines()
    if line:
        assert lines[-1].strip() == line.strip()
    lines.pop()
    with open(fn, 'w') as f:
        f.writelines(lines)
def add_package(filename, package):
    """Append *package* on its own line to *filename* (created if missing)."""
    # ``with`` guarantees the handle is closed even if write() raises.
    with open(filename, 'a') as f:
        f.write(package + '\n')
# Allow running directly: python test_all_pip.py <output-dir>
if __name__ == '__main__':
    main()
| mit |
RO-ny9/python-for-android | python-modules/twisted/twisted/mail/tap.py | 54 | 6882 | # -*- test-case-name: twisted.mail.test.test_options -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""I am the support module for creating mail servers with twistd
"""
import os
import sys
from twisted.mail import mail
from twisted.mail import maildir
from twisted.mail import relay
from twisted.mail import relaymanager
from twisted.mail import alias
from twisted.python import usage
from twisted.cred import checkers
from twisted.application import internet
class Options(usage.Options):
    # NOTE: twisted.python.usage renders each opt_* method's docstring as the
    # user-visible --help text, so those docstrings are part of the
    # command-line interface -- do not edit them casually.
    synopsis = "[options]"
    optParameters = [
        ["pop3", "p", 8110, "Port to start the POP3 server on (0 to disable).", usage.portCoerce],
        ["pop3s", "S", 0, "Port to start the POP3-over-SSL server on (0 to disable).", usage.portCoerce],
        ["smtp", "s", 8025, "Port to start the SMTP server on (0 to disable).", usage.portCoerce],
        ["certificate", "c", None, "Certificate file to use for SSL connections"],
        ["relay", "R", None,
            "Relay messages according to their envelope 'To', using the given"
            "path as a queue directory."],
        ["hostname", "H", None, "The hostname by which to identify this server."],
    ]
    optFlags = [
        ["esmtp", "E", "Use RFC 1425/1869 SMTP extensions"],
        ["disable-anonymous", None, "Disallow non-authenticated SMTP connections"],
    ]
    zsh_actions = {"hostname" : "_hosts"}
    longdesc = "This creates a mail.tap file that can be used by twistd."
    def __init__(self):
        usage.Options.__init__(self)
        self.service = mail.MailService()
        # Tracks the most recently added domain so later options
        # (--user, --aliases, --default, --bounce-to-postmaster) can
        # apply to it.
        self.last_domain = None
    def opt_passwordfile(self, filename):
        """Specify a file containing username:password login info for authenticated ESMTP connections."""
        ch = checkers.OnDiskUsernamePasswordDatabase(filename)
        self.service.smtpPortal.registerChecker(ch)
    opt_P = opt_passwordfile
    def opt_default(self):
        """Make the most recently specified domain the default domain."""
        # The empty-string key is the catch-all (default) domain.
        if self.last_domain:
            self.service.addDomain('', self.last_domain)
        else:
            raise usage.UsageError("Specify a domain before specifying using --default")
    opt_D = opt_default
    def opt_maildirdbmdomain(self, domain):
        """generate an SMTP/POP3 virtual domain which saves to \"path\"
        """
        try:
            name, path = domain.split('=')
        except ValueError:
            raise usage.UsageError("Argument to --maildirdbmdomain must be of the form 'name=path'")
        self.last_domain = maildir.MaildirDirdbmDomain(self.service, os.path.abspath(path))
        self.service.addDomain(name, self.last_domain)
    opt_d = opt_maildirdbmdomain
    def opt_user(self, user_pass):
        """add a user/password to the last specified domains
        """
        try:
            user, password = user_pass.split('=', 1)
        except ValueError:
            raise usage.UsageError("Argument to --user must be of the form 'user=password'")
        if self.last_domain:
            self.last_domain.addUser(user, password)
        else:
            raise usage.UsageError("Specify a domain before specifying users")
    opt_u = opt_user
    def opt_bounce_to_postmaster(self):
        """undelivered mails are sent to the postmaster
        """
        self.last_domain.postmaster = 1
    opt_b = opt_bounce_to_postmaster
    def opt_aliases(self, filename):
        """Specify an aliases(5) file to use for this domain"""
        if self.last_domain is not None:
            if mail.IAliasableDomain.providedBy(self.last_domain):
                aliases = alias.loadAliasFile(self.service.domains, filename)
                self.last_domain.setAliasGroup(aliases)
                # Re-load the alias file whenever it changes on disk.
                self.service.monitor.monitorFile(
                    filename,
                    AliasUpdater(self.service.domains, self.last_domain)
                )
            else:
                raise usage.UsageError(
                    "%s does not support alias files" % (
                        self.last_domain.__class__.__name__,
                    )
                )
        else:
            raise usage.UsageError("Specify a domain before specifying aliases")
    opt_A = opt_aliases
    def postOptions(self):
        # Final cross-option validation after parsing.
        if self['pop3s']:
            if not self['certificate']:
                raise usage.UsageError("Cannot specify --pop3s without "
                                       "--certificate")
            elif not os.path.exists(self['certificate']):
                raise usage.UsageError("Certificate file %r does not exist."
                                       % self['certificate'])
        if not self['disable-anonymous']:
            self.service.smtpPortal.registerChecker(checkers.AllowAnonymousAccess())
        if not (self['pop3'] or self['smtp'] or self['pop3s']):
            raise usage.UsageError("You cannot disable all protocols")
class AliasUpdater:
    """
    File-monitor callback that reloads a domain's alias group whenever its
    aliases file changes on disk.
    """
    def __init__(self, domains, domain):
        # Keep both references so __call__ can rebuild the alias group later.
        self.domains = domains
        self.domain = domain
    def __call__(self, new):
        group = alias.loadAliasFile(self.domains, new)
        self.domain.setAliasGroup(group)
def makeService(config):
    """
    Build and return the MailService described by *config* (an Options
    instance), wiring up relaying, SSL and the POP3/SMTP listeners.
    """
    # Pick ESMTP or plain SMTP variants of the relay manager and factory.
    if config['esmtp']:
        rmType = relaymanager.SmartHostESMTPRelayingManager
        smtpFactory = config.service.getESMTPFactory
    else:
        rmType = relaymanager.SmartHostSMTPRelayingManager
        smtpFactory = config.service.getSMTPFactory
    if config['relay']:
        # Queue undeliverable-locally mail on disk and relay it out.
        dir = config['relay']
        if not os.path.isdir(dir):
            os.mkdir(dir)
        config.service.setQueue(relaymanager.Queue(dir))
        default = relay.DomainQueuer(config.service)
        manager = rmType(config.service.queue)
        if config['esmtp']:
            # Extra leading factory args required by the ESMTP manager --
            # presumably auth-related; confirm against relaymanager.
            manager.fArgs += (None, None)
        manager.fArgs += (config['hostname'],)
        # Poll the relay queue every second.
        helper = relaymanager.RelayStateHelper(manager, 1)
        helper.setServiceParent(config.service)
        config.service.domains.setDefaultDomain(default)
    # SSL context is shared by POP3S and (optionally) SMTP STARTTLS.
    ctx = None
    if config['certificate']:
        from twisted.mail.protocols import SSLContextFactory
        ctx = SSLContextFactory(config['certificate'])
    if config['pop3']:
        s = internet.TCPServer(config['pop3'], config.service.getPOP3Factory())
        s.setServiceParent(config.service)
    if config['pop3s']:
        s = internet.SSLServer(config['pop3s'],
                               config.service.getPOP3Factory(), ctx)
        s.setServiceParent(config.service)
    if config['smtp']:
        f = smtpFactory()
        f.context = ctx
        if config['hostname']:
            f.domain = config['hostname']
            f.fArgs = (f.domain,)
        if config['esmtp']:
            f.fArgs = (None, None) + f.fArgs
        s = internet.TCPServer(config['smtp'], f)
        s.setServiceParent(config.service)
    return config.service
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/wsgiref/validate.py | 92 | 15151 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licensed under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the headers don't contain newlines or colons, end in _ or -, or
contain characters codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .__next__() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
__all__ = ['validator']
import re
import sys
import warnings
# Syntactically valid HTTP header field name: a letter followed by
# letters, digits, '-' or '_'.
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
# Any C0 control character (octal 000-037) -- forbidden in header values.
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
    """
    Warning category for WSGI-spec-related issues.

    Used (via warnings.warn) for conditions that are dubious but do not
    warrant an outright AssertionError.
    """
def assert_(cond, *args):
    """Raise ``AssertionError(*args)`` unless *cond* is true.

    Used instead of the ``assert`` statement so the checks are not
    stripped when Python runs with optimization enabled.
    """
    if cond:
        return
    raise AssertionError(*args)
def check_string_type(value, title):
    """Return *value* unchanged if it is exactly of type ``str``.

    Deliberately uses an exact ``type(...) is str`` test (not
    ``isinstance``) so that ``str`` subclasses are rejected too, as the
    WSGI spec requires native strings.
    """
    if type(value) is not str:
        raise AssertionError(
            "{0} must be of type str (got {1})".format(title, repr(value)))
    return value
def validator(application):
    """
    When applied between a WSGI server and a WSGI application, this
    middleware will check for WSGI compliancy on a number of levels.
    This middleware does not modify the request or response in any
    way, but will raise an AssertionError if anything seems off
    (except for a failure to close the application iterator, which
    will be printed to stderr -- there's no way to raise an exception
    at that point).
    """
    def lint_app(*args, **kw):
        # WSGI mandates exactly (environ, start_response), positionally.
        assert_(len(args) == 2, "Two arguments required")
        assert_(not kw, "No keyword arguments allowed")
        environ, start_response = args
        check_environ(environ)
        # We use this to check if the application returns without
        # calling start_response:
        start_response_started = []
        def start_response_wrapper(*args, **kw):
            # start_response(status, headers[, exc_info]) -- positional only.
            assert_(len(args) == 2 or len(args) == 3, (
                "Invalid number of arguments: %s" % (args,)))
            assert_(not kw, "No keyword arguments allowed")
            status = args[0]
            headers = args[1]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            check_status(status)
            check_headers(headers)
            check_content_type(status, headers)
            check_exc_info(exc_info)
            # Record the call so IteratorWrapper can verify that iteration
            # does not begin before start_response has run.
            start_response_started.append(None)
            return WriteWrapper(start_response(*args))
        # Wrap the streams so misuse by the application is caught too.
        environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
        iterator = application(environ, start_response_wrapper)
        assert_(iterator is not None and iterator != False,
            "The application must return an iterator, if only an empty list")
        check_iterator(iterator)
        return IteratorWrapper(iterator, start_response_started)
    return lint_app
class InputWrapper:
    """Wrapper around ``wsgi.input`` enforcing spec-compliant usage.

    Checks argument counts on the read methods, verifies that every
    result comes back as ``bytes``, and forbids ``close()``.
    """

    def __init__(self, wsgi_input):
        self.input = wsgi_input

    def read(self, *args):
        # The validator insists on an explicit size argument.
        assert_(len(args) == 1)
        data = self.input.read(*args)
        assert_(type(data) is bytes)
        return data

    def readline(self, *args):
        assert_(len(args) <= 1)
        data = self.input.readline(*args)
        assert_(type(data) is bytes)
        return data

    def readlines(self, *args):
        assert_(len(args) <= 1)
        result = self.input.readlines(*args)
        assert_(type(result) is list)
        for entry in result:
            assert_(type(entry) is bytes)
        return result

    def __iter__(self):
        line = self.readline()
        while line:
            yield line
            line = self.readline()

    def close(self):
        assert_(0, "input.close() must not be called")
class ErrorWrapper:
    """Wrapper around ``wsgi.errors`` enforcing spec-compliant usage.

    Only ``write``/``flush``/``writelines`` may be used, written data
    must be native strings, and ``close()`` is forbidden.
    """

    def __init__(self, wsgi_errors):
        self.errors = wsgi_errors

    def write(self, s):
        assert_(type(s) is str)
        self.errors.write(s)

    def flush(self):
        self.errors.flush()

    def writelines(self, seq):
        # Funnel everything through write() so each item is type-checked.
        for entry in seq:
            self.write(entry)

    def close(self):
        assert_(0, "errors.close() must not be called")
class WriteWrapper:
    """Wrapper for the write() callable returned by start_response.

    Ensures the application only hands bytestrings to the server's
    writer.
    """
    def __init__(self, wsgi_writer):
        self.writer = wsgi_writer
    def __call__(self, s):
        # WSGI bodies are bytes; native str here is an application bug.
        assert_(type(s) is bytes)
        self.writer(s)
class PartialIteratorWrapper:
    """Deferred IteratorWrapper: wraps at __iter__ time, not creation time."""
    def __init__(self, wsgi_iterator):
        self.iterator = wsgi_iterator
    def __iter__(self):
        # We want to make sure __iter__ is called
        # No start_response bookkeeping is available here, so pass None
        # to disable that particular check.
        return IteratorWrapper(self.iterator, None)
class IteratorWrapper:
    """Wraps the application's response iterable.

    Enforces that every yielded item is a bytestring, that iteration
    does not begin before start_response has been called, that the
    iterator is not read after close(), and (via __del__) that close()
    is eventually called.
    """
    def __init__(self, wsgi_iterator, check_start_response):
        self.original_iterator = wsgi_iterator
        self.iterator = iter(wsgi_iterator)
        self.closed = False
        # Empty list == start_response not yet called; None disables check.
        self.check_start_response = check_start_response
    def __iter__(self):
        return self
    def __next__(self):
        assert_(not self.closed,
            "Iterator read after closed")
        v = next(self.iterator)
        if type(v) is not bytes:
            assert_(False, "Iterator yielded non-bytestring (%r)" % (v,))
        if self.check_start_response is not None:
            assert_(self.check_start_response,
                "The application returns and we started iterating over its body, but start_response has not yet been called")
            # Ordering only needs to be verified on the first item.
            self.check_start_response = None
        return v
    def close(self):
        self.closed = True
        if hasattr(self.original_iterator, 'close'):
            self.original_iterator.close()
    def __del__(self):
        # A finalizer cannot reliably propagate exceptions, so report to
        # stderr first, then raise for good measure.
        if not self.closed:
            sys.stderr.write(
                "Iterator garbage collected without being closed")
        assert_(self.closed,
            "Iterator garbage collected without being closed")
def check_environ(environ):
    """Validate the WSGI environ dict.

    Checks the exact type, required keys, CGI variable types, wsgi.*
    values, and the SCRIPT_NAME / PATH_INFO / CONTENT_LENGTH invariants
    listed in the module docstring.  Raises AssertionError on hard
    violations; issues WSGIWarning for merely-dubious conditions.
    """
    assert_(type(environ) is dict,
        "Environment is not of the right type: %r (environment: %r)"
        % (type(environ), environ))
    for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                'wsgi.version', 'wsgi.input', 'wsgi.errors',
                'wsgi.multithread', 'wsgi.multiprocess',
                'wsgi.run_once']:
        assert_(key in environ,
            "Environment missing required key: %r" % (key,))
    for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
        # These two arrive as CONTENT_TYPE / CONTENT_LENGTH, never HTTP_*.
        assert_(key not in environ,
            "Environment should not have the key: %s "
            "(use %s instead)" % (key, key[5:]))
    if 'QUERY_STRING' not in environ:
        warnings.warn(
            'QUERY_STRING is not in the WSGI environment; the cgi '
            'module will use sys.argv when this variable is missing, '
            'so application errors are more likely',
            WSGIWarning)
    for key in environ.keys():
        if '.' in key:
            # Extension, we don't care about its type
            continue
        assert_(type(environ[key]) is str,
            "Environmental variable %s is not a string: %r (value: %r)"
            % (key, type(environ[key]), environ[key]))
    assert_(type(environ['wsgi.version']) is tuple,
        "wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],))
    assert_(environ['wsgi.url_scheme'] in ('http', 'https'),
        "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
    check_input(environ['wsgi.input'])
    check_errors(environ['wsgi.errors'])
    # @@: these need filling out:
    if environ['REQUEST_METHOD'] not in (
        'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
        warnings.warn(
            "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
            WSGIWarning)
    assert_(not environ.get('SCRIPT_NAME')
            or environ['SCRIPT_NAME'].startswith('/'),
        "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
    assert_(not environ.get('PATH_INFO')
            or environ['PATH_INFO'].startswith('/'),
        "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
    if environ.get('CONTENT_LENGTH'):
        assert_(int(environ['CONTENT_LENGTH']) >= 0,
            "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
    if not environ.get('SCRIPT_NAME'):
        assert_('PATH_INFO' in environ,
            "One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
            "should at least be '/' if SCRIPT_NAME is empty)")
    assert_(environ.get('SCRIPT_NAME') != '/',
        "SCRIPT_NAME cannot be '/'; it should instead be '', and "
        "PATH_INFO should be '/'")
def check_input(wsgi_input):
    """Verify wsgi.input exposes read, readline, readlines and __iter__."""
    required = ('read', 'readline', 'readlines', '__iter__')
    for attr in required:
        assert_(hasattr(wsgi_input, attr),
            "wsgi.input (%r) doesn't have the attribute %s"
            % (wsgi_input, attr))
def check_errors(wsgi_errors):
    """Verify wsgi.errors exposes flush, write and writelines."""
    required = ('flush', 'write', 'writelines')
    for attr in required:
        assert_(hasattr(wsgi_errors, attr),
            "wsgi.errors (%r) doesn't have the attribute %s"
            % (wsgi_errors, attr))
def check_status(status):
    """Validate a start_response status string.

    It must be a native str starting with a three-character integer
    >= 100.  A missing single-space-plus-reason-phrase suffix only
    triggers a WSGIWarning, not a failure.
    """
    status = check_string_type(status, "Status")
    # int() below doubles as the "starts with an integer" check.
    code = status.split(None, 1)[0]
    assert_(len(code) == 3,
        "Status codes must be three characters: %r" % code)
    numeric = int(code)
    assert_(numeric >= 100, "Status code is invalid: %r" % numeric)
    has_reason = len(status) >= 4 and status[3] == ' '
    if not has_reason:
        warnings.warn(
            "The status string (%r) should be a three-digit integer "
            "followed by a single space and a status explanation"
            % status, WSGIWarning)
def check_headers(headers):
    """Validate the response header list passed to start_response.

    Must be a plain list of (str, str) tuples; names must be valid
    header tokens, must not be 'Status' (a CGI-ism), must not end in
    '-' or '_', and values must not contain control characters.
    """
    assert_(type(headers) is list,
        "Headers (%r) must be of type list: %r"
        % (headers, type(headers)))
    header_names = {}
    for item in headers:
        assert_(type(item) is tuple,
            "Individual headers (%r) must be of type tuple: %r"
            % (item, type(item)))
        assert_(len(item) == 2)
        name, value = item
        name = check_string_type(name, "Header name")
        value = check_string_type(value, "Header value")
        assert_(name.lower() != 'status',
            "The Status header cannot be used; it conflicts with CGI "
            "script, and HTTP status is not given through headers "
            "(value: %r)." % value)
        # Names are recorded but duplicates are deliberately not
        # rejected -- repeated headers are legal in HTTP.
        header_names[name.lower()] = None
        assert_('\n' not in name and ':' not in name,
            "Header names may not contain ':' or '\\n': %r" % name)
        assert_(header_re.search(name), "Bad header name: %r" % name)
        assert_(not name.endswith('-') and not name.endswith('_'),
            "Names may not end in '-' or '_': %r" % name)
        if bad_header_value_re.search(value):
            assert_(0, "Bad header value: %r (bad char: %r)"
                % (value, bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
    """Check Content-Type presence/absence against the status code.

    Responses with a message body must carry a Content-Type header;
    204 and 304 responses must not.
    """
    status = check_string_type(status, "Status")
    code = int(status.split(None, 1)[0])
    # @@: need one more person to verify this interpretation of RFC 2616
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
    NO_MESSAGE_BODY = (204, 304)
    body_allowed = code not in NO_MESSAGE_BODY
    for name, value in headers:
        name = check_string_type(name, "Header name")
        if name.lower() != 'content-type':
            continue
        if body_allowed:
            return
        assert_(0, ("Content-Type header found in a %s response, "
                    "which must not return content.") % code)
    if body_allowed:
        assert_(0, "No Content-Type header found in headers (%s)" % headers)
def check_exc_info(exc_info):
    """The exc_info passed to start_response must be None or a tuple."""
    valid = exc_info is None or type(exc_info) is tuple
    assert_(valid,
        "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
    # More exc_info checks?
def check_iterator(iterator):
    """Reject plain (byte)strings as application return values."""
    # Technically a bytestring is legal, which is why it's a really bad
    # idea, because it may cause the response to be returned
    # character-by-character
    assert_(not isinstance(iterator, (str, bytes)),
        "You should not return a string as your application iterator, "
        "instead return a single-item list containing a bytestring.")
| gpl-2.0 |
iofun/spider | spider/messages/campaigns.py | 1 | 4016 | # -*- coding: utf-8 -*-
'''
Spider campaign message models.
'''
# This file is part of spider.
# Distributed under the terms of the last AGPL License.
# The full license is in the file LICENCE, distributed as part of this software.
__author__ = 'Team Machine'
import uuid
from schematics import models
from schematics import types
from schematics.types import compound
from spider.messages import Resource
class Campaign(models.Model):
    '''
    Inbound Campaign

    Declarative schematics model describing a call campaign: queue
    behaviour, agent ring strategy, SIP gateway settings and the
    scheduling window.  Only `account` is required at creation time.
    '''
    uuid = types.UUIDType(default=uuid.uuid4)
    account = types.StringType(required=True)
    # campaign name and description
    name = types.StringType()
    description = types.StringType()
    campaign_type = types.StringType()
    ring_in_use = types.StringType()
    record_audio = types.StringType()
    join_empty = types.StringType()
    max_len = types.StringType()
    service_level = types.StringType()
    wrap_up_time = types.StringType()
    retry = types.StringType()
    time_out = types.StringType()
    # campaign strategy -- presumably the standard PBX queue ring
    # strategies (confirm against the dialer backend).
    strategy = types.StringType(default='ringall',
                               choices=['ringall',
                                        'rrmemory',
                                        'leastrecent',
                                        'fewestcalls',
                                        'random'])
    status = types.StringType(default='inactive')
    active = types.BooleanType(default=False)
    paused = types.BooleanType(default=False)
    # campaign has script
    has_script = types.BooleanType(default=False)
    script = types.StringType()
    # for now default callerid is anonymous / private
    callerid = types.StringType(default='anonymous')
    extension = types.StringType()
    # SIP gateway configuration stuff
    gateway = types.StringType()
    gateway_uuid = types.UUIDType()
    gateway_prefix = types.StringType()
    # the timezone setup of this campaign
    timezone = types.StringType()
    # start and end dates of the campaign
    start_date = types.DateTimeType()
    end_date = types.DateTimeType()
    # same with times
    start_time = types.StringType()
    stop_time = types.StringType()
    # missing weekdays and stuff.
    # -- resources
    resources = compound.ModelType(Resource)
class ModifyCampaign(models.Model):
    '''
    Modify campaign

    This model mirrors Campaign, but none of its fields carry
    `required` flags or default values.  PATCH input from outside the
    system is validated against it, which prevents users from using
    PATCH to create fields outside the scope of the resource.
    '''
    uuid = types.UUIDType()
    account = types.StringType()
    # campaign name and description
    name = types.StringType()
    description = types.StringType()
    campaign_type = types.StringType()
    ring_in_use = types.StringType()
    record_audio = types.StringType()
    join_empty = types.StringType()
    max_len = types.StringType()
    service_level = types.StringType()
    wrap_up_time = types.StringType()
    retry = types.StringType()
    time_out = types.StringType()
    # campaign strategy (no choices here: partial updates are validated
    # field-by-field)
    strategy = types.StringType()
    status = types.StringType()
    active = types.BooleanType()
    paused = types.BooleanType()
    # campaign has script
    has_script = types.BooleanType()
    script = types.StringType()
    callerid = types.StringType()
    extension = types.StringType()
    # SIP gateway configuration stuff
    gateway = types.StringType()
    gateway_uuid = types.UUIDType()
    gateway_prefix = types.StringType()
    # the timezone setup of this campaign
    timezone = types.StringType()
    # start and end dates of the campaign
    start_date = types.DateTimeType()
    end_date = types.DateTimeType()
    # same with times
    start_time = types.StringType()
    stop_time = types.StringType()
    # -- resources
    resources = compound.ModelType(Resource)
felipecocco/oppia | core/storage/base_model/gae_models.py | 6 | 17216 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base model class."""
__author__ = 'Sean Lip'
from core.platform import models
transaction_services = models.Registry.import_transaction_services()
import feconf
import utils
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
class BaseModel(ndb.Model):
    """Base model for all persistent object storage classes.

    Provides soft-deletion (the `deleted` flag), automatic timestamps,
    and convenience accessors used by every storage model in the app.
    """
    # When this entity was first created.
    created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
    # When this entity was last updated.
    last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True)
    # Whether the current version of the file is deleted.
    deleted = ndb.BooleanProperty(indexed=True, default=False)
    @property
    def id(self):
        """A unique id for this model instance."""
        return self.key.id()
    def _pre_put_hook(self):
        """This is run before model instances are saved to the datastore.
        Subclasses of BaseModel should override this method.
        """
        pass
    class EntityNotFoundError(Exception):
        """Raised when no entity for a given id exists in the datastore."""
        pass
    @classmethod
    def get(cls, entity_id, strict=True):
        """Gets an entity by id. Fails noisily if strict == True.
        Args:
            entity_id: str. The id of the entity.
            strict: bool. Whether to fail noisily if no entity with the given id
                exists in the datastore.
        Returns:
            None, if strict == False and no undeleted entity with the given id
            exists in the datastore. Otherwise, the entity instance that
            corresponds to the given id.
        Raises:
        - base_models.BaseModel.EntityNotFoundError: if strict == True and
            no undeleted entity with the given id exists in the datastore.
        """
        entity = cls.get_by_id(entity_id)
        if entity and entity.deleted:
            # Soft-deleted entities are reported as missing.
            entity = None
        if strict and entity is None:
            raise cls.EntityNotFoundError(
                'Entity for class %s with id %s not found' %
                (cls.__name__, entity_id))
        return entity
    def put(self):
        """Writes the instance to the datastore (delegates to ndb)."""
        super(BaseModel, self).put()
    @classmethod
    def get_multi(cls, entity_ids, include_deleted=False):
        """Returns a list, each entry of which is the instance model
        corresponding to the entity_id, except for the following two cases (in
        which the corresponding entry is None instead):
          - the instance is not found
          - the instance has been deleted, and `include_deleted` is True.
        """
        entity_keys = [ndb.Key(cls, entity_id) for entity_id in entity_ids]
        entities = ndb.get_multi(entity_keys)
        if not include_deleted:
            for i in xrange(len(entities)):
                if entities[i] and entities[i].deleted:
                    entities[i] = None
        return entities
    @classmethod
    def put_multi(cls, entities):
        """Batch-writes the given model instances to the datastore."""
        return ndb.put_multi(entities)
    def delete(self):
        """Hard-deletes this entity from the datastore (no soft-delete)."""
        super(BaseModel, self).key.delete()
    @classmethod
    def get_all(cls, include_deleted_entities=False):
        """Returns a filterable iterable of all entities of this class.
        If include_deleted_entities is True then entities that have been marked
        deleted are returned as well.
        """
        query = cls.query()
        if not include_deleted_entities:
            # Intentional '==' (not 'is' / 'not'): ndb overloads equality
            # on properties to build datastore query filter nodes.
            query = query.filter(cls.deleted == False)
        return query
    @classmethod
    def get_new_id(cls, entity_name):
        """Gets a new id for an entity, based on its name.
        The returned id is guaranteed to be unique among all instances of this
        entity.
        Args:
            entity_name: the name of the entity. Coerced to a utf-8 encoded
                string. Defaults to ''.
        Returns:
            str: a new unique id for this entity class.
        Raises:
        - Exception: if an id cannot be generated within a reasonable number
            of attempts.
        """
        try:
            entity_name = unicode(entity_name).encode('utf-8')
        except Exception:
            entity_name = ''
        MAX_RETRIES = 10
        RAND_RANGE = 127 * 127
        ID_LENGTH = 12
        for i in range(MAX_RETRIES):
            # Hash the name plus a random salt until an unused id is found.
            new_id = utils.convert_to_hash(
                '%s%s' % (entity_name, utils.get_random_int(RAND_RANGE)),
                ID_LENGTH)
            if not cls.get_by_id(new_id):
                return new_id
        raise Exception('New id generator is producing too many collisions.')
    @classmethod
    def _fetch_page_sorted_by_last_updated(
            cls, query, page_size, urlsafe_start_cursor):
        """Fetches one page of `query` results, newest-updated first.

        Returns a 3-tuple (results, urlsafe_cursor_or_None, more), where
        `more` indicates whether further pages probably exist.
        """
        if urlsafe_start_cursor:
            start_cursor = datastore_query.Cursor(urlsafe=urlsafe_start_cursor)
        else:
            start_cursor = None
        result = query.order(-cls.last_updated).fetch_page(
            page_size, start_cursor=start_cursor)
        return (
            result[0],
            (result[1].urlsafe() if result[1] else None),
            result[2])
class VersionedModel(BaseModel):
    """Model that handles storage of the version history of model instances.

    To use this class, you must declare a SNAPSHOT_METADATA_CLASS and a
    SNAPSHOT_CONTENT_CLASS. The former must contain the String fields
    'committer_id', 'commit_type' and 'commit_message', and a JSON field for
    the Python list of dicts, 'commit_cmds'. The latter must contain the JSON
    field 'content'. The item that is being versioned must be serializable to a
    JSON blob.

    Note that commit() should be used for VersionedModels, as opposed to put()
    for direct subclasses of BaseModel.
    """
    # The class designated as the snapshot model. This should be a subclass of
    # BaseSnapshotMetadataModel.
    SNAPSHOT_METADATA_CLASS = None
    # The class designated as the snapshot content model. This should be a
    # subclass of BaseSnapshotContentModel.
    SNAPSHOT_CONTENT_CLASS = None
    # Whether reverting is allowed. Default is False.
    ALLOW_REVERT = False

    ### IMPORTANT: Subclasses should only overwrite things above this line. ###

    # The possible commit types.
    _COMMIT_TYPE_CREATE = 'create'
    _COMMIT_TYPE_REVERT = 'revert'
    _COMMIT_TYPE_EDIT = 'edit'
    _COMMIT_TYPE_DELETE = 'delete'
    # A list containing the possible commit types.
    COMMIT_TYPE_CHOICES = [
        _COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT,
        _COMMIT_TYPE_DELETE
    ]
    # The delimiter used to separate the version number from the model instance
    # id. To get the instance id from a snapshot id, use Python's rfind()
    # method to find the location of this delimiter.
    _VERSION_DELIMITER = '-'
    # The reserved prefix for keys that are automatically inserted into a
    # commit_cmd dict by this model.
    _AUTOGENERATED_PREFIX = 'AUTO'
    # The current version number of this instance. In each PUT operation,
    # this number is incremented and a snapshot of the modified instance is
    # stored in the snapshot metadata and content models. The snapshot
    # version number starts at 1 when the model instance is first created.
    # All data in this instance represents the version at HEAD; data about the
    # previous versions is stored in the snapshot models.
    version = ndb.IntegerProperty(default=0)

    def _require_not_marked_deleted(self):
        """Raises if this instance has been soft-deleted."""
        if self.deleted:
            raise Exception('This model instance has been deleted.')

    def _compute_snapshot(self):
        """Generates a snapshot (a Python dict) from the model fields."""
        return self.to_dict(exclude=['created_on', 'last_updated'])

    def _reconstitute(self, snapshot_dict):
        """Makes this instance into a reconstitution of the given snapshot."""
        self.populate(**snapshot_dict)
        return self

    def _reconstitute_from_snapshot_id(self, snapshot_id):
        """Makes this instance into a reconstitution of the given snapshot."""
        snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id)
        snapshot_dict = snapshot_model.content
        return self._reconstitute(snapshot_dict)

    @classmethod
    def _get_snapshot_id(cls, instance_id, version_number):
        """Returns the id used for the snapshot models: '<id>-<version>'."""
        return '%s%s%s' % (
            instance_id, cls._VERSION_DELIMITER, version_number)

    def _trusted_commit(
            self, committer_id, commit_type, commit_message, commit_cmds):
        """Bumps the version and transactionally writes the snapshot pair.

        Assumes commit_cmds has already been validated by the caller.
        """
        if self.SNAPSHOT_METADATA_CLASS is None:
            raise Exception('No snapshot metadata class defined.')
        if self.SNAPSHOT_CONTENT_CLASS is None:
            raise Exception('No snapshot content class defined.')
        if not isinstance(commit_cmds, list):
            raise Exception(
                'Expected commit_cmds to be a list of dicts, received %s'
                % commit_cmds)
        for item in commit_cmds:
            if not isinstance(item, dict):
                raise Exception(
                    'Expected commit_cmds to be a list of dicts, received %s'
                    % commit_cmds)
        self.version += 1
        snapshot = self._compute_snapshot()
        snapshot_id = self._get_snapshot_id(self.id, self.version)
        snapshot_metadata_instance = self.SNAPSHOT_METADATA_CLASS(
            id=snapshot_id, committer_id=committer_id, commit_type=commit_type,
            commit_message=commit_message, commit_cmds=commit_cmds)
        snapshot_content_instance = self.SNAPSHOT_CONTENT_CLASS(
            id=snapshot_id, content=snapshot)
        # Metadata, content and HEAD must stay consistent -- write all
        # three in a single transaction.
        transaction_services.run_in_transaction(
            ndb.put_multi,
            [snapshot_metadata_instance, snapshot_content_instance, self])

    def delete(self, committer_id, commit_message, force_deletion=False):
        """Deletes this instance.

        With force_deletion, the instance and its entire snapshot history
        are removed from the datastore; otherwise the instance is only
        marked deleted and a 'delete' commit is recorded.
        """
        if force_deletion:
            current_version = self.version
            version_numbers = [str(num + 1) for num in range(current_version)]
            snapshot_ids = [
                self._get_snapshot_id(self.id, version_number)
                for version_number in version_numbers]
            metadata_keys = [
                ndb.Key(self.SNAPSHOT_METADATA_CLASS, snapshot_id)
                for snapshot_id in snapshot_ids]
            ndb.delete_multi(metadata_keys)
            content_keys = [
                ndb.Key(self.SNAPSHOT_CONTENT_CLASS, snapshot_id)
                for snapshot_id in snapshot_ids]
            ndb.delete_multi(content_keys)
            super(VersionedModel, self).delete()
        else:
            self._require_not_marked_deleted()
            self.deleted = True
            CMD_DELETE = '%s_mark_deleted' % self._AUTOGENERATED_PREFIX
            commit_cmds = [{
                'cmd': CMD_DELETE
            }]
            self._trusted_commit(
                committer_id, self._COMMIT_TYPE_DELETE, commit_message,
                commit_cmds)

    def put(self, *args, **kwargs):
        """For VersionedModels, this method is replaced with commit()."""
        raise NotImplementedError

    def commit(self, committer_id, commit_message, commit_cmds):
        """Saves a version snapshot and updates the model.

        commit_cmds should give sufficient information to reconstruct the
        commit.
        """
        self._require_not_marked_deleted()
        for commit_cmd in commit_cmds:
            if 'cmd' not in commit_cmd:
                raise Exception(
                    'Invalid commit_cmd: %s. Expected a \'cmd\' key.'
                    % commit_cmd)
            if commit_cmd['cmd'].startswith(self._AUTOGENERATED_PREFIX):
                # Bug fix: the format string previously lacked its '%s'
                # placeholder, so this raise failed with a TypeError
                # ("not all arguments converted during string formatting")
                # instead of raising the intended Exception.
                raise Exception(
                    'Invalid change list command: %s' % commit_cmd['cmd'])
        commit_type = (
            self._COMMIT_TYPE_CREATE if self.version == 0 else
            self._COMMIT_TYPE_EDIT)
        self._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

    def revert(self, committer_id, commit_message, version_number):
        """Reverts the model to an earlier version, as a new commit.

        Only allowed when the subclass sets ALLOW_REVERT = True.
        """
        self._require_not_marked_deleted()
        if not self.ALLOW_REVERT:
            raise Exception(
                'Reverting of objects of type %s is not allowed.'
                % self.__class__.__name__)
        CMD_REVERT = '%s_revert_version_number' % self._AUTOGENERATED_PREFIX
        commit_cmds = [{
            'cmd': CMD_REVERT,
            'version_number': version_number
        }]
        # Do not overwrite the version number.
        current_version = self.version
        snapshot_id = self._get_snapshot_id(self.id, version_number)
        self._reconstitute_from_snapshot_id(snapshot_id)
        self.version = current_version
        self._trusted_commit(
            committer_id, self._COMMIT_TYPE_REVERT, commit_message,
            commit_cmds)

    @classmethod
    def get_version(cls, model_instance_id, version_number):
        """Returns a model instance representing the given version.

        The snapshot content is used to populate this model instance. The
        snapshot metadata is not used.
        """
        cls.get(model_instance_id)._require_not_marked_deleted()
        snapshot_id = cls._get_snapshot_id(model_instance_id, version_number)
        return cls(id=model_instance_id)._reconstitute_from_snapshot_id(
            snapshot_id)

    @classmethod
    def get(cls, entity_id, strict=True, version=None):
        """Gets an entity by id. Fails noisily if strict == True."""
        if version is None:
            return super(VersionedModel, cls).get(entity_id, strict=strict)
        else:
            return cls.get_version(entity_id, version)

    @classmethod
    def get_snapshots_metadata(
            cls, model_instance_id, version_numbers, allow_deleted=False):
        """Returns a list of dicts, each representing a model snapshot.

        One dict is returned for each version number in the list of version
        numbers requested. If any of the version numbers does not exist, an
        error is raised.

        If `allow_deleted` is False, an error is raised if the current model
        has been deleted.
        """
        if not allow_deleted:
            cls.get(model_instance_id)._require_not_marked_deleted()
        snapshot_ids = [
            cls._get_snapshot_id(model_instance_id, version_number)
            for version_number in version_numbers]
        metadata_keys = [
            ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id)
            for snapshot_id in snapshot_ids]
        returned_models = ndb.get_multi(metadata_keys)
        for ind, model in enumerate(returned_models):
            if model is None:
                raise Exception(
                    'Invalid version number %s for model %s with id %s'
                    % (version_numbers[ind], cls.__name__, model_instance_id))
        return [{
            'committer_id': model.committer_id,
            'commit_message': model.commit_message,
            'commit_cmds': model.commit_cmds,
            'commit_type': model.commit_type,
            'version_number': version_numbers[ind],
            'created_on': model.created_on.strftime(
                feconf.HUMAN_READABLE_DATETIME_FORMAT),
        } for (ind, model) in enumerate(returned_models)]
class BaseSnapshotMetadataModel(BaseModel):
    """Base class for snapshot metadata classes.
    The id of this model is computed using VersionedModel.get_snapshot_id().
    Stores the who/why/how of a single committed revision.
    """
    # The id of the user who committed this revision.
    committer_id = ndb.StringProperty(required=True)
    # The type of the commit associated with this snapshot.
    commit_type = ndb.StringProperty(
        required=True, choices=VersionedModel.COMMIT_TYPE_CHOICES)
    # The commit message associated with this snapshot.
    commit_message = ndb.TextProperty(indexed=False)
    # A sequence of commands that can be used to describe this commit.
    # Represented as a list of dicts.
    commit_cmds = ndb.JsonProperty(indexed=False)
class BaseSnapshotContentModel(BaseModel):
    """Base class for snapshot content classes.
    The id of this model is computed using VersionedModel.get_snapshot_id().
    Stores the full serialized state of one revision.
    """
    # The snapshot content, as a JSON blob.
    content = ndb.JsonProperty(indexed=False)
class BaseMapReduceBatchResultsModel(BaseModel):
    """Base model for batch storage for MR jobs.
    This model turns off caching, because this results in stale data being
    shown after each MapReduce job run. Classes which are used by a MR job to
    store its batch results should subclass this class.
    """
    # Disable both the ndb in-context cache and memcache for this model.
    _use_cache = False
    _use_memcache = False
| apache-2.0 |
PepperPD/edx-pepper-platform | lms/djangoapps/user_api/views.py | 17 | 1781 | from django.conf import settings
from django.contrib.auth.models import User
from rest_framework import authentication
from rest_framework import filters
from rest_framework import permissions
from rest_framework import viewsets
from user_api.models import UserPreference
from user_api.serializers import UserSerializer, UserPreferenceSerializer
class ApiKeyHeaderPermission(permissions.BasePermission):
    """Grants access when the shared secret in X-Edx-Api-Key matches."""

    def has_permission(self, request, view):
        """
        Check for permissions by matching the configured API key and header

        If settings.DEBUG is True and settings.EDX_API_KEY is not set or None,
        then allow the request. Otherwise, allow the request if and only if
        settings.EDX_API_KEY is set and the X-Edx-Api-Key HTTP header is
        present in the request and matches the setting.
        """
        configured_key = getattr(settings, "EDX_API_KEY", None)
        if configured_key is None:
            # No key configured: open only for DEBUG deployments.
            return bool(settings.DEBUG)
        return request.META.get("HTTP_X_EDX_API_KEY") == configured_key
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over all users, guarded by the shared API key header."""
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (ApiKeyHeaderPermission,)
    # Expose every user; serialization is delegated to UserSerializer.
    queryset = User.objects.all()
    serializer_class = UserSerializer
    # Page size defaults to 10; clients may override it with ?page_size=N.
    paginate_by = 10
    paginate_by_param = "page_size"
class UserPreferenceViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over user preferences, guarded by the API key header."""
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (ApiKeyHeaderPermission,)
    queryset = UserPreference.objects.all()
    # Allow filtering results by preference key and/or owning user, e.g.
    # ?key=...&user=...
    filter_backends = (filters.DjangoFilterBackend,)
    filter_fields = ("key", "user")
    serializer_class = UserPreferenceSerializer
    # Page size defaults to 10; clients may override it with ?page_size=N.
    paginate_by = 10
    paginate_by_param = "page_size"
| agpl-3.0 |
Lujeni/ansible | lib/ansible/module_utils/facts/network/base.py | 88 | 2400 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
class Network:
    """Generic base class for per-platform network fact gathering.

    This is a generic Network subclass of Facts. It should be further
    subclassed per platform. Subclasses must define:
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.

    All subclasses MUST define platform.
    """
    platform = 'Generic'

    # FIXME: remove load_on_init when we can
    def __init__(self, module, load_on_init=False):
        # Handle on the module object; subclasses use it to run commands.
        self.module = module

    # TODO: more or less abstract/NotImplemented
    def populate(self, collected_facts=None):
        """Return discovered network facts; the generic base has none."""
        return {}
class NetworkCollector(BaseFactCollector):
    """Fact collector that delegates to a platform-specific Network class."""
    # MAYBE: we could try to build this based on the arch specific implementation of Network() or its kin
    name = 'network'
    _fact_class = Network
    _fact_ids = set(['interfaces',
                     'default_ipv4',
                     'default_ipv6',
                     'all_ipv4_addresses',
                     'all_ipv6_addresses'])

    # Mapping of ipv6 scope ids to human-readable scope names.
    IPV6_SCOPE = {'0': 'global',
                  '10': 'host',
                  '20': 'link',
                  '40': 'admin',
                  '50': 'site',
                  '80': 'organization'}

    def collect(self, module=None, collected_facts=None):
        """Instantiate the platform Network class and return its facts."""
        if not module:
            return {}
        # NOTE(review): the original comment said "Network munges
        # cached_facts by side effect, so give it a copy", but the dict is
        # actually handed over as-is -- confirm whether mutation by
        # populate() is intended before adding a copy here.
        network_facts = self._fact_class(module)
        return network_facts.populate(collected_facts=collected_facts or {})
| gpl-3.0 |
kalvdans/scipy | scipy/signal/tests/mpsig.py | 39 | 3369 | """
Some signal functions implemented using mpmath.
"""
from __future__ import division
try:
import mpmath
except ImportError:
mpmath = None
def _prod(seq):
"""Returns the product of the elements in the sequence `seq`."""
p = 1
for elem in seq:
p *= elem
return p
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles.
This is simply len(p) - len(z), which must be nonnegative.
A ValueError is raised if len(p) < len(z).
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
return degree
def _zpkbilinear(z, p, k, fs):
    """Bilinear transformation to convert a filter from analog to digital.

    Each analog root s maps to the digital root (2*fs + s) / (2*fs - s);
    zeros at infinity land on the Nyquist frequency (z = -1).
    """
    degree = _relative_degree(z, p)

    warp = 2 * fs

    def _to_digital(roots):
        return [(warp + root) / (warp - root) for root in roots]

    digital_z = _to_digital(z)
    digital_p = _to_digital(p)

    # Any zeros that were at infinity get moved to the Nyquist frequency.
    digital_z += [-1] * degree

    # Compensate for gain change introduced by the mapping.
    numer = _prod(warp - root for root in z)
    denom = _prod(warp - root for root in p)
    digital_k = k * numer / denom

    return digital_z, digital_p, digital_k.real
def _zpklp2lp(z, p, k, wo=1):
    """Transform a lowpass filter to a different cutoff frequency `wo`."""
    degree = _relative_degree(z, p)

    # Scale all points radially from the origin to shift the cutoff.
    scaled_z = [wo * zero for zero in z]
    scaled_p = [wo * pole for pole in p]

    # Each shifted pole decreases gain by wo, each shifted zero increases
    # it; wo**degree cancels the net change so overall gain is unchanged.
    return scaled_z, scaled_p, k * wo**degree
def _butter_analog_poles(n):
    """
    Poles of an analog Butterworth lowpass filter.

    This is the same calculation as scipy.signal.buttap(n) or
    scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
    and only the poles are returned.
    """
    # Poles sit evenly spaced on the left half of the unit circle.
    return [-mpmath.exp(1j * mpmath.pi * m / (2 * n))
            for m in range(-n + 1, n, 2)]
def butter_lp(n, Wn):
    """
    Lowpass Butterworth digital filter design.

    This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
    but it uses mpmath, and the results are returned in lists instead of numpy
    arrays.
    """
    fs = 2
    # Pre-warp the digital cutoff so the bilinear transform lands it at Wn.
    warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)

    analog_zeros = []
    analog_poles = _butter_analog_poles(n)
    analog_gain = 1

    z, p, k = _zpklp2lp(analog_zeros, analog_poles, analog_gain, wo=warped)
    return _zpkbilinear(z, p, k, fs=fs)
def zpkfreqz(z, p, k, worN=None):
    """
    Frequency response of a filter in zpk format, using mpmath.

    This is the same calculation as scipy.signal.freqz, but the input is in
    zpk format, the calculation is performed using mpmath, and the results are
    returned in lists instead of numpy arrays.
    """
    if worN is None or isinstance(worN, int):
        # An int (or None) requests that many evenly spaced points on
        # [0, pi); otherwise worN is taken to be the frequency grid itself.
        num_points = worN or 512
        ws = [mpmath.pi * mpmath.mpf(j) / num_points
              for j in range(num_points)]
    else:
        ws = worN

    h = []
    for w in ws:
        # Evaluate H(z) = k * prod(z - zeros) / prod(z - poles) at z=e^(jw).
        ejw = mpmath.exp(1j * w)
        numer = _prod([ejw - root for root in z])
        denom = _prod([ejw - root for root in p])
        h.append(k * numer / denom)
    return ws, h
| bsd-3-clause |
openstack/nova | nova/tests/unit/objects/test_service.py | 3 | 25869 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from nova.compute import manager as compute_manager
from nova import context
from nova.db import api as db
from nova import exception
from nova import objects
from nova.objects import aggregate
from nova.objects import service
from nova import test
from nova.tests import fixtures
from nova.tests.unit.objects import test_compute_node
from nova.tests.unit.objects import test_objects
# Fixed "now" shared by all fake rows below; microseconds are stripped so
# round-tripped timestamps compare equal.
NOW = timeutils.utcnow().replace(microsecond=0)
def _fake_service(**kwargs):
    """Build a dict resembling a Service DB row; kwargs override defaults."""
    defaults = {
        'created_at': NOW,
        'updated_at': None,
        'deleted_at': None,
        'deleted': False,
        'id': 123,
        'uuid': uuidsentinel.service,
        'host': 'fake-host',
        'binary': 'nova-compute',
        'topic': 'fake-service-topic',
        'report_count': 1,
        'forced_down': False,
        'disabled': False,
        'disabled_reason': None,
        'last_seen_up': None,
        'version': service.SERVICE_VERSION,
    }
    return dict(defaults, **kwargs)
# Default fake DB row shared by the tests below.
fake_service = _fake_service()
# Fields that compare_obj() is allowed to find missing on the object
# (they are lazily loaded on the Service object).
OPTIONAL = ['availability_zone', 'compute_node']
class _TestServiceObject(object):
    """Shared Service object test cases.

    Mixed into TestServiceObject (local object calls) and
    TestRemoteServiceObject (indirected calls) below so every case runs
    both ways.
    """

    def supported_hv_specs_comparator(self, expected, obj_val):
        # Compare HVSpec objects via their list form.
        obj_val = [inst.to_list() for inst in obj_val]
        self.assertJsonEqual(expected, obj_val)

    def pci_device_pools_comparator(self, expected, obj_val):
        # Compare pci_device_pools via its primitive form.
        obj_val = obj_val.obj_to_primitive()
        self.assertJsonEqual(expected, obj_val)

    def comparators(self):
        """Field-specific comparison functions for compare_obj()."""
        return {'stats': self.assertJsonEqual,
                'host_ip': self.assertJsonEqual,
                'supported_hv_specs': self.supported_hv_specs_comparator,
                'pci_device_pools': self.pci_device_pools_comparator}

    def subs(self):
        """Object-field -> DB-column name substitutions for compare_obj()."""
        return {'supported_hv_specs': 'supported_instances',
                'pci_device_pools': 'pci_stats'}

    def _test_query(self, db_method, obj_method, *args, **kwargs):
        """Run a Service query method and verify the backing DB call.

        If db_exception is passed, the DB layer raises it and the query is
        expected to return None; otherwise the result must match
        fake_service.
        """
        db_exception = kwargs.pop('db_exception', None)
        if db_exception:
            with mock.patch.object(db, db_method, side_effect=db_exception) \
                    as mock_db_method:
                obj = getattr(service.Service, obj_method)(self.context, *args,
                                                           **kwargs)
                self.assertIsNone(obj)
                mock_db_method.assert_called_once_with(self.context, *args,
                                                       **kwargs)
        else:
            with mock.patch.object(db, db_method, return_value=fake_service) \
                    as mock_db_method:
                obj = getattr(service.Service, obj_method)(self.context, *args,
                                                           **kwargs)
                self.compare_obj(obj, fake_service, allow_missing=OPTIONAL)
                mock_db_method.assert_called_once_with(self.context, *args,
                                                       **kwargs)

    def test_get_by_id(self):
        self._test_query('service_get', 'get_by_id', 123)

    def test_get_by_uuid(self):
        self._test_query('service_get_by_uuid', 'get_by_uuid',
                         uuidsentinel.service_uuid)

    def test_get_by_host_and_topic(self):
        self._test_query('service_get_by_host_and_topic',
                         'get_by_host_and_topic', 'fake-host', 'fake-topic')

    def test_get_by_host_and_binary(self):
        self._test_query('service_get_by_host_and_binary',
                         'get_by_host_and_binary', 'fake-host', 'fake-binary')

    def test_get_by_host_and_binary_raises(self):
        self._test_query('service_get_by_host_and_binary',
                         'get_by_host_and_binary', 'fake-host', 'fake-binary',
                         db_exception=exception.HostBinaryNotFound(
                             host='fake-host', binary='fake-binary'))

    def test_get_by_compute_host(self):
        self._test_query('service_get_by_compute_host', 'get_by_compute_host',
                         'fake-host')

    def test_get_by_args(self):
        self._test_query('service_get_by_host_and_binary', 'get_by_args',
                         'fake-host', 'fake-binary')

    @mock.patch.object(db, 'service_create', return_value=fake_service)
    def test_create(self, mock_service_create):
        service_obj = service.Service(context=self.context)
        service_obj.host = 'fake-host'
        service_obj.uuid = uuidsentinel.service2
        service_obj.create()
        self.assertEqual(fake_service['id'], service_obj.id)
        self.assertEqual(service.SERVICE_VERSION, service_obj.version)
        mock_service_create.assert_called_once_with(
            self.context, {'host': 'fake-host',
                           'uuid': uuidsentinel.service2,
                           'version': fake_service['version']})

    @mock.patch('nova.objects.service.uuidutils.generate_uuid',
                return_value=uuidsentinel.service3)
    @mock.patch.object(db, 'service_create', return_value=fake_service)
    def test_create_without_uuid_generates_one(
            self, mock_service_create, generate_uuid):
        service_obj = service.Service(context=self.context)
        service_obj.create()
        create_args = mock_service_create.call_args[0][1]
        self.assertEqual(generate_uuid.return_value, create_args['uuid'])

    @mock.patch.object(db, 'service_create', return_value=fake_service)
    def test_recreate_fails(self, mock_service_create):
        service_obj = service.Service(context=self.context)
        service_obj.host = 'fake-host'
        service_obj.create()
        self.assertRaises(exception.ObjectActionError, service_obj.create)
        # The original code here *called* the mock instead of asserting on
        # it, so nothing was verified. Assert the single successful
        # create() hit the DB API; the uuid is auto-generated, so match it
        # with mock.ANY.
        mock_service_create.assert_called_once_with(
            self.context, {'host': 'fake-host',
                           'uuid': mock.ANY,
                           'version': fake_service['version']})

    @mock.patch('nova.objects.Service._send_notification')
    @mock.patch.object(db, 'service_update', return_value=fake_service)
    def test_save(self, mock_service_update, mock_notify):
        service_obj = service.Service(context=self.context)
        service_obj.id = 123
        service_obj.host = 'fake-host'
        service_obj.save()
        self.assertEqual(service.SERVICE_VERSION, service_obj.version)
        mock_service_update.assert_called_once_with(
            self.context, 123, {'host': 'fake-host',
                                'version': fake_service['version']})

    @mock.patch.object(db, 'service_create',
                       return_value=fake_service)
    def test_set_id_failure(self, db_mock):
        # 'id' is read-only once set by create().
        service_obj = service.Service(context=self.context,
                                      binary='nova-compute')
        service_obj.create()
        self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
                          service_obj, 'id', 124)

    @mock.patch('nova.objects.Service._send_notification')
    @mock.patch.object(db, 'service_destroy')
    def _test_destroy(self, mock_service_destroy, mock_notify):
        service_obj = service.Service(context=self.context)
        service_obj.id = 123
        service_obj.destroy()
        mock_service_destroy.assert_called_once_with(self.context, 123)

    def test_destroy(self):
        # The test harness needs db.service_destroy to work,
        # so avoid leaving it broken here after we're done
        orig_service_destroy = db.service_destroy
        try:
            self._test_destroy()
        finally:
            db.service_destroy = orig_service_destroy

    @mock.patch.object(db, 'service_get_all_by_topic',
                       return_value=[fake_service])
    def test_get_by_topic(self, mock_service_get):
        services = service.ServiceList.get_by_topic(self.context, 'fake-topic')
        self.assertEqual(1, len(services))
        self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
        mock_service_get.assert_called_once_with(self.context, 'fake-topic')

    @mock.patch('nova.db.api.service_get_all_by_binary')
    def test_get_by_binary(self, mock_get):
        mock_get.return_value = [fake_service]
        services = service.ServiceList.get_by_binary(self.context,
                                                     'fake-binary')
        self.assertEqual(1, len(services))
        mock_get.assert_called_once_with(self.context,
                                         'fake-binary',
                                         include_disabled=False)

    @mock.patch('nova.db.api.service_get_all_by_binary')
    def test_get_by_binary_disabled(self, mock_get):
        mock_get.return_value = [_fake_service(disabled=True)]
        services = service.ServiceList.get_by_binary(self.context,
                                                     'fake-binary',
                                                     include_disabled=True)
        self.assertEqual(1, len(services))
        mock_get.assert_called_once_with(self.context,
                                         'fake-binary',
                                         include_disabled=True)

    @mock.patch('nova.db.api.service_get_all_by_binary')
    def test_get_by_binary_both(self, mock_get):
        mock_get.return_value = [_fake_service(),
                                 _fake_service(disabled=True)]
        services = service.ServiceList.get_by_binary(self.context,
                                                     'fake-binary',
                                                     include_disabled=True)
        self.assertEqual(2, len(services))
        mock_get.assert_called_once_with(self.context,
                                         'fake-binary',
                                         include_disabled=True)

    @mock.patch.object(db, 'service_get_all_by_host',
                       return_value=[fake_service])
    def test_get_by_host(self, mock_service_get):
        services = service.ServiceList.get_by_host(self.context, 'fake-host')
        self.assertEqual(1, len(services))
        self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
        mock_service_get.assert_called_once_with(self.context, 'fake-host')

    @mock.patch.object(db, 'service_get_all', return_value=[fake_service])
    def test_get_all(self, mock_get_all):
        services = service.ServiceList.get_all(self.context, disabled=False)
        self.assertEqual(1, len(services))
        self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
        mock_get_all.assert_called_once_with(self.context, disabled=False)

    @mock.patch.object(db, 'service_get_all')
    @mock.patch.object(aggregate.AggregateList, 'get_by_metadata_key')
    def test_get_all_with_az(self, mock_get_by_key, mock_get_all):
        agg = aggregate.Aggregate(context=self.context)
        agg.name = 'foo'
        agg.metadata = {'availability_zone': 'test-az'}
        agg.create()
        agg.hosts = [fake_service['host']]
        mock_get_by_key.return_value = [agg]
        mock_get_all.return_value = [dict(fake_service, topic='compute')]
        services = service.ServiceList.get_all(self.context, set_zones=True)
        self.assertEqual(1, len(services))
        self.assertEqual('test-az', services[0].availability_zone)
        mock_get_all.assert_called_once_with(self.context, disabled=None)
        mock_get_by_key.assert_called_once_with(self.context,
            'availability_zone', hosts=set(agg.hosts))

    @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
    def test_compute_node(self, mock_get):
        fake_compute_node = objects.ComputeNode._from_db_object(
            self.context, objects.ComputeNode(),
            test_compute_node.fake_compute_node)
        mock_get.return_value = [fake_compute_node]
        service_obj = service.Service(id=123, host="fake-host",
                                      binary="nova-compute")
        service_obj._context = self.context
        self.assertEqual(service_obj.compute_node,
                         fake_compute_node)
        # Make sure it doesn't re-fetch this
        service_obj.compute_node
        mock_get.assert_called_once_with(self.context, 'fake-host')

    @mock.patch.object(db, 'service_get_all_computes_by_hv_type')
    def test_get_all_computes_by_hv_type(self, mock_get_all):
        mock_get_all.return_value = [fake_service]
        services = service.ServiceList.get_all_computes_by_hv_type(
            self.context, 'hv-type')
        self.assertEqual(1, len(services))
        self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
        mock_get_all.assert_called_once_with(self.context, 'hv-type',
                                             include_disabled=False)

    def test_load_when_orphaned(self):
        # Lazy-loading compute_node requires a context on the object.
        service_obj = service.Service()
        service_obj.id = 123
        self.assertRaises(exception.OrphanedObjectError,
                          getattr, service_obj, 'compute_node')

    @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
    def test_obj_make_compatible_for_compute_node(self, get_all_by_host):
        service_obj = objects.Service(context=self.context)
        fake_service_dict = fake_service.copy()
        fake_compute_obj = objects.ComputeNode(host=fake_service['host'],
                                               service_id=fake_service['id'])
        get_all_by_host.return_value = [fake_compute_obj]

        versions = ovo_base.obj_tree_get_versions('Service')
        versions['ComputeNode'] = '1.10'
        service_obj.obj_make_compatible_from_manifest(fake_service_dict, '1.9',
                                                      versions)
        self.assertEqual(
            fake_compute_obj.obj_to_primitive(target_version='1.10',
                                              version_manifest=versions),
            fake_service_dict['compute_node'])

    @mock.patch('nova.db.api.service_get_minimum_version')
    def test_get_minimum_version_none(self, mock_get):
        mock_get.return_value = None
        self.assertEqual(0,
                         objects.Service.get_minimum_version(self.context,
                                                             'nova-compute'))
        mock_get.assert_called_once_with(self.context, ['nova-compute'])

    @mock.patch('nova.db.api.service_get_minimum_version')
    def test_get_minimum_version(self, mock_get):
        mock_get.return_value = {'nova-compute': 123}
        self.assertEqual(123,
                         objects.Service.get_minimum_version(self.context,
                                                             'nova-compute'))
        mock_get.assert_called_once_with(self.context, ['nova-compute'])

    @mock.patch('nova.db.api.service_get_minimum_version')
    @mock.patch('nova.objects.service.LOG')
    def test_get_minimum_version_checks_binary(self, mock_log, mock_get):
        mock_get.return_value = None
        self.assertEqual(0,
                         objects.Service.get_minimum_version(self.context,
                                                             'nova-compute'))
        self.assertFalse(mock_log.warning.called)
        self.assertRaises(exception.ObjectActionError,
                          objects.Service.get_minimum_version,
                          self.context,
                          'compute')
        self.assertTrue(mock_log.warning.called)

    @mock.patch('nova.db.api.service_get_minimum_version')
    def test_get_minimum_version_with_caching(self, mock_get):
        objects.Service.enable_min_version_cache()
        mock_get.return_value = {'nova-compute': 123}
        self.assertEqual(123,
                         objects.Service.get_minimum_version(self.context,
                                                             'nova-compute'))
        self.assertEqual({"nova-compute": 123},
                         objects.Service._MIN_VERSION_CACHE)
        self.assertEqual(123,
                         objects.Service.get_minimum_version(self.context,
                                                             'nova-compute'))
        # The second lookup must be served from the cache.
        mock_get.assert_called_once_with(self.context, ['nova-compute'])
        objects.Service._SERVICE_VERSION_CACHING = False
        objects.Service.clear_min_version_cache()

    @mock.patch('nova.db.api.service_get_minimum_version')
    def test_get_min_version_multiple_with_old(self, mock_gmv):
        # A binary with no recorded version (None) forces the minimum to 0.
        mock_gmv.return_value = {'nova-api': None,
                                 'nova-scheduler': 2,
                                 'nova-conductor': 3}

        binaries = ['nova-api', 'nova-api', 'nova-conductor',
                    'nova-conductor', 'nova-api']
        minimum = objects.Service.get_minimum_version_multi(self.context,
                                                            binaries)
        self.assertEqual(0, minimum)

    @mock.patch('nova.db.api.service_get_minimum_version')
    def test_get_min_version_multiple(self, mock_gmv):
        mock_gmv.return_value = {'nova-api': 1,
                                 'nova-scheduler': 2,
                                 'nova-conductor': 3}

        binaries = ['nova-api', 'nova-api', 'nova-conductor',
                    'nova-conductor', 'nova-api']
        minimum = objects.Service.get_minimum_version_multi(self.context,
                                                            binaries)
        self.assertEqual(1, minimum)

    @mock.patch('nova.objects.Service._send_notification')
    @mock.patch('nova.db.api.service_get_minimum_version',
                return_value={'nova-compute': 2})
    def test_create_above_minimum(self, mock_get, mock_notify):
        with mock.patch('nova.objects.service.SERVICE_VERSION',
                        new=3):
            objects.Service(context=self.context,
                            binary='nova-compute').create()

    @mock.patch('nova.objects.Service._send_notification')
    @mock.patch('nova.db.api.service_get_minimum_version',
                return_value={'nova-compute': 2})
    def test_create_equal_to_minimum(self, mock_get, mock_notify):
        with mock.patch('nova.objects.service.SERVICE_VERSION',
                        new=2):
            objects.Service(context=self.context,
                            binary='nova-compute').create()

    @mock.patch('nova.db.api.service_get_minimum_version',
                return_value={'nova-compute': 2})
    def test_create_below_minimum(self, mock_get):
        with mock.patch('nova.objects.service.SERVICE_VERSION',
                        new=1):
            self.assertRaises(exception.ServiceTooOld,
                              objects.Service(context=self.context,
                                              binary='nova-compute',
                                              ).create)

    @mock.patch('nova.objects.base.NovaObject'
                '.obj_make_compatible_from_manifest', new=mock.Mock())
    def test_obj_make_compatible_from_manifest_strips_uuid(self):
        # 'uuid' was added in 1.21; older consumers must not see it.
        s = service.Service()
        primitive = {'uuid': uuidsentinel.service}
        s.obj_make_compatible_from_manifest(primitive, '1.20', mock.Mock())
        self.assertNotIn('uuid', primitive)
class TestServiceObject(test_objects._LocalTest,
                        _TestServiceObject):
    # Runs the shared Service test cases with direct (local) object calls.
    pass
class TestRemoteServiceObject(test_objects._RemoteTest,
                              _TestServiceObject):
    # Runs the shared Service test cases through the remote-object path.
    pass
class TestServiceVersion(test.NoDBTestCase):
    """Checks on the global SERVICE_VERSION bookkeeping."""

    def setUp(self):
        self.ctxt = context.get_admin_context()
        super(TestServiceVersion, self).setUp()

    def _collect_things(self):
        # Everything that, when changed, requires a SERVICE_VERSION bump.
        data = {
            'compute_rpc': compute_manager.ComputeManager.target.version,
        }
        return data

    def test_version(self):
        calculated = self._collect_things()
        self.assertEqual(
            len(service.SERVICE_VERSION_HISTORY), service.SERVICE_VERSION + 1,
            'Service version %i has no history. Please update '
            'nova.objects.service.SERVICE_VERSION_HISTORY '
            'and add %s to it' % (service.SERVICE_VERSION, repr(calculated)))
        current = service.SERVICE_VERSION_HISTORY[service.SERVICE_VERSION]
        self.assertEqual(
            current, calculated,
            'Changes detected that require a SERVICE_VERSION change. Please '
            'increment nova.objects.service.SERVICE_VERSION, and make sure it '
            'is equal to nova.compute.manager.ComputeManager.target.version.')

    def test_version_in_init(self):
        # 'version' may not be passed to the constructor.
        self.assertRaises(exception.ObjectActionError,
                          objects.Service,
                          version=123)

    def test_version_set_on_init(self):
        self.assertEqual(service.SERVICE_VERSION,
                         objects.Service().version)

    def test_version_loaded_from_db(self):
        # A version stored in the DB wins over the code-level constant.
        fake_version = fake_service['version'] + 1
        fake_different_service = dict(fake_service)
        fake_different_service['version'] = fake_version
        obj = objects.Service()
        obj._from_db_object(self.ctxt, obj, fake_different_service)
        self.assertEqual(fake_version, obj.version)
class TestServiceVersionCells(test.TestCase):
    """Tests for computing the minimum service version across all cells."""

    def setUp(self):
        self.context = context.get_admin_context()
        super(TestServiceVersionCells, self).setUp()

    def _setup_cells(self):
        # NOTE(danms): Override the base class's cell setup so we can have two
        self.cells = fixtures.CellDatabases()
        self.cells.add_cell_database(uuidsentinel.cell1, default=True)
        self.cells.add_cell_database(uuidsentinel.cell2)
        self.useFixture(self.cells)
        cm = objects.CellMapping(context=self.context,
                                 uuid=uuidsentinel.cell1,
                                 name='cell1',
                                 transport_url='fake://nowhere/',
                                 database_connection=uuidsentinel.cell1)
        cm.create()
        cm = objects.CellMapping(context=self.context,
                                 uuid=uuidsentinel.cell2,
                                 name='cell2',
                                 transport_url='fake://nowhere/',
                                 database_connection=uuidsentinel.cell2)
        cm.create()

    def _create_services(self, *versions):
        # Distribute one nova-compute service per version round-robin
        # across the cells.
        cells = objects.CellMappingList.get_all(self.context)
        index = 0
        for version in versions:
            # NOTE(review): this local name shadows the module-level
            # 'service' import for the duration of the loop.
            service = objects.Service(context=self.context,
                                      binary='nova-compute')
            service.version = version
            cell = cells[index % len(cells)]
            with context.target_cell(self.context, cell):
                service.create()
            index += 1

    @mock.patch('nova.objects.Service._send_notification')
    @mock.patch('nova.objects.Service._check_minimum_version')
    def test_version_all_cells(self, mock_check, mock_notify):
        self._create_services(16, 16, 13, 16)
        self.assertEqual(13, service.get_minimum_version_all_cells(
            self.context, ['nova-compute']))

    @mock.patch('nova.objects.service.LOG')
    def test_get_minimum_version_checks_binary(self, mock_log):
        ex = self.assertRaises(exception.ObjectActionError,
                               service.get_minimum_version_all_cells,
                               self.context, ['compute'])
        self.assertIn('Invalid binary prefix', str(ex))
        self.assertTrue(mock_log.warning.called)

    @mock.patch('nova.context.scatter_gather_all_cells')
    def test_version_all_cells_with_fail(self, mock_scatter):
        # A failed cell is ignored unless require_all=True.
        mock_scatter.return_value = {
            'foo': {'nova-compute': 13},
            'bar': exception.ServiceNotFound(service_id='fake'),
        }
        self.assertEqual(13, service.get_minimum_version_all_cells(
            self.context, ['nova-compute']))
        self.assertRaises(exception.CellTimeout,
                          service.get_minimum_version_all_cells,
                          self.context, ['nova-compute'],
                          require_all=True)

    @mock.patch('nova.context.scatter_gather_all_cells')
    def test_version_all_cells_with_timeout(self, mock_scatter):
        # A timed-out cell is ignored unless require_all=True.
        mock_scatter.return_value = {
            'foo': {'nova-compute': 13},
            'bar': context.did_not_respond_sentinel,
        }
        self.assertEqual(13, service.get_minimum_version_all_cells(
            self.context, ['nova-compute']))
        self.assertRaises(exception.CellTimeout,
                          service.get_minimum_version_all_cells,
                          self.context, ['nova-compute'],
                          require_all=True)

    @mock.patch('nova.context.scatter_gather_all_cells')
    def test_version_all_cells_exclude_zero_service(self, mock_scatter):
        # A cell reporting version 0 (no services) must not drag the
        # computed minimum down to 0.
        mock_scatter.return_value = {
            'foo': {'nova-compute': 13},
            'bar': {'nova-compute': 0},
        }
        self.assertEqual(13, service.get_minimum_version_all_cells(
            self.context, ['nova-compute']))
| apache-2.0 |
chromium/chromium | tools/json_schema_compiler/js_externs_generator.py | 5 | 8348 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Generator that produces an externs file for the Closure Compiler.
Note: This is a work in progress, and generated externs may require tweaking.
See https://developers.google.com/closure/compiler/docs/api-tutorial3#externs
"""
from code import Code
from js_util import JsUtil
from model import *
from schema_util import *
import os
import sys
import re
# Reminder prepended to every generated externs file; '%s' is formatted
# with the namespace name.
NOTE = """// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.%s.FooType'.
// Please run the closure compiler before committing changes.
// See https://chromium.googlesource.com/chromium/src/+/main/docs/closure_compilation.md
"""
class JsExternsGenerator(object):
  """Public entry point; delegates the actual work to _Generator."""

  def Generate(self, namespace):
    """Returns a Code object with the externs for |namespace|."""
    return _Generator(namespace).Generate()
class _Generator(object):
  """Stateful generator that renders one namespace into externs code."""

  def __init__(self, namespace):
    self._namespace = namespace
    # While non-None, we are emitting members of this class and
    # _GetNamespace() appends '<ClassName>.prototype'.
    self._class_name = None
    self._js_util = JsUtil()

  def Generate(self):
    """Generates a Code object with the schema for the entire namespace.
    """
    c = Code()
    # /abs/path/src/tools/json_schema_compiler/
    script_dir = os.path.dirname(os.path.abspath(__file__))
    # /abs/path/src/
    src_root = os.path.normpath(os.path.join(script_dir, '..', '..'))
    # tools/json_schema_compiler/
    src_to_script = os.path.relpath(script_dir, src_root)
    # tools/json_schema_compiler/compiler.py
    compiler_path = os.path.join(src_to_script, 'compiler.py')
    (c.Append(self._GetHeader(compiler_path, self._namespace.name))
      .Append())

    self._AppendNamespaceObject(c)

    for js_type in self._namespace.types.values():
      self._AppendType(c, js_type)

    for prop in self._namespace.properties.values():
      self._AppendProperty(c, prop)

    for function in self._namespace.functions.values():
      self._AppendFunction(c, function)

    for event in self._namespace.events.values():
      self._AppendEvent(c, event)

    c.TrimTrailingNewlines()

    return c

  def _GetHeader(self, tool, namespace):
    """Returns the file header text.
    """
    return (self._js_util.GetLicense() + '\n' +
            self._js_util.GetInfo(tool) + (NOTE % namespace) + '\n' +
            ('/** @fileoverview Externs generated from namespace: %s */' %
             namespace))

  def _AppendType(self, c, js_type):
    """Given a Type object, generates the Code for this type's definition.
    """
    if js_type.property_type is PropertyType.ENUM:
      self._AppendEnumJsDoc(c, js_type)
    else:
      self._AppendTypeJsDoc(c, js_type)
    c.Append()

  def _AppendEnumJsDoc(self, c, js_type):
    """ Given an Enum Type object, generates the Code for the enum's definition.
    """
    c.Sblock(line='/**', line_prefix=' * ')
    c.Append('@enum {string}')
    self._js_util.AppendSeeLink(c, self._namespace.name, 'type',
                                js_type.simple_name)
    c.Eblock(' */')
    c.Append('%s.%s = {' % (self._GetNamespace(), js_type.name))

    def get_property_name(e):
      # Enum properties are normified to be in ALL_CAPS_STYLE.
      # Assume enum '1ring-rulesThemAll'.
      # Transform to '1ring-rules_Them_All'.
      e = re.sub(r'([a-z])([A-Z])', r'\1_\2', e)
      # Transform to '1ring_rules_Them_All'.
      e = re.sub(r'\W', '_', e)
      # Transform to '_1ring_rules_Them_All'.
      e = re.sub(r'^(\d)', r'_\1', e)
      # Transform to '_1RING_RULES_THEM_ALL'.
      return e.upper()

    c.Append('\n'.join(
        ["  %s: '%s'," % (get_property_name(v.name), v.name)
         for v in js_type.enum_values]))
    c.Append('};')

  def _IsTypeConstructor(self, js_type):
    """Returns true if the given type should be a @constructor. If this returns
       false, the type is a typedef.
    """
    return any(prop.type_.property_type is PropertyType.FUNCTION
               for prop in js_type.properties.values())

  def _AppendTypeJsDoc(self, c, js_type, optional=False):
    """Appends the documentation for a type as a Code.
    """
    c.Sblock(line='/**', line_prefix=' * ')

    if js_type.description:
      for line in js_type.description.splitlines():
        c.Append(line)

    if js_type.jsexterns:
      for line in js_type.jsexterns.splitlines():
        c.Append(line)

    is_constructor = self._IsTypeConstructor(js_type)
    if js_type.property_type is not PropertyType.OBJECT:
      self._js_util.AppendTypeJsDoc(c, self._namespace.name, js_type, optional)
    elif is_constructor:
      c.Comment('@constructor', comment_prefix = '', wrap_indent=4)
      c.Comment('@private', comment_prefix = '', wrap_indent=4)
    elif js_type.jsexterns is None:
      self._AppendTypedef(c, js_type.properties)

    self._js_util.AppendSeeLink(c, self._namespace.name, 'type',
                                js_type.simple_name)
    c.Eblock(' */')

    var = '%s.%s' % (self._GetNamespace(), js_type.simple_name)
    if is_constructor: var += ' = function() {}'
    var += ';'
    c.Append(var)

    if is_constructor:
      # Emit the class's members with a '<ClassName>.prototype' namespace.
      c.Append()
      self._class_name = js_type.name
      for prop in js_type.properties.values():
        if prop.type_.property_type is PropertyType.FUNCTION:
          self._AppendFunction(c, prop.type_.function)
        else:
          self._AppendTypeJsDoc(c, prop.type_, prop.optional)
          c.Append()
      self._class_name = None

  def _AppendTypedef(self, c, properties):
    """Given an OrderedDict of properties, Appends code containing a @typedef.
    """
    c.Append('@typedef {')
    if properties:
      self._js_util.AppendObjectDefinition(
          c, self._namespace.name, properties, new_line=False)
    else:
      c.Append('Object', new_line=False)
    c.Append('}', new_line=False)

  def _AppendProperty(self, c, prop):
    """Appends the code representing a top-level property, including its
       documentation. For example:
       /** @type {string} */
       chrome.runtime.id;
    """
    self._AppendTypeJsDoc(c, prop.type_, prop.optional)
    c.Append()

  def _AppendFunction(self, c, function):
    """Appends the code representing a function, including its documentation.
       For example:
       /**
        * @param {string} title The new title.
        */
       chrome.window.setTitle = function(title) {};
    """
    self._js_util.AppendFunctionJsDoc(c, self._namespace.name, function)
    params = self._GetFunctionParams(function)
    c.Append('%s.%s = function(%s) {};' % (self._GetNamespace(),
                                           function.name, params))
    c.Append()

  def _AppendEvent(self, c, event):
    """Appends the code representing an event.
       For example:
       /** @type {!ChromeEvent} */
       chrome.bookmarks.onChildrenReordered;
    """
    c.Sblock(line='/**', line_prefix=' * ')
    if (event.description):
      c.Comment(event.description, comment_prefix='')
    c.Append('@type {!ChromeEvent}')
    self._js_util.AppendSeeLink(c, self._namespace.name, 'event', event.name)
    c.Eblock(' */')
    c.Append('%s.%s;' % (self._GetNamespace(), event.name))
    c.Append()

  def _AppendNamespaceObject(self, c):
    """Appends the code creating namespace object.
       For example:
       /** @const */
       chrome.bookmarks = {};
    """
    c.Append('/** @const */')
    c.Append('chrome.%s = {};' % self._namespace.name)
    c.Append()

  def _GetFunctionParams(self, function):
    """Returns the function params string for function.
    """
    params = function.params[:]
    param_names = [param.name for param in params]
    # TODO(https://crbug.com/1142991): Update this to represent promises better,
    # rather than just appended as a callback.
    if function.returns_async:
      param_names.append(function.returns_async.name)
    return ', '.join(param_names)

  def _GetNamespace(self):
    """Returns the namespace to be prepended to a top-level typedef.

       For example, it might return "chrome.namespace".

       Also optionally includes the class name if this is in the context
       of outputting the members of a class.

       For example, "chrome.namespace.ClassName.prototype"
    """
    if self._class_name:
      return 'chrome.%s.%s.prototype' % (self._namespace.name, self._class_name)
    return 'chrome.%s' % self._namespace.name
| bsd-3-clause |
aron-bordin/Tyrant-Sql | SQL_Map/lib/techniques/dns/test.py | 8 | 1127 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import randomInt
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.exception import SqlmapNotVulnerableException
from lib.techniques.dns.use import dnsUse
def dnsTest(payload):
    """Probe whether data can be retrieved through the DNS channel.

    Sends a SELECT of a random integer via dnsUse() and records in
    kb.dnsTest whether the value round-tripped intact.  On failure, DNS
    exfiltration support is turned off unless it was explicitly forced,
    in which case SqlmapNotVulnerableException is raised.
    """
    logger.info("testing for data retrieval through DNS channel")
    randInt = randomInt()
    suffix = FROM_DUMMY_TABLE.get(Backend.getIdentifiedDbms(), "")
    retrieved = dnsUse(payload, "SELECT %d%s" % (randInt, suffix))
    kb.dnsTest = retrieved == str(randInt)
    if kb.dnsTest:
        infoMsg = "data retrieval through DNS channel was successful"
        logger.info(infoMsg)
    else:
        errMsg = "data retrieval through DNS channel failed"
        if conf.forceDns:
            raise SqlmapNotVulnerableException(errMsg)
        conf.dnsName = None
        errMsg += ". Turning off DNS exfiltration support"
        logger.error(errMsg)
| gpl-3.0 |
pietern/caffe2 | caffe2/python/operator_test/elementwise_op_broadcast_test.py | 3 | 16095 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
# TODO(jiayq): make them hypothesis tests for better coverage.
class TestElementwiseBroadcast(hu.HypothesisTestCase):
    """Tests for elementwise Add/Mul/Sub and SumReduceLike with broadcast=1.

    The repeated feed-blobs / run-op / fetch-output pattern of the original
    per-operator tests is factored into ``_run_binary_op``, and the four
    standard broadcasting shapes are exercised once in
    ``_check_elementwise_broadcasts`` instead of being copy/pasted per
    operator.
    """

    def _run_binary_op(self, op, X, Y):
        """Feed X and Y into the workspace, run `op` once and return the
        blob named "out"."""
        workspace.FeedBlob("X", X)
        workspace.FeedBlob("Y", Y)
        workspace.RunOperatorOnce(op)
        return workspace.FetchBlob("out")

    def _check_broadcast(self, gc, dc, op_name, X, Y, expected, **op_kwargs):
        """Run `op_name` with broadcast=1, compare the output against
        `expected`, and verify device equivalence and gradients."""
        op = core.CreateOperator(op_name, ["X", "Y"], "out", broadcast=1,
                                 **op_kwargs)
        out = self._run_binary_op(op, X, Y)
        np.testing.assert_array_almost_equal(out, expected)
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        self.assertGradientChecks(gc, op, [X, Y], 1, [0])

    def _check_elementwise_broadcasts(self, gc, dc, op_name, ref):
        """Exercise the four standard broadcasting shapes for an elementwise
        binary operator; `ref` is the matching numpy binary function."""
        # broadcasting last dimensions
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(4, 5).astype(np.float32)
        self._check_broadcast(gc, dc, op_name, X, Y, ref(X, Y))
        # broadcasting intermediate dimensions
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(3, 4).astype(np.float32)
        self._check_broadcast(gc, dc, op_name, X, Y,
                              ref(X, Y[:, :, np.newaxis]), axis=1)
        # broadcasting the first dimension
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(2).astype(np.float32)
        self._check_broadcast(
            gc, dc, op_name, X, Y,
            ref(X, Y[:, np.newaxis, np.newaxis, np.newaxis]), axis=0)
        # broadcasting with single elem dimensions at both ends
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(1, 4, 1).astype(np.float32)
        self._check_broadcast(gc, dc, op_name, X, Y, ref(X, Y), axis=1)

    @given(**hu.gcs)
    def test_broadcast_Add(self, gc, dc):
        self._check_elementwise_broadcasts(gc, dc, "Add",
                                           lambda x, y: x + y)

    @given(**hu.gcs)
    def test_broadcast_Mul(self, gc, dc):
        self._check_elementwise_broadcasts(gc, dc, "Mul",
                                           lambda x, y: x * y)

    @given(**hu.gcs)
    def test_broadcast_Sub(self, gc, dc):
        self._check_elementwise_broadcasts(gc, dc, "Sub",
                                           lambda x, y: x - y)

    @given(**hu.gcs)
    def test_broadcast_scalar(self, gc, dc):
        # No gradient checks here (matching the original test): only output
        # correctness and cross-device equivalence are verified.
        # broadcasting constant
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(1).astype(np.float32)
        op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1)
        out = self._run_binary_op(op, X, Y)
        np.testing.assert_array_almost_equal(out, X + Y)
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        # broadcasting 0-d scalar
        X = np.random.rand(1).astype(np.float32)
        Y = np.random.rand(1).astype(np.float32).reshape([])
        op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1)
        out = self._run_binary_op(op, X, Y)
        np.testing.assert_array_almost_equal(out, X + Y)
        self.assertDeviceChecks(dc, op, [X, Y], [0])

    @given(**hu.gcs)
    def test_semantic_broadcast(self, gc, dc):
        # NCHW as default: axis_str="C" names axis 1.
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(3).astype(np.float32)
        op = core.CreateOperator(
            "Add", ["X", "Y"], "out", broadcast=1, axis_str="C")
        out = self._run_binary_op(op, X, Y)
        np.testing.assert_array_almost_equal(
            out, X + Y[:, np.newaxis, np.newaxis])
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        # NHWC: axis_str="C" names the last axis.
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(5).astype(np.float32)
        op = core.CreateOperator(
            "Add", ["X", "Y"], "out", broadcast=1, axis_str="C", order="NHWC")
        out = self._run_binary_op(op, X, Y)
        np.testing.assert_array_almost_equal(out, X + Y)
        self.assertDeviceChecks(dc, op, [X, Y], [0])

    @given(**hu.gcs)
    def test_sum_reduce(self, gc, dc):
        # Reduce the two leading dimensions (broadcast from the end).
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(4, 5).astype(np.float32)
        op = core.CreateOperator(
            "SumReduceLike", ["X", "Y"], "out", broadcast=1)
        out = self._run_binary_op(op, X, Y)
        res = np.sum(np.sum(X, axis=0), axis=0)
        np.testing.assert_array_almost_equal(out, res)
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        # Reduce the two trailing dimensions (broadcast from the front).
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(2, 3).astype(np.float32)
        op = core.CreateOperator(
            "SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=0)
        out = self._run_binary_op(op, X, Y)
        res = np.sum(np.sum(X, axis=3), axis=2)
        np.testing.assert_array_almost_equal(out, res, decimal=3)
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        # Reduce the outer dimensions, keeping the intermediate ones.
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(3, 4).astype(np.float32)
        op = core.CreateOperator(
            "SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=1)
        out = self._run_binary_op(op, X, Y)
        res = np.sum(np.sum(X, axis=0), axis=2)
        np.testing.assert_array_almost_equal(out, res)
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        # Full reduction to a scalar with fp64 input.  fp64 is not supported
        # by the CUDA op, so only CPU devices are compared for this case.
        # BUGFIX: the cpu-only device check was previously run against the
        # *following* fp32 op, leaving this fp64 op unchecked.
        X = np.random.rand(2, 3, 4, 500).astype(np.float64)
        Y = np.random.rand(1).astype(np.float64)
        op = core.CreateOperator(
            "SumReduceLike", ["X", "Y"], "out", broadcast=1)
        out = self._run_binary_op(op, X, Y)
        res = np.array(np.sum(X))
        np.testing.assert_array_almost_equal(out, res, decimal=0)
        dc_cpu_only = [d for d in dc if d.device_type != caffe2_pb2.CUDA]
        self.assertDeviceChecks(dc_cpu_only, op, [X, Y], [0])
        # Broadcasting with single-element dimensions at both ends.
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        Y = np.random.rand(1, 3, 4, 1).astype(np.float32)
        op = core.CreateOperator(
            "SumReduceLike", ["X", "Y"], "out", broadcast=1)
        out = self._run_binary_op(op, X, Y)
        res = np.sum(np.sum(X, axis=0), axis=2).reshape(Y.shape)
        np.testing.assert_array_almost_equal(out, res)
        self.assertDeviceChecks(dc, op, [X, Y], [0])

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(**hu.gcs_gpu_only)
    def test_sum_reduce_fp16(self, gc, dc):
        def check(X, Y, ref, **op_kwargs):
            # Compare the op against the numpy reference within 1e-3.
            op = core.CreateOperator(
                "SumReduceLike", ["X", "Y"], "out", broadcast=1, **op_kwargs)
            self.assertReferenceChecks(
                device_option=gc,
                op=op,
                inputs=[X, Y],
                reference=ref,
                threshold=1e-3)

        # Reduce the two leading dimensions (broadcast from the end).
        X = np.random.rand(2, 3, 4, 5).astype(np.float16)
        Y = np.random.rand(4, 5).astype(np.float16)
        check(X, Y, lambda X, Y: [np.sum(np.sum(X, axis=0), axis=0)],
              device_option=gc)
        # Reduce the two trailing dimensions (broadcast from the front).
        X = np.random.rand(2, 3, 4, 5).astype(np.float16)
        Y = np.random.rand(2, 3).astype(np.float16)
        check(X, Y, lambda X, Y: [np.sum(np.sum(X, axis=3), axis=2)], axis=0)
        # Reduce the outer dimensions, keeping the intermediate ones.
        X = np.random.rand(2, 3, 4, 5).astype(np.float16)
        Y = np.random.rand(3, 4).astype(np.float16)
        check(X, Y, lambda X, Y: [np.sum(np.sum(X, axis=0), axis=2)], axis=1)
        # Broadcasting with single-element dimensions at both ends.
        X = np.random.rand(2, 3, 4, 5).astype(np.float16)
        Y = np.random.rand(1, 3, 4, 1).astype(np.float16)
        check(X, Y,
              lambda X, Y: [np.sum(np.sum(X, axis=0), axis=2).reshape(Y.shape)])
if __name__ == "__main__":
    # Run the tests when this module is executed directly.
    unittest.main()
| apache-2.0 |
lxybox1/MissionPlanner | Lib/encodings/iso8859_2.py | 93 | 13967 | """ Python Character Mapping Codec iso8859_2 generated from 'MAPPINGS/ISO8859/8859-2.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-2 (Latin-2) codec.

    Encoding/decoding are table lookups driven by the module-level
    ``encoding_table``/``decoding_table`` generated by gencodec.py.
    """

    def encode(self,input,errors='strict'):
        # unicode -> bytes via the inverted (encoding) charmap.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # bytes -> unicode via the 256-entry decoding charmap.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding is stateless, so each chunk
    is encoded independently."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding is stateless, so each chunk
    is decoded independently."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for ISO 8859-2; inherits the table-driven encode."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for ISO 8859-2; inherits the table-driven decode."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register this codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-2',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u02d8' # 0xA2 -> BREVE
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u013d' # 0xA5 -> LATIN CAPITAL LETTER L WITH CARON
u'\u015a' # 0xA6 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u0164' # 0xAB -> LATIN CAPITAL LETTER T WITH CARON
u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u02db' # 0xB2 -> OGONEK
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\u013e' # 0xB5 -> LATIN SMALL LETTER L WITH CARON
u'\u015b' # 0xB6 -> LATIN SMALL LETTER S WITH ACUTE
u'\u02c7' # 0xB7 -> CARON
u'\xb8' # 0xB8 -> CEDILLA
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u0165' # 0xBB -> LATIN SMALL LETTER T WITH CARON
u'\u017a' # 0xBC -> LATIN SMALL LETTER Z WITH ACUTE
u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Inverse (unicode -> byte) mapping built from the decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
zachcp/qiime | tests/test_simsam.py | 15 | 96318 | #!/usr/bin/env python
# File created on 19 Mar 2011
from __future__ import division
__author__ = "Justin Kucyznski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kucyznski", "Jai Ram Rideout", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kucyznski"
__email__ = "justinak@gmail.com"
from os.path import exists
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from itertools import izip
from cogent.parse.tree import DndParser
from biom import load_table
from biom.parse import parse_biom_table
from biom.table import Table
from qiime.parse import parse_mapping_file
from qiime.util import get_qiime_temp_dir
import qiime.simsam
from tempfile import gettempdir, mkdtemp
import string
import random
import os
import shutil
import subprocess
import numpy
class SimsamTests(TestCase):
    def setUp(self):
        """Create a temp output dir and parse the module-level fixtures."""
        # Directories registered here are removed by tearDown.
        self.dirs_to_remove = []
        tmp_dir = get_qiime_temp_dir()
        self.test_out = mkdtemp(dir=tmp_dir,
                                prefix='qiime_parallel_tests_',
                                suffix='')
        self.dirs_to_remove.append(self.test_out)
        # map_lines / otu_table_lines / tutorial_* are fixture strings,
        # presumably defined later in this module — not visible here.
        self.map_f = map_lines.split('\n')
        self.otu_table = parse_biom_table(otu_table_lines.split('\n'))
        self.tutorial_map = tutorial_map.split('\n')
        self.tutorial_tree = DndParser(tutorial_tree)
        self.tutorial_otu_table = parse_biom_table(
            tutorial_otu_table.split('\n'))
def tearDown(self):
for d in self.dirs_to_remove:
if os.path.exists(d):
shutil.rmtree(d)
def test_create_tip_index(self):
"""Create a tip index at the root"""
t = DndParser("((a,b)c,(d,e)f)g;")
qiime.simsam.create_tip_index(t)
self.assertEqual({'a':t.getNodeMatchingName('a'),
'b':t.getNodeMatchingName('b'),
'd':t.getNodeMatchingName('d'),
'e':t.getNodeMatchingName('e')}, t._tip_index)
def test_cache_tip_names(self):
"""Cache tip names over the tree"""
t = DndParser("((a,b)c,(d,e)f)g;")
qiime.simsam.cache_tip_names(t)
self.assertEqual(t._tip_names, ['a', 'b', 'd', 'e'])
self.assertEqual(t.Children[0]._tip_names, ['a', 'b'])
self.assertEqual(t.Children[1]._tip_names, ['d', 'e'])
self.assertEqual(t.Children[0].Children[0]._tip_names, ['a'])
self.assertEqual(t.Children[0].Children[1]._tip_names, ['b'])
self.assertEqual(t.Children[1].Children[0]._tip_names, ['d'])
self.assertEqual(t.Children[1].Children[1]._tip_names, ['e'])
def test_script(self):
""" test the whole simsam script
"""
tempdir = get_qiime_temp_dir()
maindir = os.path.join(tempdir,
''.join(random.choice(string.ascii_letters + string.digits)
for x in range(10)))
os.makedirs(maindir)
self.dirs_to_remove.append(maindir)
otuf = os.path.join(maindir, 'otuf')
treef = os.path.join(maindir, 'treef')
otufh = open(otuf, 'w')
otufh.write(tutorial_otu_table)
otufh.close()
treefh = open(treef, 'w')
treefh.write(tutorial_tree)
treefh.close()
out_dir = os.path.join(maindir, 'simsam_out')
cmd = 'simsam.py -i %s -t %s -o %s -d .003 -n 3 ' % (otuf, treef,
out_dir)
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
scriptout, scripterr = proc.communicate()
if scriptout:
raise RuntimeError('script returned stdout: ' + scriptout)
if scripterr:
raise RuntimeError('script returned stderr: ' + scripterr)
num_replicates = 3 # ensure this matches cmd above
result_fp = os.path.join(out_dir, 'otuf_n%d_d0.003.biom' %
num_replicates)
res_table = load_table(result_fp)
orig_table = parse_biom_table(open(otuf, 'U'))
# 3 samples per input sample
self.assertEqual(
len(res_table.ids()),
num_replicates * len(orig_table.ids()))
# sample_ids have correct naming and order
for i in range(len(orig_table.ids())):
for j in range(num_replicates):
exp = orig_table.ids()[i] + '.' + str(j)
self.assertEqual(
res_table.ids()[i * num_replicates + j],
exp)
# same total sequences in each replicate sample
num_orig_samples = len(orig_table.ids())
orig_sams = orig_table.iter_data(axis='sample')
res_sams = res_table.iter_data(axis='sample')
for i in range(num_orig_samples):
orig_sam = orig_sams.next()
for j in range(num_replicates):
res_sam = res_sams.next()
self.assertEqual(res_sam.sum(), orig_sam.sum())
# would be nice to test that result otu table doesn't match input,
# but not sure how probable that is, and don't want stochastic failures
    def test_script_nochange(self):
        """ simsam script with 0 distance should just replicate input samples
        """
        tempdir = get_qiime_temp_dir()
        # Random 10-character directory name avoids collisions between
        # concurrent test runs; registered for teardown cleanup below.
        maindir = os.path.join(tempdir,
                               ''.join(random.choice(string.ascii_letters + string.digits)
                                       for x in range(10)))
        os.makedirs(maindir)
        self.dirs_to_remove.append(maindir)
        # Write the tutorial OTU table and tree fixtures to disk so the
        # command-line script can read them.
        otuf = os.path.join(maindir, 'otuf')
        treef = os.path.join(maindir, 'treef')
        otufh = open(otuf, 'w')
        otufh.write(tutorial_otu_table)
        otufh.close()
        treefh = open(treef, 'w')
        treefh.write(tutorial_tree)
        treefh.close()
        out_dir = os.path.join(maindir, 'simsam_out')
        # -d 0: zero dissimilarity, so every replicate should be an exact
        # copy of the sample it was derived from.
        cmd = 'simsam.py -i %s -t %s -o %s -d 0 -n 3 ' % (otuf, treef,
                                                          out_dir)
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        scriptout, scripterr = proc.communicate()
        # The script is expected to run silently; any stdout/stderr output
        # is treated as a failure.
        if scriptout:
            raise RuntimeError('script returned stdout: ' + scriptout)
        if scripterr:
            raise RuntimeError('script returned stderr: ' + scripterr)
        num_replicates = 3  # ensure this matches cmd above
        # Output filename encodes the -n and -d values used in cmd.
        result_fp = os.path.join(out_dir, 'otuf_n%d_d0.0.biom' %
                                 num_replicates)
        res_table = load_table(result_fp)
        orig_table = parse_biom_table(open(otuf, 'U'))
        # 3 samples per input sample
        self.assertEqual(
            len(res_table.ids()),
            num_replicates * len(orig_table.ids()))
        # sample_ids have correct naming (<orig>.<replicate idx>) and order
        for i in range(len(orig_table.ids())):
            for j in range(num_replicates):
                exp = orig_table.ids()[i] + '.' + str(j)
                self.assertEqual(
                    res_table.ids()[i * num_replicates + j],
                    exp)
        # same otu ids
        self.assertEqual(res_table.ids(axis='observation').tolist(),
                         orig_table.ids(axis='observation').tolist())
        # same otu table, just replicated thrice
        # note this requires the same sorting of otus, input is correct sorting
        num_orig_samples = len(orig_table.ids())
        orig_sams = orig_table.iter_data(axis='sample')
        res_sams = res_table.iter_data(axis='sample')
        for i in range(num_orig_samples):
            orig_sam = orig_sams.next()
            for j in range(num_replicates):
                res_sam = res_sams.next()
                self.assertItemsEqual(res_sam, orig_sam)
def test_sim_otu_table(self):
""" simulated otu table should be right order, number of seqs
tree looks like:
/-A
|
---------|--B
|
| /-C
\--------|
\-D
"""
sample_ids = ['samB', 'samA']
otu_ids = ['C', 'A']
otu_mtx = numpy.array([[3, 9],
[5, 0],
])
otu_metadata = [{'tax': 'otu_C is cool'}, {'tax': ''}]
tree = DndParser("(A:0.1,B:0.2,(C:0.3,D:0.4):0.5);")
num_replicates = 3
dissimilarity = 0.15
rich_table = Table(otu_mtx, sample_ids, otu_ids,
observation_metadata=otu_metadata)
res_sam_names, res_otus, res_otu_mtx, res_otu_metadata = \
qiime.simsam.sim_otu_table(
sample_ids, otu_ids, rich_table.iter(axis='sample'), otu_metadata,
tree, num_replicates, dissimilarity)
# dissim is too small to change otu C, it should always be there
# with at least original # seqs, maybe more
c_index = res_otus.index('C')
c_row = res_otu_mtx[c_index]
self.assertEqual(res_otu_metadata[c_index], {'tax': 'otu_C is cool'})
for o, e in izip(c_row,[2 , 2, 2, 8, 8, 8]):
self.assertGreater(o, e)
# order of samples should remain the same as input,
# and eash replicate sample
# should have same number or sequences as input
for i in range(len(sample_ids)):
for j in range(num_replicates):
self.assertEqual(otu_mtx[:, i].sum(),
res_otu_mtx[:, num_replicates * i + j].sum())
def test_sim_otu_table_new_otus(self):
"""Test large dissim to obtain OTUs that weren't in original table."""
sample_ids = ['samB', 'samA']
otu_ids = ['C', 'A']
otu_mtx = numpy.array([[3, 9],
[5, 0],
])
otu_metadata = [{'tax': 'otu_C is cool'}, {'tax': ''}]
tree = DndParser("(A:0.1,B:0.2,(C:0.3,D:0.4):0.5);")
num_replicates = 3
# Huge dissimilarity to ensure we get new OTUs.
dissimilarity = 100000
rich_table = Table(otu_mtx, sample_ids, otu_ids,
observation_metadata=otu_metadata)
otu_id_results = []
otu_md_results = []
for i in range(1000):
res_sam_names, res_otus, res_otu_mtx, res_otu_metadata = \
qiime.simsam.sim_otu_table(sample_ids, otu_ids,
rich_table.iter(axis='sample'
), otu_metadata, tree,
num_replicates, dissimilarity)
otu_id_results.extend(res_otus)
otu_md_results.extend(res_otu_metadata)
# We should see all OTUs show up at least once.
self.assertTrue('A' in otu_id_results)
self.assertTrue('B' in otu_id_results)
self.assertTrue('C' in otu_id_results)
self.assertTrue('D' in otu_id_results)
# We should see at least one blank metadata entry since A and B are not
# in the original table.
self.assertTrue(None in otu_md_results)
def test_get_new_otu_id_small(self):
""" small dissim should return old tip id"""
tree = DndParser("(A:0.1,B:0.2,(C:0.3,D:0.4):0.5);")
res = qiime.simsam.get_new_otu_id(
old_otu_id='A',
tree=tree,
dissim=.05)
self.assertEqual(res, 'A')
def test_get_new_otu_id_large(self):
""" w/ large dissim, should at least sometimes return other tip"""
tree = DndParser("(A:0.1,B:0.2,(C:0.3,D:0.4):0.5);")
results = []
for i in range(1000):
results.append(qiime.simsam.get_new_otu_id(old_otu_id='D',
tree=tree, dissim=.6))
self.assertTrue('C' in results)
self.assertTrue('D' in results)
self.assertTrue('A' not in results)
self.assertTrue('B' not in results)
def test_combine_sample_dicts(self):
""" combining sample dicts should give correct otu table and sorting
"""
d1 = {'otu2': 0, 'otu1': 3}
d2 = {'otu4': 5}
d3 = {}
res_otu_mtx, res_otu_ids = qiime.simsam.combine_sample_dicts(
[d1, d2, d3])
exp_otu_ids = ['otu1', 'otu2', 'otu4']
exp_otu_mtx = numpy.array([[3, 0, 0],
[0, 0, 0],
[0, 5, 0],
])
self.assertEqual(res_otu_ids, exp_otu_ids)
assert_almost_equal(res_otu_mtx, exp_otu_mtx)
def test_create_replicated_mapping_file(self):
"""Test creating replicate samples in a mapping file."""
# 3 replicates, with two extra samples in the mapping file.
obs = qiime.simsam.create_replicated_mapping_file(self.map_f, 3,
self.otu_table.ids())
self.assertEqual(obs, exp_rep_map_lines)
# Must specify at least one replicate.
self.assertRaises(ValueError,
qiime.simsam.create_replicated_mapping_file, self.map_f, 0,
self.otu_table.ids())
def test_simsam_range_correct_number_of_output(self):
"""simsam_range yields correct number of output tables
"""
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table, self.tutorial_tree,
[1], [0.1], self.tutorial_map)
self.assertEqual(len(list(actual)), 1)
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table, self.tutorial_tree,
[1, 2], [0.1], self.tutorial_map)
self.assertEqual(len(list(actual)), 2)
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table, self.tutorial_tree,
[2], [0.1, 0.001], self.tutorial_map)
self.assertEqual(len(list(actual)), 2)
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table, self.tutorial_tree,
[1, 2], [0.1, 0.001], self.tutorial_map)
self.assertEqual(len(list(actual)), 4)
def test_simsam_range_correct_size_of_output(self):
"""simsam_range yields tables with correct number of samples"""
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table, self.tutorial_tree,
[1], [0.1], self.tutorial_map)
actual = list(actual)
self.assertEqual(
len(actual[0][0].ids()),
len(self.tutorial_otu_table.ids()))
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table, self.tutorial_tree,
[2], [0.1], self.tutorial_map)
actual = list(actual)
self.assertEqual(
len(actual[0][0].ids()),
2 * len(self.tutorial_otu_table.ids()))
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table, self.tutorial_tree,
[4], [0.1], self.tutorial_map)
actual = list(actual)
self.assertEqual(
len(actual[0][0].ids()),
4 * len(self.tutorial_otu_table.ids()))
def test_simsam_range_functions_without_mapping_file(self):
"""simsam_range yields correct number of output tables
"""
actual = qiime.simsam.simsam_range(
self.tutorial_otu_table,
self.tutorial_tree,
[1],
[0.1])
self.assertEqual(len(list(actual)), 1)
def test_simsam_range_to_files(self):
"""simsam_range_to_files functions as expected """
qiime.simsam.simsam_range_to_files(self.tutorial_otu_table,
self.tutorial_tree,
[2],
[0.1],
output_dir=self.test_out,
mapping_f=self.tutorial_map,
output_table_basename="hello",
output_map_basename="world")
self.assertTrue(exists('%s/hello_n2_d0.1.biom' % self.test_out))
self.assertTrue(exists('%s/world_n2_d0.1.txt' % self.test_out))
# confirm same sample ids in table and mapping file
t = load_table('%s/hello_n2_d0.1.biom' % self.test_out)
d, _, _ = \
parse_mapping_file(open('%s/world_n2_d0.1.txt' % self.test_out))
mapping_sample_ids = [e[0] for e in d]
self.assertItemsEqual(t.ids(), mapping_sample_ids)
# Mapping-file fixture: five samples (S1-S3 Control, S4-S5 Fast) plus an
# embedded comment line that replication must carry through.
map_lines = """#SampleID\tTreatment\tDescription
# Some epic comment
S1\tControl\tControl1
S2\tControl\tControl2
S3\tControl\tControl3
S4\tFast\tFast4
S5\tFast\tFast5"""
# Expected replicated mapping for 3 replicates: only S1-S3 survive because
# S4/S5 are absent from the OTU table fixture below; each sample is suffixed
# with its replicate index.
exp_rep_map_lines = """#SampleID\tTreatment\tDescription
# Some epic comment
S1.0\tControl\tControl1
S1.1\tControl\tControl1
S1.2\tControl\tControl1
S2.0\tControl\tControl2
S2.1\tControl\tControl2
S2.2\tControl\tControl2
S3.0\tControl\tControl3
S3.1\tControl\tControl3
S3.2\tControl\tControl3"""
# 4-OTU x 3-sample sparse BIOM (v1 JSON) table fixture for samples S1-S3.
otu_table_lines = """{"id": "None","format": "Biological Observation Matrix 1.0.0","format_url": "http://biom-format.org","type": "OTU table","generated_by": "BIOM-Format 1.1.2","date": "2013-03-27T13:59:38.949014","matrix_type": "sparse","matrix_element_type": "float","shape": [4, 3], "data": [[0,0,1.0],[0,2,4.0],[1,1,5.0],[1,2,7.0],[2,0,1.0],[2,1,1.0],[2,2,9.0],[3,0,6.0],[3,1,10.0],[3,2,8.0]],"rows": [{"id": "OTU0", "metadata": null},{"id": "OTU1", "metadata": null},{"id": "OTU2", "metadata": null},{"id": "OTU3", "metadata": null}],"columns": [{"id": "S1", "metadata": null},{"id": "S2", "metadata": null},{"id": "S3", "metadata": null}]}"""
tutorial_tree = """(((((381:0.0213,(214:0.03728,253:0.00015)0.945:0.03224)0.763:0.00483,((269:0.02693,(231:0.00509,(105:0.01425,141:0.02641)0.846:0.01405)0.428:0.00519)0.622:0.00014,404:0.00524)0.795:0.00514)0.773:0.00508,(131:0.00518,(33:0.01631,((284:0.00828,(176:0.03098,388:0.01236)0.901:0.02175)0.885:0.01273,52:0.01046)0.743:0.00498)0.924:0.01603)0.779:0.00511)0.772:0.00014,153:0.00507)0.753:0.00602,(223:0.03237,(172:0.01733,81:0.03834)0.224:0.00414)0.845:0.01076,(136:0.00627,((((265:0.01557,200:0.00517)0.674:0.00014,((204:0.00015,(339:0.01613,(322:0.01633,268:0.01643)0.569:0.0107)0.885:0.00016)0.840:0.00527,((((((((280:0.02348,(395:0.00015,(48:0.03014,((30:0.02665,316:0.01921)0.813:0.01152,215:0.0242)0.850:0.01191)0.320:0.00016)0.912:0.02431)0.694:0.01482,(115:0.01526,364:0.08211)0.879:0.03637)0.677:0.03567,((((((162:0.06933,59:0.02113)0.991:0.08563,(308:0.02061,43:0.03488)0.894:0.04949)0.911:0.05006,(((344:0.00015,(146:0.00015,377:0.01634)0.924:0.0108)0.918:0.01069,((201:0.011,240:0.04792)1.000:0.00015,(61:0.00015,96:0.00523)0.781:0.00514)0.828:0.01056)0.809:0.00016,196:0.04505)0.213:0.00014)0.650:0.00529,(((161:0.01191,(390:0.04307,37:0.03893)0.933:0.03396)0.814:0.01401,68:0.04946)0.953:0.03303,((341:0.01127,393:0.02765)0.941:0.02238,(82:0.01112,(350:0.01141,(156:0.01636,356:0.00015)0.863:0.02214)0.946:0.02475)0.748:0.00565)0.761:0.00968)0.748:0.00836)0.927:0.0224,271:0.05902)0.753:0.00511,(((((((217:0.03796,379:0.00016)0.973:0.05805,(299:0.08963,(382:0.06426,((317:0.00016,((205:0.00532,264:0.03867)0.939:0.01605,(194:0.03374,(32:0.01052,(348:0.02212,157:0.02743)1.000:0.00014)0.868:0.02793)0.745:0.00531)0.336:0.01061)0.789:0.00604,334:0.02104)0.598:0.01527)0.687:0.00354)0.836:0.01564)0.811:0.01617,(292:0.06237,84:0.02159)0.934:0.04776)0.864:0.02103,301:0.06716)0.698:0.0046,272:0.00539)0.809:0.0115,88:0.05965)0.860:0.01208,(276:0.01065,279:0.03443)0.891:0.01124)0.090:0.00014)0.924:0.03938)0.953:0.05227,281:0.02828)0.691:0.00622,25:0.01213)0.727:0.00397,((261:0.
01613,((147:0.01555,20:0.00016)0.967:0.02125,(107:0.01089,349:0.03426)0.757:0.00478)0.750:0.00518)0.799:0.0052,(259:0.01616,63:0.01053)0.764:0.00523)0.792:0.00511)1.000:0.00016,(72:0.05949,(1:0.01425,67:0.0377)0.751:0.00762)0.867:0.01609)0.807:0.00507,((49:0.01645,116:0.01633)0.736:0.00514,(398:0.00515,(((180:0.04458,99:0.0328)0.913:0.02521,(((410:0.05589,(((150:0.04425,(170:0.03163,((250:0.00693,331:0.00435)1.000:0.10845,357:0.01319)0.850:0.0225)0.879:0.02887)0.749:0.00795,(((((23:0.00919,248:0.08024)0.405:0.03691,(358:0.05635,369:0.07223)0.978:0.09469)0.888:0.05975,(234:0.07249,8:0.00016)0.712:0.01829)0.976:0.07916,(((275:0.094,(((114:0.0269,302:0.02202)0.985:0.06964,(213:0.06889,42:0.03436)0.415:0.01928)0.795:0.02064,((110:0.05188,342:0.01457)0.967:0.08524,((123:0.02756,343:0.0481)0.800:0.01738,((298:0.03283,(124:0.02507,6:0.03351)0.781:0.01076)0.939:0.03194,309:0.04124)0.820:0.01321)0.985:0.0961)0.928:0.06559)0.902:0.03886)0.684:0.03217,373:0.06838)0.909:0.03592,((290:0.02673,380:0.00015)1.000:0.16099,(((90:0.09952,192:0.10171)0.679:0.01316,(326:0.03972,45:0.09053)0.965:0.05309)0.115:0.00014,(375:0.00015,(221:0.00071,278:0.05255)1.000:0.08313)1.000:0.10921)0.623:0.0222)0.892:0.03509)0.465:0.00015)0.980:0.05443,(((306:0.08813,385:0.14214)0.269:0.00862,((256:0.01776,(273:0.07543,69:0.01333)0.591:0.02343)0.883:0.02549,((132:0.02365,219:0.01597)0.897:0.02388,(100:0.01243,50:0.0237)0.226:0.01766)0.961:0.04348)0.848:0.01577)0.998:0.08323,(241:0.23207,(130:0.24778,(53:0.12887,(129:0.07692,318:0.01288)0.900:0.04845)0.817:0.02143)0.888:0.05464)0.657:0.01537)0.822:0.01876)0.828:0.01549)0.773:0.01019,((98:0.12681,((148:0.0294,391:0.00571)0.989:0.07803,(389:0.10107,(252:0.00014,362:0.01104)0.964:0.06682)0.834:0.03217)0.762:0.0152)0.524:0.0181,(0:0.0483,(135:0.01151,(300:0.0175,(274:0.04561,((((166:0.02935,355:0.00015)0.833:0.00565,41:0.00014)0.807:0.00586,(226:0.01038,92:0.0044)0.792:0.00425)0.961:0.03236,((360:0.01752,7:0.0182)0.748:0.00495,(368:0.02316,288:0.01783)0.759:0
.00622)0.707:0.00573)0.841:0.00015)0.949:0.02275)0.745:0.00559)0.855:0.02344)0.876:0.03532)0.885:0.02567)0.752:0.00645)0.782:0.00969,(((((((178:0.01576,(230:0.02704,64:0.02146)0.869:0.0108)0.809:0.01014,((122:0.00448,354:0.0225)0.855:0.01127,(333:0.01086,406:0.01648)0.748:0.00433)0.789:0.00624)0.171:0.00516,((416:0.04298,(400:0.01045,74:0.01051)0.923:0.00014)0.862:0.02166,(307:0.04097,(260:0.03574,335:0.0434)0.747:0.00875)0.916:0.02837)0.843:0.00987)0.804:0.00016,((237:0.09447,((370:0.01631,(319:0.04803,(60:0.01986,405:0.01742)0.560:0.01574)0.898:0.01971)0.918:0.01584,(384:0.02116,(245:0.01047,(177:0.0051,(183:0.03226,413:0.00014)0.826:0.00518)0.777:0.00501)0.923:0.0158)0.622:0.00016)0.685:0.00099)0.224:0.02406,((22:0.03142,5:0.06696)0.870:0.03448,47:0.0347)0.763:0.01052)0.847:0.01209)0.743:0.00534,((((62:0.00137,(121:0.00016,78:0.04376)1.000:0.10609)0.942:0.0378,(311:0.05626,407:0.06902)0.944:0.04614)0.703:0.00608,(((188:0.01993,202:0.02611)0.914:0.02118,(328:0.0273,337:0.00015)0.815:0.01019)0.852:0.01169,(330:0.03441,((386:0.13035,(392:0.00544,(321:0.02191,4:0.01061)0.763:0.0052)0.932:0.00014)0.671:0.01096,145:0.01556)0.829:0.01073)0.735:0.00529)0.840:0.01052)0.849:0.01531,(262:0.0683,((310:0.05551,((83:0.01296,(127:0.01909,212:0.01393)0.090:0.00499)0.876:0.01352,(104:0.00014,171:0.01061)0.895:0.01717)0.877:0.02683)0.940:0.03929,(119:0.0152,179:0.00197)0.889:0.02843)0.066:0.01551)0.839:0.01374)0.820:0.01069)0.869:0.01061,(((293:0.01741,168:0.04514)0.046:0.01491,345:0.03334)0.248:0.01629,(31:0.04727,97:0.04999)0.915:0.03556)0.811:0.01631)0.010:0.00016,(((94:0.0671,(108:0.00014,229:0.06991)0.630:0.01827)0.982:0.06031,(143:0.02201,((((((198:0.02745,(140:0.14724,75:0.02831)0.817:0.0209)0.851:0.01902,(((282:0.06783,54:0.00015)0.952:0.03641,((313:0.03746,80:0.00524)0.872:0.0215,2:0.07468)0.862:0.02589)0.916:0.03914,((367:0.0099,(((128:0.0425,((111:0.06727,11:0.00495)0.974:0.02953,283:0.02606)0.504:0.00357)0.862:0.02044,(289:0.04546,(399:0.00319,((((152:0.00014,19:0.0630
7)0.992:0.03752,154:0.00016)0.786:0.00014,134:0.06945)0.997:0.06109,51:0.00014)0.994:0.04556)0.353:0.00583)0.482:0.00828)0.933:0.03536,112:0.07957)0.734:0.00733)0.962:0.08492,403:0.10375)0.869:0.0525)0.894:0.03949)0.645:0.00925,((((287:0.00534,15:0.05518)0.920:0.03189,(((304:0.00508,409:0.00015)0.991:0.00014,(120:0.00015,(57:0.04309,56:0.0156)0.759:0.00015)0.902:0.01019)0.339:0.01644,173:0.094)0.787:0.01131)1.000:0.07731,(236:0.00625,((26:0.04569,(((351:0.005,(27:0.03624,(137:0.01569,(314:0.00015,408:0.03277)0.991:0.03257)0.806:0.00498)0.851:0.00588)0.928:0.01791,((133:0.04374,(227:0.00527,(412:0.00014,(175:0.00507,((95:0.01566,210:0.00014)0.438:0.01045,191:0.00016)0.781:0.00518)0.815:0.00508)0.859:0.01021)0.745:0.00667)0.735:0.01956,((((12:0.01588,415:0.01701)0.121:0.03139,(73:0.04886,(17:0.00016,(46:0.02083,378:0.01021)0.886:0.01027)0.785:0.019)0.719:0.02118)0.774:0.01959,329:0.01522)0.777:0.01121,(((286:0.00722,(394:0.01596,(372:0.00015,225:0.0446)0.884:0.0109)0.929:0.02558)0.584:0.00985,218:0.02283)0.888:0.01478,159:0.02121)0.739:0.00866)0.851:0.01129)0.728:0.00602)0.866:0.01998,93:0.04869)0.604:0.00297)0.648:0.01633,199:0.06704)0.788:0.01956)0.371:0.01052)0.827:0.01491,((244:0.0262,(126:0.00015,163:0.03192)0.984:0.04377)0.817:0.01306,((216:0.00014,(86:0.02257,(21:0.01127,34:0.01066)0.859:0.01088)0.622:0.017)0.998:0.19434,(233:0.00244,(182:0.01898,(239:0.02877,267:0.00015)0.839:0.01438)0.999:0.09419)0.975:0.15234)0.877:0.07457)0.893:0.0244)0.821:0.02013)0.998:0.10422,(195:0.10508,((249:0.0368,(336:0.04596,((263:0.02407,(277:0.01295,190:0.03788)0.823:0.01671)0.698:0.0068,197:0.01756)0.309:0.01631)0.860:0.01866)0.926:0.02656,(303:0.04293,(113:0.04423,347:0.04295)0.930:0.03972)0.885:0.02484)0.701:0.00015)0.902:0.03629)0.841:0.02905,(246:0.00014,(125:0.03009,184:0.0229)0.998:0.07478)0.999:0.10301)0.936:0.04978,((247:0.04204,((((((238:0.01393,(109:0.01081,39:0.02762)0.769:0.00519)0.758:0.00702,(257:0.01539,85:0.07408)0.746:0.00558)0.755:0.01039,(363:0.04294,155:0.000
15)0.943:0.02426)0.894:0.01745,266:0.00586)0.948:0.03346,55:0.02705)0.739:0.00453,203:0.00015)0.855:0.02077)0.995:0.07638,327:0.00745)0.921:0.03692)0.553:0.01549)0.970:0.05544)0.858:0.02855,338:0.08163)0.892:0.03304)0.759:0.00673)0.945:0.02495,((((((((102:0.04317,36:0.02415)0.964:0.03758,65:0.00505)0.822:0.01078,366:0.00016)0.811:0.01537,(315:0.01071,((151:0.0,160:0.0):0.00016,340:0.00014)0.842:0.01037)0.951:0.02712)0.724:0.00057,(185:0.04527,(207:0.01304,76:0.00341)0.949:0.03474)0.845:0.0196)0.871:0.0106,(371:0.02805,(164:0.0104,242:0.02179)0.758:0.0052)0.771:0.00538)0.841:0.01097,174:0.13953)0.831:0.01033,(144:0.01866,(3:0.01578,312:0.00015)0.785:0.00532)0.780:0.00615)0.752:0.00572)0.667:0.00244)0.268:0.00339,((101:0.04199,77:0.00334)0.965:0.0345,((14:0.01106,294:0.00502)0.891:0.01811,(285:0.01062,397:0.01076)0.758:0.00896)0.163:0.01034)0.850:0.01331)0.563:0.00537)0.800:0.00519)0.930:0.00016)0.759:0.01023)1.000:0.00014)0.850:0.00015,(243:0.03373,220:0.01032)0.888:0.011)0.540:0.00014,(189:0.02629,(((139:0.0155,186:0.01757)0.420:0.01444,(((((((165:0.0059,58:0.03297)0.779:0.02132,((222:0.01678,(323:0.02243,44:0.04081)0.819:0.01102)0.063:0.00015,(106:0.03989,149:0.02047)0.775:0.01298)0.706:0.0074)0.957:0.03281,((((258:0.04247,87:0.0123)0.500:0.01067,235:0.00735)0.645:0.00296,208:0.00505)1.000:0.00015,((18:0.00454,(((10:0.04233,(414:0.00016,(142:0.01127,66:0.03479)0.756:0.00498)0.726:0.00685)0.486:0.01639,181:0.00014)0.784:0.00501,(167:0.01463,(320:0.00885,402:0.00881)0.791:0.00014)0.839:0.01499)0.773:0.00524)0.893:0.01079,(169:0.00517,(295:0.01586,297:0.03792)0.262:0.00016)0.778:0.00521)0.818:0.00528)0.764:0.01062)0.767:0.00486,70:0.00512)0.766:0.00495,(((332:0.00016,((325:0.01591,(383:0.00014,(361:0.01642,(138:0.04133,(158:0.0036,224:0.00657)0.840:0.01972)0.769:0.00881)0.777:0.00496)0.882:0.01036)0.752:0.00492,(24:0.03974,((((254:0.00541,(251:0.00015,(324:0.02187,((117:0.0052,(374:0.03165,270:0.02362)0.731:0.00708)0.791:0.00525,13:0.01621)0.757:0.00511)0.607:0.01283)
0.889:0.0192)0.852:0.01583,305:0.01647)0.948:0.00015,211:0.00015)0.419:0.00016,(103:0.01686,209:0.05269)0.861:0.01595)0.937:0.01635)0.756:0.00523)0.878:0.01048)0.776:0.00238,(365:0.03251,((38:0.04434,79:0.00014)0.758:0.00016,(296:0.043,9:0.00518)0.693:0.0162)0.508:0.00805)0.766:0.00767)0.764:0.00313,(((359:0.02181,(16:0.04469,(232:0.01621,(118:0.03421,(29:0.01612,353:0.01494)0.293:0.01034)0.864:0.01326)0.747:0.01394)0.724:0.0072)0.911:0.01681,387:0.02755)0.761:0.00523,(346:0.01957,(376:0.04072,71:0.0547)0.829:0.0181)0.750:0.00673)0.823:0.01037)0.774:0.0054)0.789:0.005,(((228:0.00529,((401:0.02214,((187:0.00532,411:0.00526)0.801:0.00583,((89:0.027,193:0.00014)0.787:0.00524,91:0.01618)0.743:0.0045)0.548:0.00548)0.825:0.016,40:0.02807)0.778:0.00992)0.824:0.01011,255:0.05012)0.966:0.00014,(352:0.01585,396:0.00014)0.784:0.02134)0.880:0.0107)0.901:0.0194,(35:0.0209,(206:0.00836,291:0.06414)0.439:0.00793)0.753:0.00846)0.763:0.00968)0.942:0.02851,28:0.0208)0.742:0.01057)0.781:0.00811)0.802:0.02029)0.750:0.01578);"""
tutorial_otu_table = """{"rows": [{"id": "0", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "1", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "2", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Bacillales", "f__Staphylococcaceae"]}}, {"id": "3", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "4", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "5", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "6", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "7", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "8", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "9", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "10", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "11", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "12", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "13", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "14", 
"metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Streptococcaceae"]}}, {"id": "15", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "16", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "17", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "18", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "19", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "20", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "21", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "22", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "23", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Deltaproteobacteria", "o__Desulfovibrionales", "f__Desulfovibrionaceae"]}}, {"id": "24", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "25", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "26", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "27", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "28", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__TM7", "c__TM7-3", 
"o__CW040", "f__F16"]}}, {"id": "29", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "30", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "31", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "32", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "33", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "34", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "35", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "36", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "37", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "38", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "39", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "40", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "41", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "42", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "43", "metadata": {"taxonomy": 
["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "44", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "45", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "46", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "47", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "48", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "49", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Deferribacteres", "c__Deferribacteres", "o__Deferribacterales", "f__Deferribacteraceae"]}}, {"id": "50", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "51", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "52", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "53", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "54", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "55", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "56", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "57", "metadata": {"taxonomy": ["Root", 
"k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "58", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "59", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "60", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "61", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "62", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "63", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "64", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "65", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "66", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "67", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "68", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "69", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "70", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "71", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "72", "metadata": {"taxonomy": ["Root", "k__Bacteria", 
"p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "73", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "74", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "75", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "76", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "77", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "78", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "79", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "80", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "81", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "82", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "83", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "84", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "85", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "86", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "87", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "88", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "89", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "90", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "91", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "92", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "93", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "94", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "95", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Mollicutes", "o__RF39", "f__"]}}, {"id": "96", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "97", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "98", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "99", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "100", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", 
"f__Lachnospiraceae"]}}, {"id": "101", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "102", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "103", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "104", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "105", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "106", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "107", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "108", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "109", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "110", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "111", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "112", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "113", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "114", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "115", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", 
"c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "116", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Deferribacteres", "c__Deferribacteres", "o__Deferribacterales", "f__Deferribacteraceae"]}}, {"id": "117", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "118", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "119", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "120", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "121", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "122", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "123", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "124", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "125", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "126", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "127", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "128", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Mollicutes", "o__RF39", "f__"]}}, {"id": "129", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "130", "metadata": {"taxonomy": ["Root", 
"k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "131", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "132", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Peptococcaceae"]}}, {"id": "133", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "134", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "135", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "136", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "137", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "138", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "139", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "140", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "141", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "142", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "143", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "144", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "145", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", 
"c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "146", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "147", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "148", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "149", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "150", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "151", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "152", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "153", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "154", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales"]}}, {"id": "155", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "156", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "157", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "158", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "159", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "160", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "161", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "162", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "163", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "164", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "165", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "166", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "167", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "168", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "169", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "170", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "171", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "172", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "173", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "174", 
"metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "175", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "176", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "177", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Clostridiaceae"]}}, {"id": "178", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "179", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "180", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "181", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "182", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "183", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "184", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "185", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "186", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "187", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "188", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "189", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", 
"f__Lachnospiraceae"]}}, {"id": "190", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "191", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "192", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "193", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "194", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "195", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "196", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "197", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "198", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "199", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Peptococcaceae"]}}, {"id": "200", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "201", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "202", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "203", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "204", "metadata": {"taxonomy": ["Root", "k__Bacteria", 
"p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "205", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "206", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "207", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "208", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "209", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "210", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "211", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "212", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "213", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "214", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "215", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "216", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "217", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "218", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "219", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "220", "metadata": {"taxonomy": 
["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "221", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "222", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "223", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "224", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "225", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "226", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "227", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "228", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "229", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "230", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "231", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "232", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "233", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "234", "metadata": {"taxonomy": ["Root", "k__Bacteria", 
"p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "235", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "236", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "237", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "238", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "239", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "240", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "241", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "242", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "243", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "244", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "245", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "246", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "247", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "248", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "249", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "250", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "251", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "252", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "253", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "254", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "255", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "256", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "257", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "258", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "259", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "260", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "261", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "262", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "263", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "264", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": 
"265", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "266", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "267", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "268", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Deltaproteobacteria", "o__Desulfovibrionales", "f__Desulfovibrionaceae"]}}, {"id": "269", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "270", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "271", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "272", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "273", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "274", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "275", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "276", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "277", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "278", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "279", "metadata": {"taxonomy": ["Root", "k__Bacteria", 
"p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "280", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "281", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "282", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "283", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "284", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "285", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "286", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Deltaproteobacteria", "o__Desulfovibrionales", "f__Desulfovibrionaceae"]}}, {"id": "287", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "288", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "289", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "290", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "291", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "292", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "293", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "294", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "295", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "296", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "297", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Bacillales", "f__Staphylococcaceae"]}}, {"id": "298", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "299", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "300", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "301", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "302", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "303", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "304", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "305", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "306", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "307", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "308", "metadata": 
{"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Prevotellaceae"]}}, {"id": "309", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "310", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "311", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "312", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "313", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "314", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "315", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "316", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "317", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Epsilonproteobacteria", "o__Campylobacterales", "f__Helicobacteraceae"]}}, {"id": "318", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "319", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "320", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "321", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "322", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "323", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "324", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "325", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "326", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "327", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "328", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "329", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "330", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "331", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "332", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "333", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "334", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "335", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "336", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "337", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "338", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "339", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", 
"c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "340", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "341", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "342", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "343", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "344", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "345", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "346", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "347", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "348", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "349", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "350", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "351", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "352", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "353", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes"]}}, {"id": "354", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales"]}}, {"id": "355", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "356", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "357", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "358", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "359", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Clostridiales Family XIII. Incertae Sedis"]}}, {"id": "360", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "361", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "362", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "363", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "364", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "365", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "366", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "367", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "368", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "369", "metadata": {"taxonomy": ["Root", "k__Bacteria", 
"p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "370", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "371", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "372", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "373", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "374", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "375", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "376", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "377", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "378", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "379", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "380", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "381", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "382", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "383", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": 
"384", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "385", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "386", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "387", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "388", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "389", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "390", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "391", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "392", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "393", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "394", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "395", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "396", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "397", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "398", "metadata": {"taxonomy": ["Root", "k__Bacteria", 
"p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "399", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "400", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "401", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "402", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "403", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "404", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "405", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Clostridiaceae"]}}, {"id": "406", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "407", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Turicibacterales", "f__Turicibacteraceae"]}}, {"id": "408", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "409", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "410", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "411", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "412", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, 
{"id": "413", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "414", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "415", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "416", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}], "format": "Biological Observation Matrix v0.9", "data": [[0, 6, 2.0], [0, 7, 1.0], [1, 8, 1.0], [2, 8, 1.0], [3, 3, 1.0], [4, 0, 1.0], [5, 1, 1.0], [5, 2, 1.0], [5, 4, 1.0], [6, 3, 1.0], [6, 7, 1.0], [7, 4, 1.0], [8, 0, 1.0], [8, 2, 1.0], [8, 7, 1.0], [8, 8, 1.0], [9, 3, 1.0], [10, 0, 1.0], [10, 1, 1.0], [11, 0, 2.0], [11, 1, 1.0], [12, 2, 1.0], [12, 7, 1.0], [13, 7, 1.0], [14, 5, 1.0], [15, 2, 1.0], [16, 6, 1.0], [17, 6, 1.0], [18, 6, 1.0], [19, 0, 1.0], [19, 3, 1.0], [20, 5, 1.0], [21, 1, 1.0], [22, 5, 2.0], [23, 7, 1.0], [24, 3, 1.0], [24, 7, 1.0], [25, 7, 1.0], [26, 4, 1.0], [27, 6, 1.0], [28, 6, 2.0], [29, 6, 1.0], [30, 7, 1.0], [31, 3, 3.0], [32, 5, 1.0], [33, 2, 1.0], [34, 7, 1.0], [35, 5, 1.0], [36, 5, 2.0], [37, 0, 1.0], [37, 2, 1.0], [38, 1, 1.0], [38, 3, 1.0], [38, 4, 1.0], [38, 8, 3.0], [39, 5, 5.0], [40, 3, 1.0], [40, 5, 2.0], [41, 5, 1.0], [42, 5, 1.0], [43, 0, 1.0], [44, 5, 2.0], [45, 4, 1.0], [46, 5, 1.0], [47, 0, 16.0], [47, 4, 12.0], [48, 0, 6.0], [48, 2, 4.0], [48, 4, 2.0], [49, 5, 3.0], [49, 6, 5.0], [49, 7, 2.0], [49, 8, 6.0], [50, 3, 1.0], [51, 3, 1.0], [52, 4, 1.0], [53, 0, 1.0], [54, 4, 1.0], [55, 4, 1.0], [56, 5, 1.0], [57, 8, 1.0], [58, 0, 1.0], [59, 3, 1.0], [59, 6, 4.0], [59, 7, 10.0], [59, 8, 37.0], [60, 5, 1.0], [61, 5, 1.0], [62, 4, 1.0], [63, 0, 1.0], [63, 1, 3.0], [63, 2, 1.0], [63, 3, 2.0], [63, 4, 1.0], [63, 5, 9.0], [63, 6, 2.0], [63, 7, 4.0], [63, 8, 5.0], [64, 5, 1.0], [65, 8, 1.0], [66, 2, 1.0], [67, 1, 1.0], [68, 5, 1.0], [69, 5, 1.0], [70, 
0, 2.0], [70, 1, 1.0], [70, 2, 10.0], [70, 3, 2.0], [70, 4, 24.0], [70, 7, 1.0], [70, 8, 1.0], [71, 3, 1.0], [72, 5, 1.0], [73, 4, 2.0], [73, 5, 2.0], [73, 6, 1.0], [73, 7, 4.0], [73, 8, 1.0], [74, 6, 2.0], [74, 7, 3.0], [74, 8, 2.0], [75, 2, 2.0], [75, 7, 1.0], [75, 8, 2.0], [76, 1, 1.0], [77, 1, 1.0], [78, 7, 2.0], [78, 8, 2.0], [79, 0, 1.0], [80, 7, 1.0], [81, 2, 1.0], [82, 2, 1.0], [83, 2, 1.0], [84, 3, 1.0], [84, 6, 1.0], [84, 7, 2.0], [84, 8, 19.0], [85, 2, 1.0], [86, 3, 1.0], [87, 3, 1.0], [88, 3, 1.0], [89, 6, 1.0], [89, 7, 3.0], [90, 7, 1.0], [91, 7, 1.0], [92, 1, 4.0], [92, 2, 4.0], [92, 4, 1.0], [92, 5, 2.0], [92, 7, 2.0], [92, 8, 1.0], [93, 7, 1.0], [94, 4, 1.0], [95, 2, 1.0], [96, 0, 1.0], [96, 1, 4.0], [96, 2, 2.0], [96, 3, 6.0], [97, 0, 2.0], [98, 8, 1.0], [99, 2, 1.0], [100, 0, 1.0], [100, 2, 2.0], [101, 6, 1.0], [102, 3, 1.0], [102, 6, 3.0], [102, 7, 1.0], [103, 3, 1.0], [104, 5, 1.0], [105, 5, 1.0], [106, 8, 1.0], [107, 1, 1.0], [108, 1, 1.0], [109, 0, 1.0], [109, 1, 1.0], [109, 3, 2.0], [109, 4, 4.0], [110, 6, 1.0], [111, 6, 5.0], [112, 5, 1.0], [113, 1, 2.0], [113, 2, 1.0], [114, 4, 1.0], [115, 0, 1.0], [116, 8, 1.0], [117, 2, 1.0], [117, 3, 1.0], [118, 1, 1.0], [119, 0, 1.0], [119, 3, 1.0], [119, 5, 1.0], [120, 0, 1.0], [121, 1, 1.0], [121, 8, 1.0], [122, 1, 1.0], [123, 0, 1.0], [123, 1, 3.0], [123, 2, 1.0], [124, 2, 1.0], [124, 7, 1.0], [125, 0, 1.0], [126, 7, 1.0], [127, 0, 1.0], [128, 5, 1.0], [129, 0, 2.0], [129, 1, 2.0], [129, 2, 2.0], [130, 4, 1.0], [131, 0, 1.0], [132, 0, 1.0], [132, 7, 1.0], [133, 2, 1.0], [134, 6, 1.0], [135, 6, 1.0], [136, 0, 1.0], [136, 2, 1.0], [137, 5, 1.0], [138, 0, 1.0], [139, 0, 1.0], [140, 5, 1.0], [141, 3, 1.0], [142, 5, 1.0], [143, 2, 2.0], [143, 7, 1.0], [144, 7, 2.0], [145, 3, 6.0], [145, 7, 1.0], [146, 5, 1.0], [147, 8, 1.0], [148, 0, 4.0], [148, 1, 9.0], [148, 3, 2.0], [148, 7, 2.0], [149, 7, 1.0], [150, 3, 1.0], [151, 8, 1.0], [152, 7, 1.0], [153, 3, 1.0], [153, 6, 1.0], [153, 7, 5.0], [153, 8, 2.0], 
[154, 8, 1.0], [155, 5, 1.0], [156, 6, 1.0], [157, 6, 1.0], [158, 0, 1.0], [158, 3, 1.0], [159, 1, 2.0], [160, 1, 1.0], [161, 7, 1.0], [161, 8, 1.0], [162, 0, 14.0], [162, 1, 1.0], [162, 2, 14.0], [162, 3, 1.0], [163, 2, 1.0], [164, 7, 1.0], [164, 8, 1.0], [165, 0, 1.0], [165, 2, 1.0], [166, 0, 1.0], [166, 2, 1.0], [166, 3, 1.0], [167, 0, 2.0], [167, 1, 3.0], [167, 2, 8.0], [167, 4, 1.0], [168, 7, 1.0], [169, 3, 1.0], [170, 0, 1.0], [170, 4, 1.0], [171, 7, 1.0], [172, 4, 1.0], [173, 4, 1.0], [174, 8, 1.0], [175, 6, 1.0], [175, 8, 1.0], [176, 6, 1.0], [177, 0, 1.0], [177, 4, 10.0], [178, 6, 2.0], [179, 5, 1.0], [180, 2, 1.0], [180, 5, 2.0], [180, 7, 1.0], [181, 4, 9.0], [181, 5, 1.0], [182, 3, 1.0], [183, 3, 1.0], [184, 3, 1.0], [185, 5, 2.0], [186, 1, 1.0], [187, 3, 1.0], [187, 5, 1.0], [187, 7, 1.0], [187, 8, 1.0], [188, 7, 1.0], [189, 2, 1.0], [190, 3, 1.0], [190, 7, 1.0], [191, 2, 1.0], [192, 4, 4.0], [192, 8, 2.0], [193, 2, 1.0], [194, 5, 1.0], [195, 2, 1.0], [196, 2, 1.0], [197, 7, 1.0], [198, 3, 1.0], [199, 5, 1.0], [200, 5, 1.0], [201, 2, 1.0], [202, 0, 2.0], [202, 1, 1.0], [202, 3, 5.0], [202, 5, 1.0], [203, 0, 29.0], [203, 1, 1.0], [203, 2, 10.0], [204, 7, 1.0], [205, 2, 1.0], [206, 8, 1.0], [207, 8, 1.0], [208, 2, 2.0], [209, 3, 1.0], [210, 6, 13.0], [211, 6, 1.0], [212, 3, 2.0], [213, 2, 1.0], [214, 5, 2.0], [214, 7, 1.0], [215, 2, 1.0], [216, 4, 2.0], [216, 7, 5.0], [217, 3, 1.0], [217, 4, 4.0], [218, 0, 1.0], [218, 5, 4.0], [219, 1, 1.0], [220, 0, 2.0], [220, 2, 2.0], [220, 3, 1.0], [221, 2, 1.0], [222, 3, 4.0], [223, 8, 1.0], [224, 0, 1.0], [224, 2, 1.0], [225, 0, 2.0], [225, 1, 2.0], [225, 3, 1.0], [226, 0, 1.0], [227, 3, 1.0], [228, 4, 1.0], [229, 0, 1.0], [229, 1, 4.0], [229, 3, 1.0], [230, 0, 1.0], [230, 3, 5.0], [230, 4, 17.0], [230, 5, 20.0], [231, 5, 1.0], [232, 5, 2.0], [233, 7, 1.0], [234, 8, 1.0], [235, 7, 1.0], [236, 7, 1.0], [237, 5, 1.0], [238, 0, 1.0], [238, 3, 1.0], [239, 7, 1.0], [240, 2, 1.0], [240, 7, 1.0], [241, 2, 1.0], [242, 0, 
1.0], [243, 6, 1.0], [244, 7, 1.0], [245, 5, 1.0], [246, 0, 1.0], [247, 5, 7.0], [247, 7, 2.0], [247, 8, 2.0], [248, 6, 1.0], [249, 3, 1.0], [250, 5, 1.0], [250, 7, 1.0], [251, 5, 1.0], [252, 5, 1.0], [252, 6, 1.0], [253, 4, 1.0], [254, 0, 1.0], [254, 2, 1.0], [255, 7, 1.0], [256, 3, 1.0], [257, 3, 2.0], [258, 3, 1.0], [258, 5, 1.0], [259, 3, 2.0], [260, 7, 1.0], [261, 2, 1.0], [262, 3, 1.0], [263, 2, 1.0], [264, 0, 1.0], [265, 2, 1.0], [266, 8, 1.0], [267, 6, 1.0], [267, 8, 1.0], [268, 5, 1.0], [269, 1, 1.0], [269, 4, 1.0], [270, 8, 1.0], [271, 7, 1.0], [272, 3, 1.0], [273, 0, 1.0], [273, 7, 2.0], [274, 8, 1.0], [275, 0, 1.0], [275, 1, 5.0], [275, 2, 3.0], [275, 3, 2.0], [275, 8, 1.0], [276, 2, 2.0], [277, 5, 1.0], [278, 5, 6.0], [278, 7, 3.0], [279, 4, 1.0], [280, 3, 1.0], [281, 5, 1.0], [282, 1, 2.0], [282, 3, 2.0], [282, 7, 1.0], [283, 7, 1.0], [284, 4, 1.0], [285, 4, 1.0], [286, 6, 2.0], [286, 8, 1.0], [287, 6, 5.0], [288, 4, 1.0], [289, 1, 1.0], [289, 6, 1.0], [290, 6, 1.0], [291, 1, 1.0], [292, 8, 1.0], [293, 3, 1.0], [294, 1, 1.0], [294, 8, 1.0], [295, 7, 1.0], [296, 7, 1.0], [297, 8, 2.0], [298, 7, 1.0], [299, 5, 1.0], [300, 3, 1.0], [301, 5, 1.0], [302, 7, 1.0], [303, 1, 1.0], [304, 5, 1.0], [304, 7, 1.0], [305, 1, 1.0], [306, 5, 1.0], [307, 2, 1.0], [308, 7, 1.0], [309, 2, 2.0], [310, 5, 1.0], [311, 1, 1.0], [312, 0, 1.0], [312, 2, 2.0], [312, 5, 6.0], [313, 7, 1.0], [314, 8, 1.0], [315, 1, 2.0], [315, 3, 7.0], [315, 7, 2.0], [316, 7, 1.0], [317, 4, 5.0], [317, 5, 2.0], [318, 3, 1.0], [319, 0, 11.0], [319, 1, 12.0], [319, 2, 5.0], [319, 3, 13.0], [319, 4, 2.0], [320, 5, 1.0], [321, 6, 1.0], [321, 7, 1.0], [321, 8, 1.0], [322, 6, 1.0], [323, 6, 1.0], [324, 3, 1.0], [325, 6, 1.0], [326, 2, 1.0], [327, 1, 1.0], [327, 8, 1.0], [328, 5, 1.0], [329, 5, 1.0], [330, 4, 1.0], [331, 8, 1.0], [332, 0, 1.0], [333, 2, 2.0], [334, 1, 1.0], [335, 5, 1.0], [336, 1, 2.0], [336, 2, 1.0], [337, 1, 1.0], [338, 8, 1.0], [339, 2, 1.0], [340, 5, 1.0], [341, 0, 3.0], [342, 7, 
1.0], [343, 7, 1.0], [344, 4, 2.0], [345, 8, 1.0], [346, 6, 1.0], [347, 0, 1.0], [347, 4, 2.0], [347, 6, 2.0], [347, 8, 3.0], [348, 5, 1.0], [349, 6, 2.0], [349, 7, 1.0], [350, 4, 3.0], [351, 1, 1.0], [352, 3, 1.0], [353, 3, 1.0], [354, 5, 1.0], [355, 2, 1.0], [356, 6, 1.0], [357, 3, 1.0], [358, 6, 1.0], [359, 5, 1.0], [359, 6, 1.0], [360, 7, 1.0], [361, 4, 2.0], [361, 6, 1.0], [362, 7, 1.0], [363, 7, 1.0], [364, 2, 1.0], [365, 6, 1.0], [366, 5, 1.0], [367, 1, 1.0], [368, 7, 1.0], [368, 8, 1.0], [369, 7, 1.0], [370, 3, 2.0], [371, 1, 2.0], [371, 2, 2.0], [371, 3, 4.0], [371, 5, 5.0], [371, 6, 1.0], [371, 7, 5.0], [372, 1, 1.0], [373, 7, 1.0], [374, 6, 1.0], [375, 2, 2.0], [376, 4, 1.0], [377, 5, 5.0], [377, 6, 9.0], [377, 7, 5.0], [377, 8, 3.0], [378, 0, 2.0], [378, 1, 18.0], [378, 3, 1.0], [378, 6, 21.0], [378, 7, 4.0], [378, 8, 4.0], [379, 3, 1.0], [380, 1, 2.0], [380, 5, 1.0], [381, 2, 1.0], [382, 0, 1.0], [382, 2, 1.0], [383, 2, 1.0], [384, 2, 1.0], [384, 7, 1.0], [385, 2, 1.0], [385, 8, 1.0], [386, 6, 1.0], [387, 1, 19.0], [387, 2, 2.0], [387, 4, 2.0], [387, 6, 3.0], [388, 7, 1.0], [389, 2, 1.0], [390, 2, 1.0], [391, 3, 1.0], [392, 2, 1.0], [393, 0, 1.0], [393, 1, 1.0], [393, 2, 1.0], [394, 1, 1.0], [394, 2, 2.0], [394, 4, 9.0], [394, 5, 1.0], [394, 6, 1.0], [394, 7, 1.0], [394, 8, 3.0], [395, 6, 2.0], [396, 1, 2.0], [396, 2, 3.0], [396, 3, 1.0], [396, 4, 4.0], [396, 6, 5.0], [396, 8, 4.0], [397, 6, 1.0], [398, 8, 1.0], [399, 1, 2.0], [399, 2, 1.0], [399, 3, 2.0], [399, 6, 1.0], [399, 7, 1.0], [399, 8, 1.0], [400, 1, 1.0], [401, 3, 1.0], [402, 1, 1.0], [403, 6, 1.0], [404, 6, 4.0], [405, 1, 1.0], [405, 6, 3.0], [406, 8, 1.0], [407, 3, 9.0], [407, 6, 3.0], [408, 4, 1.0], [409, 8, 1.0], [410, 0, 1.0], [411, 2, 1.0], [412, 6, 8.0], [412, 7, 10.0], [412, 8, 2.0], [413, 2, 1.0], [413, 3, 3.0], [414, 1, 1.0], [415, 3, 1.0], [416, 3, 1.0]], "columns": [{"id": "PC.354", "metadata": null}, {"id": "PC.355", "metadata": null}, {"id": "PC.356", "metadata": null}, {"id": 
"PC.481", "metadata": null}, {"id": "PC.593", "metadata": null}, {"id": "PC.607", "metadata": null}, {"id": "PC.634", "metadata": null}, {"id": "PC.635", "metadata": null}, {"id": "PC.636", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2520", "matrix_type": "sparse", "shape": [417, 9], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2011-12-20T17:28:05.299943", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
tutorial_map = """#SampleID BarcodeSequence LinkerPrimerSequence Treatment DOB Description
#Example mapping file for the QIIME analysis package. These 9 samples are from a study of the effects of exercise and diet on mouse cardiac physiology (Crawford, et al, PNAS, 2009).
PC.354 AGCACGAGCCTA YATGCTGCCTCCCGTAGGAGT Control 20061218 Control_mouse_I.D._354
PC.355 AACTCGTCGATG YATGCTGCCTCCCGTAGGAGT Control 20061218 Control_mouse_I.D._355
PC.356 ACAGACCACTCA YATGCTGCCTCCCGTAGGAGT Control 20061126 Control_mouse_I.D._356
PC.481 ACCAGCGACTAG YATGCTGCCTCCCGTAGGAGT Control 20070314 Control_mouse_I.D._481
PC.593 AGCAGCACTTGT YATGCTGCCTCCCGTAGGAGT Control 20071210 Control_mouse_I.D._593
PC.607 AACTGTGCGTAC YATGCTGCCTCCCGTAGGAGT Fast 20071112 Fasting_mouse_I.D._607
PC.634 ACAGAGTCGGCT YATGCTGCCTCCCGTAGGAGT Fast 20080116 Fasting_mouse_I.D._634
PC.635 ACCGCAGAGTCA YATGCTGCCTCCCGTAGGAGT Fast 20080116 Fasting_mouse_I.D._635
Photobomb what? this sample shouldn't be in here!
PC.636 ACGGTGAGTGTC YATGCTGCCTCCCGTAGGAGT Fast 20080116 Fasting_mouse_I.D._636
"""
if __name__ == "__main__":
    # NOTE(review): main() is not visible in this chunk; presumably it is
    # defined earlier in this module — confirm before relying on script use.
    main()
| gpl-2.0 |
petersoncarter/453-desktop-emulator | src/simulator/mtTkinter.py | 12 | 8683 | '''Thread-safe version of Tkinter.
Copyright (c) 2009, Allen B. Taylor
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser Public License for more details.
You should have received a copy of the GNU Lesser Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Usage:
import mtTkinter as Tkinter
# Use "Tkinter." as usual.
or
from mtTkinter import *
# Use Tkinter module definitions as usual.
This module modifies the original Tkinter module in memory, making all
functionality thread-safe. It does this by wrapping the Tk class' tk
instance with an object that diverts calls through an event queue when
the call is issued from a thread other than the thread in which the Tk
instance was created. The events are processed in the creation thread
via an 'after' event.
The modified Tk class accepts two additional keyword parameters on its
__init__ method:
mtDebug:
0 = No debug output (default)
1 = Minimal debug output
...
9 = Full debug output
mtCheckPeriod:
Amount of time in milliseconds (default 100) between checks for
out-of-thread events when things are otherwise idle. Decreasing
this value can improve GUI responsiveness, but at the expense of
consuming more CPU cycles.
Note that, because it modifies the original Tkinter module (in memory),
other modules that use Tkinter (e.g., Pmw) reap the benefits automagically
as long as mtTkinter is imported at some point before extra threads are
created.
Author: Allen B. Taylor, a.b.taylor@gmail.com
'''
from Tkinter import *
import threading
import Queue
class _Tk(object):
    """
    Wrapper for underlying attribute tk of class Tk.

    Intercepts attribute access on the real Tcl interpreter object so that
    calls originating from threads other than the creation thread can be
    marshalled through an event queue (see _TkAttr and _CheckEvents).
    """
    def __init__(self, tk, mtDebug = 0, mtCheckPeriod = 10):
        # The real (unwrapped) tk interpreter object.
        self._tk = tk

        # Create the incoming event queue.  Size 1, so a producer thread
        # blocks until the main thread has consumed the previous event.
        self._eventQueue = Queue.Queue(1)

        # Identify the thread from which this object is being created so we can
        # tell later whether an event is coming from another thread.
        self._creationThread = threading.currentThread()

        # Store remaining values.
        self._debug = mtDebug              # 0 = quiet ... 9 = full debug output
        self._checkPeriod = mtCheckPeriod  # ms between idle queue checks

    def __getattr__(self, name):
        # Divert attribute accesses to a wrapper around the underlying tk
        # object.  A fresh _TkAttr is built on every access.
        return _TkAttr(self, getattr(self._tk, name))
class _TkAttr(object):
    """
    Thread-safe callable attribute wrapper.

    Wraps a single attribute fetched from the real tk object.  Calling it
    either invokes the attribute directly (when in the creation thread) or
    marshals the call through the owning _Tk's event queue and blocks until
    the main thread posts a response.
    """
    def __init__(self, tk, attr):
        self._tk = tk      # the owning _Tk wrapper
        self._attr = attr  # the real underlying attribute (a callable)

    def __call__(self, *args, **kwargs):
        """
        Thread-safe method invocation.
        Diverts out-of-thread calls through the event queue.
        Forwards all other method calls to the underlying tk object directly.
        """
        # Check if we're in the creation thread.
        if threading.currentThread() == self._tk._creationThread:
            # We're in the creation thread; just call the event directly.
            if self._tk._debug >= 8 or \
               self._tk._debug >= 3 and self._attr.__name__ == 'call' and \
               len(args) >= 1 and args[0] == 'after':
                print 'Calling event directly:', \
                    self._attr.__name__, args, kwargs
            return self._attr(*args, **kwargs)
        else:
            # We're in a different thread than the creation thread; enqueue
            # the event, and then wait for the response.  The size-1 response
            # queue makes this a blocking rendezvous with the main thread.
            responseQueue = Queue.Queue(1)
            if self._tk._debug >= 1:
                print 'Marshalling event:', self._attr.__name__, args, kwargs
            self._tk._eventQueue.put((self._attr, args, kwargs, responseQueue))
            isException, response = responseQueue.get()

            # Handle the response, whether it's a normal return value or
            # an exception.
            if isException:
                exType, exValue, exTb = response
                # Python 2 three-argument raise: re-raise in the caller's
                # thread with the original traceback preserved.
                raise exType, exValue, exTb
            else:
                return response
# Define a hook for class Tk's __init__ method.
def _Tk__init__(self, *args, **kwargs):
    """Replacement Tk.__init__ that wraps self.tk with the thread-safe _Tk."""
    # We support some new keyword arguments that the original __init__ method
    # doesn't expect, so separate those out before doing anything else.
    new_kwnames = ('mtCheckPeriod', 'mtDebug')
    new_kwargs = {}
    for name, value in kwargs.items():
        if name in new_kwnames:
            new_kwargs[name] = value
            del kwargs[name]

    # Call the original __init__ method, creating the internal tk member.
    self.__original__init__mtTkinter(*args, **kwargs)

    # Replace the internal tk member with a wrapper that handles calls from
    # other threads.
    self.tk = _Tk(self.tk, **new_kwargs)

    # Set up the first event to check for out-of-thread events.
    self.after_idle(_CheckEvents, self)

# Replace Tk's original __init__ with the hook, keeping a reference to the
# original under a private name so the hook can chain to it (module-level
# side effect: this monkey-patches Tkinter for all importers).
Tk.__original__init__mtTkinter = Tk.__init__
Tk.__init__ = _Tk__init__
def _CheckEvents(tk):
    """Event checker event.

    Runs in the Tk creation thread.  Drains the out-of-thread call queue,
    executing each marshalled call and posting its result (or exception)
    back on the caller's response queue, then reschedules itself.
    """
    used = False
    try:
        # Process all enqueued events, then exit.
        while True:
            try:
                # Get an event request from the queue.
                method, args, kwargs, responseQueue = \
                    tk.tk._eventQueue.get_nowait()
            except:
                # No more events to process.
                # NOTE(review): bare except is meant to catch Queue.Empty,
                # but it will also silently hide any other error here.
                break
            else:
                # Call the event with the given arguments, and then return
                # the result back to the caller via the response queue.
                used = True
                if tk.tk._debug >= 2:
                    print 'Calling event from main thread:', \
                        method.__name__, args, kwargs
                try:
                    responseQueue.put((False, method(*args, **kwargs)))
                except SystemExit, ex:
                    # Let interpreter shutdown propagate instead of being
                    # shipped back to the calling thread.
                    raise SystemExit, ex
                except Exception, ex:
                    # Calling the event caused an exception; return the
                    # exception back to the caller so that it can be raised
                    # in the caller's thread.
                    from sys import exc_info
                    exType, exValue, exTb = exc_info()
                    responseQueue.put((True, (exType, exValue, exTb)))
    finally:
        # Schedule to check again. If we just processed an event, check
        # immediately; if we didn't, check later.
        if used:
            tk.after_idle(_CheckEvents, tk)
        else:
            tk.after(tk.tk._checkPeriod, _CheckEvents, tk)
# Test thread entry point.
def _testThread(root):
    """Build the demo GUI from a worker thread to exercise the marshalling."""
    text = "This is Tcl/Tk version %s" % TclVersion
    if TclVersion >= 8.1:
        try:
            # Python 2 unicode() call; narrow builds without unicode raise
            # NameError, handled below.
            text = text + unicode("\nThis should be a cedilla: \347",
                                  "iso-8859-1")
        except NameError:
            pass # no unicode support
    try:
        if root.globalgetvar('tcl_platform(threaded)'):
            text = text + "\nTcl is built with thread support"
        else:
            raise RuntimeError
    except:
        # Either the variable is unset or it was falsy; report no support.
        text = text + "\nTcl is NOT built with thread support"
    text = text + "\nmtTkinter works with or without Tcl thread support"
    label = Label(root, text=text)
    label.pack()
    button = Button(root, text="Click me!",
                    command=lambda root=root: root.button.configure(
                        text="[%s]" % root.button['text']))
    button.pack()
    root.button = button
    quit = Button(root, text="QUIT", command=root.destroy)
    quit.pack()
    # The following three commands are needed so the window pops
    # up on top on Windows...
    root.iconify()
    root.update()
    root.deiconify()
    # Simulate button presses...
    button.invoke()
    root.after(1000, _pressOk, root, button)
# Test button continuous press event.
def _pressOk(root, button):
    """Invoke *button* and reschedule itself every second until shutdown."""
    button.invoke()
    try:
        root.after(1000, _pressOk, root, button)
    except:
        pass # Likely we're exiting
# Test. Mostly borrowed from the Tkinter module, but the important bits moved
# into a separate thread.
if __name__ == '__main__':
    import threading
    # mtDebug=1 prints every marshalled out-of-thread event.
    root = Tk(mtDebug = 1)
    # The GUI is built from a worker thread; mainloop runs here, in the
    # creation thread, servicing the marshalled calls.
    thread = threading.Thread(target = _testThread, args=(root,))
    thread.start()
    root.mainloop()
    thread.join()
| mit |
brendandahl/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/example/abort_wsh.py | 465 | 1776 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
    """Accept every connection; no extra handshake constraints are imposed."""
    # Intentionally a no-op for this example handler.
    return None
def web_socket_transfer_data(request):
    """Abort the connection immediately instead of transferring any data."""
    reason = "Aborted in web_socket_transfer_data"
    raise handshake.AbortedByUserException(reason)
# vi:sts=4 sw=4 et
| mpl-2.0 |
yousafsyed/casperjs | bin/Lib/test/test_xml_etree.py | 72 | 97748 | # IMPORTANT: the same tests are run from "test_xml_etree_c" in order
# to ensure consistency between the C implementation and the Python
# implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
import html
import io
import operator
import pickle
import sys
import types
import unittest
import weakref
from itertools import product
from test import support
from test.support import TESTFN, findfile, import_fresh_module, gc_collect
# pyET is the pure-Python implementation.
#
# ET is pyET in test_xml_etree and is the C accelerated version in
# test_xml_etree_c.
#
# Both are monkey-patched in by the test drivers; they stay None until then.
pyET = None
ET = None

# Locate the XML fixtures shipped with the test suite.
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
try:
    # Skip the whole suite when the filename cannot round-trip through UTF-8
    # (e.g. an unencodable test directory path).
    SIMPLE_XMLFILE.encode("utf-8")
except UnicodeEncodeError:
    raise unittest.SkipTest("filename is not encodable to utf8")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS_ELEMS = """
<root>
<h:table xmlns:h="hello">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="foo">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
class ModuleTest(unittest.TestCase):
    """Sanity checks for the layout of the xml.etree package."""
    # TODO: this should be removed once we get rid of the global module vars

    def test_sanity(self):
        # Each public submodule must be importable.
        import xml.etree.ElementTree
        import xml.etree.ElementInclude
        import xml.etree.ElementPath
def serialize(elem, to_string=True, encoding='unicode', **options):
if encoding != 'unicode':
file = io.BytesIO()
else:
file = io.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, encoding=encoding, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize_list(seq):
    """Return the list of tag names for the elements in *seq*, in order."""
    return list(map(operator.attrgetter("tag"), seq))
class ElementTestCase:
    """Mixin with helpers shared by the pyET/cET cross-implementation tests."""

    @classmethod
    def setUpClass(cls):
        # The implementations under test; a set, so pyET == ET collapses to one.
        cls.modules = {pyET, ET}

    def pickleRoundTrip(self, obj, name, dumper, loader):
        """Pickle *obj* while sys.modules[name] is *dumper*, unpickle while it
        is *loader*, and return the result; sys.modules is always restored."""
        save_m = sys.modules[name]
        try:
            sys.modules[name] = dumper
            temp = pickle.dumps(obj)
            sys.modules[name] = loader
            result = pickle.loads(temp)
        except pickle.PicklingError as pe:
            # pyET must be second, because pyET may be (equal to) ET.
            human = dict([(ET, "cET"), (pyET, "pyET")])
            raise support.TestFailed("Failed to round-trip %r from %r to %r"
                                     % (obj,
                                        human.get(dumper, dumper),
                                        human.get(loader, loader))) from pe
        finally:
            sys.modules[name] = save_m
        return result

    def assertEqualElements(self, alice, bob):
        # Recursive structural equality: type, children, then
        # tag/tail/text/attrib of every node.
        self.assertIsInstance(alice, (ET.Element, pyET.Element))
        self.assertIsInstance(bob, (ET.Element, pyET.Element))
        self.assertEqual(len(list(alice)), len(list(bob)))
        for x, y in zip(alice, bob):
            self.assertEqualElements(x, y)
        properties = operator.attrgetter('tag', 'tail', 'text', 'attrib')
        self.assertEqual(properties(alice), properties(bob))
# --------------------------------------------------------------------
# element tree tests
class ElementTreeTest(unittest.TestCase):
    """Tests of the core ElementTree API, run against the implementation
    bound to the module-global ET (pure Python or C accelerated).

    Repairs applied: character/entity references and hex escapes inside
    string literals had been flattened to raw characters by an earlier
    HTML-decoding pass (see test_cdata, test_methods, test_entity,
    test_iterparse); the original escaped forms are restored, since the
    flattened forms were invalid XML input or invalid bytes literals.
    """

    def serialize_check(self, elem, expected):
        # Helper: *elem* must serialize exactly to *expected*.
        self.assertEqual(serialize(elem), expected)

    def test_interface(self):
        # Test element tree interface.

        def check_string(string):
            len(string)
            for char in string:
                self.assertEqual(len(char), 1,
                        msg="expected one-character string, got %r" % char)
            new_string = string + ""
            new_string = string + " "
            string[:0]

        def check_mapping(mapping):
            len(mapping)
            keys = mapping.keys()
            items = mapping.items()
            for key in keys:
                item = mapping[key]
            mapping["key"] = "value"
            self.assertEqual(mapping["key"], "value",
                    msg="expected value string, got %r" % mapping["key"])

        def check_element(element):
            self.assertTrue(ET.iselement(element), msg="not an element")
            self.assertTrue(hasattr(element, "tag"), msg="no tag member")
            self.assertTrue(hasattr(element, "attrib"), msg="no attrib member")
            self.assertTrue(hasattr(element, "text"), msg="no text member")
            self.assertTrue(hasattr(element, "tail"), msg="no tail member")

            check_string(element.tag)
            check_mapping(element.attrib)
            if element.text is not None:
                check_string(element.text)
            if element.tail is not None:
                check_string(element.tail)
            for elem in element:
                check_element(elem)

        element = ET.Element("tag")
        check_element(element)
        tree = ET.ElementTree(element)
        check_element(tree.getroot())
        element = ET.Element("t\xe4g", key="value")
        tree = ET.ElementTree(element)
        self.assertRegex(repr(element), r"^<Element 't\xe4g' at 0x.*>$")
        element = ET.Element("tag", key="value")

        # Make sure all standard element methods exist.

        def check_method(method):
            self.assertTrue(hasattr(method, '__call__'),
                    msg="%s not callable" % method)

        check_method(element.append)
        check_method(element.extend)
        check_method(element.insert)
        check_method(element.remove)
        check_method(element.getchildren)
        check_method(element.find)
        check_method(element.iterfind)
        check_method(element.findall)
        check_method(element.findtext)
        check_method(element.clear)
        check_method(element.get)
        check_method(element.set)
        check_method(element.keys)
        check_method(element.items)
        check_method(element.iter)
        check_method(element.itertext)
        check_method(element.getiterator)

        # These methods return an iterable. See bug 6472.

        def check_iter(it):
            check_method(it.__next__)

        check_iter(element.iterfind("tag"))
        check_iter(element.iterfind("*"))
        check_iter(tree.iterfind("tag"))
        check_iter(tree.iterfind("*"))

        # These aliases are provided:

        self.assertEqual(ET.XML, ET.fromstring)
        self.assertEqual(ET.PI, ET.ProcessingInstruction)

    def test_simpleops(self):
        # Basic method sanity checks.

        elem = ET.XML("<body><tag/></body>")
        self.serialize_check(elem, '<body><tag /></body>')
        e = ET.Element("tag2")
        elem.append(e)
        self.serialize_check(elem, '<body><tag /><tag2 /></body>')
        elem.remove(e)
        self.serialize_check(elem, '<body><tag /></body>')
        elem.insert(0, e)
        self.serialize_check(elem, '<body><tag2 /><tag /></body>')
        elem.remove(e)
        elem.extend([e])
        self.serialize_check(elem, '<body><tag /><tag2 /></body>')
        elem.remove(e)

        element = ET.Element("tag", key="value")
        self.serialize_check(element, '<tag key="value" />') # 1
        subelement = ET.Element("subtag")
        element.append(subelement)
        self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 2
        element.insert(0, subelement)
        self.serialize_check(element,
                '<tag key="value"><subtag /><subtag /></tag>') # 3
        element.remove(subelement)
        self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 4
        element.remove(subelement)
        self.serialize_check(element, '<tag key="value" />') # 5
        with self.assertRaises(ValueError) as cm:
            element.remove(subelement)
        self.assertEqual(str(cm.exception), 'list.remove(x): x not in list')
        self.serialize_check(element, '<tag key="value" />') # 6
        element[0:0] = [subelement, subelement, subelement]
        self.serialize_check(element[1], '<subtag />')
        self.assertEqual(element[1:9], [element[1], element[2]])
        self.assertEqual(element[:9:2], [element[0], element[2]])
        del element[1:2]
        self.serialize_check(element,
                '<tag key="value"><subtag /><subtag /></tag>')

    def test_cdata(self):
        # Test CDATA handling (etc).

        # Restored: the middle document spells "hello" with character
        # references, which must serialize identically to plain text.
        self.serialize_check(ET.XML("<tag>hello</tag>"),
                '<tag>hello</tag>')
        self.serialize_check(ET.XML("<tag>&#104;&#101;&#108;&#108;&#111;</tag>"),
                '<tag>hello</tag>')
        self.serialize_check(ET.XML("<tag><![CDATA[hello]]></tag>"),
                '<tag>hello</tag>')

    def test_file_init(self):
        # ElementTree(file=...) accepts both file objects and file names.
        stringfile = io.BytesIO(SAMPLE_XML.encode("utf-8"))
        tree = ET.ElementTree(file=stringfile)
        self.assertEqual(tree.find("tag").tag, 'tag')
        self.assertEqual(tree.find("section/tag").tag, 'tag')

        tree = ET.ElementTree(file=SIMPLE_XMLFILE)
        self.assertEqual(tree.find("element").tag, 'element')
        self.assertEqual(tree.find("element/../empty-element").tag,
                'empty-element')

    def test_path_cache(self):
        # Check that the path cache behaves sanely.
        from xml.etree import ElementPath

        elem = ET.XML(SAMPLE_XML)
        for i in range(10): ET.ElementTree(elem).find('./'+str(i))
        cache_len_10 = len(ElementPath._cache)
        for i in range(10): ET.ElementTree(elem).find('./'+str(i))
        self.assertEqual(len(ElementPath._cache), cache_len_10)
        for i in range(20): ET.ElementTree(elem).find('./'+str(i))
        self.assertGreater(len(ElementPath._cache), cache_len_10)
        for i in range(600): ET.ElementTree(elem).find('./'+str(i))
        self.assertLess(len(ElementPath._cache), 500)

    def test_copy(self):
        # Test copy handling (etc).

        import copy
        e1 = ET.XML("<tag>hello<foo/></tag>")
        e2 = copy.copy(e1)       # shallow: shares children with e1
        e3 = copy.deepcopy(e1)   # deep: independent tree
        e1.find("foo").tag = "bar"
        self.serialize_check(e1, '<tag>hello<bar /></tag>')
        self.serialize_check(e2, '<tag>hello<bar /></tag>')
        self.serialize_check(e3, '<tag>hello<foo /></tag>')

    def test_attrib(self):
        # Test attribute handling.

        elem = ET.Element("tag")
        elem.get("key") # 1.1
        self.assertEqual(elem.get("key", "default"), 'default') # 1.2

        elem.set("key", "value")
        self.assertEqual(elem.get("key"), 'value') # 1.3

        elem = ET.Element("tag", key="value")
        self.assertEqual(elem.get("key"), 'value') # 2.1
        self.assertEqual(elem.attrib, {'key': 'value'}) # 2.2

        attrib = {"key": "value"}
        elem = ET.Element("tag", attrib)
        attrib.clear() # check for aliasing issues
        self.assertEqual(elem.get("key"), 'value') # 3.1
        self.assertEqual(elem.attrib, {'key': 'value'}) # 3.2

        attrib = {"key": "value"}
        elem = ET.Element("tag", **attrib)
        attrib.clear() # check for aliasing issues
        self.assertEqual(elem.get("key"), 'value') # 4.1
        self.assertEqual(elem.attrib, {'key': 'value'}) # 4.2

        elem = ET.Element("tag", {"key": "other"}, key="value")
        self.assertEqual(elem.get("key"), 'value') # 5.1
        self.assertEqual(elem.attrib, {'key': 'value'}) # 5.2

        elem = ET.Element('test')
        elem.text = "aa"
        elem.set('testa', 'testval')
        elem.set('testb', 'test2')
        self.assertEqual(ET.tostring(elem),
                b'<test testa="testval" testb="test2">aa</test>')
        self.assertEqual(sorted(elem.keys()), ['testa', 'testb'])
        self.assertEqual(sorted(elem.items()),
                [('testa', 'testval'), ('testb', 'test2')])
        self.assertEqual(elem.attrib['testb'], 'test2')
        elem.attrib['testb'] = 'test1'
        elem.attrib['testc'] = 'test2'
        self.assertEqual(ET.tostring(elem),
                b'<test testa="testval" testb="test1" testc="test2">aa</test>')

    def test_makeelement(self):
        # Test makeelement handling.

        elem = ET.Element("tag")
        attrib = {"key": "value"}
        subelem = elem.makeelement("subtag", attrib)
        self.assertIsNot(subelem.attrib, attrib, msg="attrib aliasing")
        elem.append(subelem)
        self.serialize_check(elem, '<tag><subtag key="value" /></tag>')

        elem.clear()
        self.serialize_check(elem, '<tag />')
        elem.append(subelem)
        self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
        elem.extend([subelem, subelem])
        self.serialize_check(elem,
            '<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>')
        elem[:] = [subelem]
        self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
        elem[:] = tuple([subelem])
        self.serialize_check(elem, '<tag><subtag key="value" /></tag>')

    def test_parsefile(self):
        # Test parsing from file.

        tree = ET.parse(SIMPLE_XMLFILE)
        stream = io.StringIO()
        tree.write(stream, encoding='unicode')
        self.assertEqual(stream.getvalue(),
                '<root>\n'
                '  <element key="value">text</element>\n'
                '  <element>text</element>tail\n'
                '  <empty-element />\n'
                '</root>')
        tree = ET.parse(SIMPLE_NS_XMLFILE)
        stream = io.StringIO()
        tree.write(stream, encoding='unicode')
        self.assertEqual(stream.getvalue(),
                '<ns0:root xmlns:ns0="namespace">\n'
                '  <ns0:element key="value">text</ns0:element>\n'
                '  <ns0:element>text</ns0:element>tail\n'
                '  <ns0:empty-element />\n'
                '</ns0:root>')

        with open(SIMPLE_XMLFILE) as f:
            data = f.read()

        parser = ET.XMLParser()
        self.assertRegex(parser.version, r'^Expat ')
        parser.feed(data)
        self.serialize_check(parser.close(),
                '<root>\n'
                '  <element key="value">text</element>\n'
                '  <element>text</element>tail\n'
                '  <empty-element />\n'
                '</root>')

        target = ET.TreeBuilder()
        parser = ET.XMLParser(target=target)
        parser.feed(data)
        self.serialize_check(parser.close(),
                '<root>\n'
                '  <element key="value">text</element>\n'
                '  <element>text</element>tail\n'
                '  <empty-element />\n'
                '</root>')

    def test_parseliteral(self):
        # Parsing from strings / string lists, and the tostring family.
        element = ET.XML("<html><body>text</body></html>")
        self.assertEqual(ET.tostring(element, encoding='unicode'),
                '<html><body>text</body></html>')
        element = ET.fromstring("<html><body>text</body></html>")
        self.assertEqual(ET.tostring(element, encoding='unicode'),
                '<html><body>text</body></html>')
        sequence = ["<html><body>", "text</bo", "dy></html>"]
        element = ET.fromstringlist(sequence)
        self.assertEqual(ET.tostring(element),
                b'<html><body>text</body></html>')
        self.assertEqual(b"".join(ET.tostringlist(element)),
                b'<html><body>text</body></html>')
        self.assertEqual(ET.tostring(element, "ascii"),
                b"<?xml version='1.0' encoding='ascii'?>\n"
                b"<html><body>text</body></html>")
        _, ids = ET.XMLID("<html><body>text</body></html>")
        self.assertEqual(len(ids), 0)
        _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
        self.assertEqual(len(ids), 1)
        self.assertEqual(ids["body"].tag, 'body')

    def test_iterparse(self):
        # Test iterparse interface.

        iterparse = ET.iterparse

        context = iterparse(SIMPLE_XMLFILE)
        action, elem = next(context)
        self.assertEqual((action, elem.tag), ('end', 'element'))
        self.assertEqual([(action, elem.tag) for action, elem in context], [
                ('end', 'element'),
                ('end', 'empty-element'),
                ('end', 'root'),
            ])
        self.assertEqual(context.root.tag, 'root')

        context = iterparse(SIMPLE_NS_XMLFILE)
        self.assertEqual([(action, elem.tag) for action, elem in context], [
                ('end', '{namespace}element'),
                ('end', '{namespace}element'),
                ('end', '{namespace}empty-element'),
                ('end', '{namespace}root'),
            ])

        events = ()
        context = iterparse(SIMPLE_XMLFILE, events)
        self.assertEqual([(action, elem.tag) for action, elem in context], [])

        events = ()
        context = iterparse(SIMPLE_XMLFILE, events=events)
        self.assertEqual([(action, elem.tag) for action, elem in context], [])

        events = ("start", "end")
        context = iterparse(SIMPLE_XMLFILE, events)
        self.assertEqual([(action, elem.tag) for action, elem in context], [
                ('start', 'root'),
                ('start', 'element'),
                ('end', 'element'),
                ('start', 'element'),
                ('end', 'element'),
                ('start', 'empty-element'),
                ('end', 'empty-element'),
                ('end', 'root'),
            ])

        events = ("start", "end", "start-ns", "end-ns")
        context = iterparse(SIMPLE_NS_XMLFILE, events)
        self.assertEqual([(action, elem.tag) if action in ("start", "end")
                                             else (action, elem)
                          for action, elem in context], [
                ('start-ns', ('', 'namespace')),
                ('start', '{namespace}root'),
                ('start', '{namespace}element'),
                ('end', '{namespace}element'),
                ('start', '{namespace}element'),
                ('end', '{namespace}element'),
                ('start', '{namespace}empty-element'),
                ('end', '{namespace}empty-element'),
                ('end', '{namespace}root'),
                ('end-ns', None),
            ])

        events = ('start-ns', 'end-ns')
        context = iterparse(io.StringIO(r"<root xmlns=''/>"), events)
        res = [action for action, elem in context]
        self.assertEqual(res, ['start-ns', 'end-ns'])

        events = ("start", "end", "bogus")
        with self.assertRaises(ValueError) as cm:
            with open(SIMPLE_XMLFILE, "rb") as f:
                iterparse(f, events)
        self.assertEqual(str(cm.exception), "unknown event 'bogus'")

        # Restored: \xe9 escapes inside the bytes literals (raw non-ASCII
        # bytes are a SyntaxError in a b"..." literal).
        source = io.BytesIO(
            b"<?xml version='1.0' encoding='iso-8859-1'?>\n"
            b"<body xmlns='http://\xe9ffbot.org/ns'\n"
            b"      xmlns:cl\xe9='http://effbot.org/ns'>text</body>\n")
        events = ("start-ns",)
        context = iterparse(source, events)
        self.assertEqual([(action, elem) for action, elem in context], [
                ('start-ns', ('', 'http://\xe9ffbot.org/ns')),
                ('start-ns', ('cl\xe9', 'http://effbot.org/ns')),
            ])

        source = io.StringIO("<document />junk")
        it = iterparse(source)
        action, elem = next(it)
        self.assertEqual((action, elem.tag), ('end', 'document'))
        with self.assertRaises(ET.ParseError) as cm:
            next(it)
        self.assertEqual(str(cm.exception),
                'junk after document element: line 1, column 12')

    def test_writefile(self):
        # Serialization of text, comments, PIs and tag suppression.
        elem = ET.Element("tag")
        elem.text = "text"
        self.serialize_check(elem, '<tag>text</tag>')
        ET.SubElement(elem, "subtag").text = "subtext"
        self.serialize_check(elem, '<tag>text<subtag>subtext</subtag></tag>')

        # Test tag suppression
        elem.tag = None
        self.serialize_check(elem, 'text<subtag>subtext</subtag>')
        elem.insert(0, ET.Comment("comment"))
        self.serialize_check(elem,
                'text<!--comment--><subtag>subtext</subtag>')     # assumes 1.3

        elem[0] = ET.PI("key", "value")
        self.serialize_check(elem, 'text<?key value?><subtag>subtext</subtag>')

    def test_custom_builder(self):
        # Test parser w. custom builder.

        with open(SIMPLE_XMLFILE) as f:
            data = f.read()
        class Builder(list):
            def start(self, tag, attrib):
                self.append(("start", tag))
            def end(self, tag):
                self.append(("end", tag))
            def data(self, text):
                pass
        builder = Builder()
        parser = ET.XMLParser(target=builder)
        parser.feed(data)
        self.assertEqual(builder, [
                ('start', 'root'),
                ('start', 'element'),
                ('end', 'element'),
                ('start', 'element'),
                ('end', 'element'),
                ('start', 'empty-element'),
                ('end', 'empty-element'),
                ('end', 'root'),
            ])

        with open(SIMPLE_NS_XMLFILE) as f:
            data = f.read()
        class Builder(list):
            def start(self, tag, attrib):
                self.append(("start", tag))
            def end(self, tag):
                self.append(("end", tag))
            def data(self, text):
                pass
            def pi(self, target, data):
                self.append(("pi", target, data))
            def comment(self, data):
                self.append(("comment", data))
        builder = Builder()
        parser = ET.XMLParser(target=builder)
        parser.feed(data)
        self.assertEqual(builder, [
                ('pi', 'pi', 'data'),
                ('comment', ' comment '),
                ('start', '{namespace}root'),
                ('start', '{namespace}element'),
                ('end', '{namespace}element'),
                ('start', '{namespace}element'),
                ('end', '{namespace}element'),
                ('start', '{namespace}empty-element'),
                ('end', '{namespace}empty-element'),
                ('end', '{namespace}root'),
            ])

    def test_getchildren(self):
        # Test Element.getchildren()

        with open(SIMPLE_XMLFILE, "rb") as f:
            tree = ET.parse(f)
        self.assertEqual([summarize_list(elem.getchildren())
                          for elem in tree.getroot().iter()], [
                ['element', 'element', 'empty-element'],
                [],
                [],
                [],
            ])
        self.assertEqual([summarize_list(elem.getchildren())
                          for elem in tree.getiterator()], [
                ['element', 'element', 'empty-element'],
                [],
                [],
                [],
            ])

        elem = ET.XML(SAMPLE_XML)
        self.assertEqual(len(elem.getchildren()), 3)
        self.assertEqual(len(elem[2].getchildren()), 1)
        self.assertEqual(elem[:], elem.getchildren())
        child1 = elem[0]
        child2 = elem[2]
        del elem[1:2]
        self.assertEqual(len(elem.getchildren()), 2)
        self.assertEqual(child1, elem[0])
        self.assertEqual(child2, elem[1])
        elem[0:2] = [child2, child1]
        self.assertEqual(child2, elem[0])
        self.assertEqual(child1, elem[1])
        self.assertNotEqual(child1, elem[0])
        elem.clear()
        self.assertEqual(elem.getchildren(), [])

    def test_writestring(self):
        # tostring round-trips for XML/fromstring input.
        elem = ET.XML("<html><body>text</body></html>")
        self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
        elem = ET.fromstring("<html><body>text</body></html>")
        self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')

    def test_encoding(self):
        # Declared-encoding handling for parsing and serialization.
        def check(encoding, body=''):
            xml = ("<?xml version='1.0' encoding='%s'?><xml>%s</xml>" %
                   (encoding, body))
            self.assertEqual(ET.XML(xml.encode(encoding)).text, body)
            self.assertEqual(ET.XML(xml).text, body)
        check("ascii", 'a')
        check("us-ascii", 'a')
        check("iso-8859-1", '\xbd')
        check("iso-8859-15", '\u20ac')
        check("cp437", '\u221a')
        check("mac-roman", '\u02da')

        def xml(encoding):
            return "<?xml version='1.0' encoding='%s'?><xml />" % encoding
        def bxml(encoding):
            return xml(encoding).encode(encoding)
        supported_encodings = [
            'ascii', 'utf-8', 'utf-8-sig', 'utf-16', 'utf-16be', 'utf-16le',
            'iso8859-1', 'iso8859-2', 'iso8859-3', 'iso8859-4', 'iso8859-5',
            'iso8859-6', 'iso8859-7', 'iso8859-8', 'iso8859-9', 'iso8859-10',
            'iso8859-13', 'iso8859-14', 'iso8859-15', 'iso8859-16',
            'cp437', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852',
            'cp855', 'cp856', 'cp857', 'cp858', 'cp860', 'cp861', 'cp862',
            'cp863', 'cp865', 'cp866', 'cp869', 'cp874', 'cp1006', 'cp1125',
            'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
            'cp1256', 'cp1257', 'cp1258',
            'mac-cyrillic', 'mac-greek', 'mac-iceland', 'mac-latin2',
            'mac-roman', 'mac-turkish',
            'iso2022-jp', 'iso2022-jp-1', 'iso2022-jp-2', 'iso2022-jp-2004',
            'iso2022-jp-3', 'iso2022-jp-ext',
            'koi8-r', 'koi8-u',
            'hz', 'ptcp154',
        ]
        for encoding in supported_encodings:
            self.assertEqual(ET.tostring(ET.XML(bxml(encoding))), b'<xml />')

        unsupported_ascii_compatible_encodings = [
            'big5', 'big5hkscs',
            'cp932', 'cp949', 'cp950',
            'euc-jp', 'euc-jis-2004', 'euc-jisx0213', 'euc-kr',
            'gb2312', 'gbk', 'gb18030',
            'iso2022-kr', 'johab',
            'shift-jis', 'shift-jis-2004', 'shift-jisx0213',
            'utf-7',
        ]
        for encoding in unsupported_ascii_compatible_encodings:
            self.assertRaises(ValueError, ET.XML, bxml(encoding))

        unsupported_ascii_incompatible_encodings = [
            'cp037', 'cp424', 'cp500', 'cp864', 'cp875', 'cp1026', 'cp1140',
            'utf_32', 'utf_32_be', 'utf_32_le',
        ]
        for encoding in unsupported_ascii_incompatible_encodings:
            self.assertRaises(ET.ParseError, ET.XML, bxml(encoding))

        self.assertRaises(ValueError, ET.XML, xml('undefined').encode('ascii'))
        self.assertRaises(LookupError, ET.XML, xml('xxx').encode('ascii'))

    def test_methods(self):
        # Test serialization methods.
        # Restored: the script text must be "1 &lt; 2" -- a raw "<" in
        # character data is not well-formed XML.

        e = ET.XML("<html><link/><script>1 &lt; 2</script></html>")
        e.tail = "\n"
        self.assertEqual(serialize(e),
                '<html><link /><script>1 &lt; 2</script></html>\n')
        self.assertEqual(serialize(e, method=None),
                '<html><link /><script>1 &lt; 2</script></html>\n')
        self.assertEqual(serialize(e, method="xml"),
                '<html><link /><script>1 &lt; 2</script></html>\n')
        self.assertEqual(serialize(e, method="html"),
                '<html><link><script>1 < 2</script></html>\n')
        self.assertEqual(serialize(e, method="text"), '1 < 2\n')

    def test_issue18347(self):
        # Tag case must be preserved by both xml and html serialization.
        e = ET.XML('<html><CamelCase>text</CamelCase></html>')
        self.assertEqual(serialize(e),
                '<html><CamelCase>text</CamelCase></html>')
        self.assertEqual(serialize(e, method="html"),
                '<html><CamelCase>text</CamelCase></html>')

    def test_entity(self):
        # Test entity handling.

        # 1) good entities
        # Restored: the attribute value is the character reference &#x8230;
        # (serialized as &#33328; under us-ascii); raw U+8230 in a bytes
        # literal is a SyntaxError.

        e = ET.XML("<document title='&#x8230;'>test</document>")
        self.assertEqual(serialize(e, encoding="us-ascii"),
                b'<document title="&#33328;">test</document>')
        self.serialize_check(e, '<document title="\u8230">test</document>')

        # 2) bad entities

        with self.assertRaises(ET.ParseError) as cm:
            ET.XML("<document>&entity;</document>")
        self.assertEqual(str(cm.exception),
                'undefined entity: line 1, column 10')

        with self.assertRaises(ET.ParseError) as cm:
            ET.XML(ENTITY_XML)
        self.assertEqual(str(cm.exception),
                'undefined entity &entity;: line 5, column 10')

        # 3) custom entity

        parser = ET.XMLParser()
        parser.entity["entity"] = "text"
        parser.feed(ENTITY_XML)
        root = parser.close()
        self.serialize_check(root, '<document>text</document>')

    def test_namespace(self):
        # Test namespace issues.

        # 1) xml namespace

        elem = ET.XML("<tag xml:lang='en' />")
        self.serialize_check(elem, '<tag xml:lang="en" />') # 1.1

        # 2) other "well-known" namespaces

        elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
        self.serialize_check(elem,
            '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />') # 2.1

        elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
        self.serialize_check(elem,
            '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />') # 2.2

        elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
        self.serialize_check(elem,
            '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />') # 2.3

        # 3) unknown namespaces
        elem = ET.XML(SAMPLE_XML_NS)
        self.serialize_check(elem,
            '<ns0:body xmlns:ns0="http://effbot.org/ns">\n'
            '  <ns0:tag>text</ns0:tag>\n'
            '  <ns0:tag />\n'
            '  <ns0:section>\n'
            '    <ns0:tag>subtext</ns0:tag>\n'
            '  </ns0:section>\n'
            '</ns0:body>')

    def test_qname(self):
        # Test QName handling.

        # 1) decorated tags

        elem = ET.Element("{uri}tag")
        self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.1
        elem = ET.Element(ET.QName("{uri}tag"))
        self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.2
        elem = ET.Element(ET.QName("uri", "tag"))
        self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.3
        elem = ET.Element(ET.QName("uri", "tag"))
        subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
        subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
        self.serialize_check(elem,
            '<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>') # 1.4

        # 2) decorated attributes

        elem.clear()
        elem.attrib["{uri}key"] = "value"
        self.serialize_check(elem,
            '<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.1

        elem.clear()
        elem.attrib[ET.QName("{uri}key")] = "value"
        self.serialize_check(elem,
            '<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.2

        # 3) decorated values are not converted by default, but the
        # QName wrapper can be used for values

        elem.clear()
        elem.attrib["{uri}key"] = "{uri}value"
        self.serialize_check(elem,
            '<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />') # 3.1

        elem.clear()
        elem.attrib["{uri}key"] = ET.QName("{uri}value")
        self.serialize_check(elem,
            '<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />') # 3.2

        elem.clear()
        subelem = ET.Element("tag")
        subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
        elem.append(subelem)
        elem.append(subelem)
        self.serialize_check(elem,
            '<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2">'
            '<tag ns1:key="ns2:value" />'
            '<tag ns1:key="ns2:value" />'
            '</ns0:tag>') # 3.3

        # 4) Direct QName tests

        self.assertEqual(str(ET.QName('ns', 'tag')), '{ns}tag')
        self.assertEqual(str(ET.QName('{ns}tag')), '{ns}tag')
        q1 = ET.QName('ns', 'tag')
        q2 = ET.QName('ns', 'tag')
        self.assertEqual(q1, q2)
        q2 = ET.QName('ns', 'other-tag')
        self.assertNotEqual(q1, q2)
        self.assertNotEqual(q1, 'ns:tag')
        self.assertEqual(q1, '{ns}tag')

    def test_doctype_public(self):
        # Test PUBLIC doctype.

        elem = ET.XML('<!DOCTYPE html PUBLIC'
                ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
                ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
                '<html>text</html>')

    def test_xpath_tokenizer(self):
        # Test the XPath tokenizer.
        from xml.etree import ElementPath
        def check(p, expected):
            self.assertEqual([op or tag
                              for op, tag in ElementPath.xpath_tokenizer(p)],
                             expected)

        # tests from the xml specification
        check("*", ['*'])
        check("text()", ['text', '()'])
        check("@name", ['@', 'name'])
        check("@*", ['@', '*'])
        check("para[1]", ['para', '[', '1', ']'])
        check("para[last()]", ['para', '[', 'last', '()', ']'])
        check("*/para", ['*', '/', 'para'])
        check("/doc/chapter[5]/section[2]",
              ['/', 'doc', '/', 'chapter', '[', '5', ']',
               '/', 'section', '[', '2', ']'])
        check("chapter//para", ['chapter', '//', 'para'])
        check("//para", ['//', 'para'])
        check("//olist/item", ['//', 'olist', '/', 'item'])
        check(".", ['.'])
        check(".//para", ['.', '//', 'para'])
        check("..", ['..'])
        check("../@lang", ['..', '/', '@', 'lang'])
        check("chapter[title]", ['chapter', '[', 'title', ']'])
        check("employee[@secretary and @assistant]", ['employee',
              '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'])

        # additional tests
        check("{http://spam}egg", ['{http://spam}egg'])
        check("./spam.egg", ['.', '/', 'spam.egg'])
        check(".//{http://spam}egg", ['.', '//', '{http://spam}egg'])

    def test_processinginstruction(self):
        # Test ProcessingInstruction directly

        self.assertEqual(ET.tostring(ET.ProcessingInstruction('test', 'instruction')),
                b'<?test instruction?>')
        self.assertEqual(ET.tostring(ET.PI('test', 'instruction')),
                b'<?test instruction?>')

        # Issue #2746

        self.assertEqual(ET.tostring(ET.PI('test', '<testing&>')),
                b'<?test <testing&>?>')
        self.assertEqual(ET.tostring(ET.PI('test', '<testing&>\xe3'), 'latin-1'),
                b"<?xml version='1.0' encoding='latin-1'?>\n"
                b"<?test <testing&>\xe3?>")

    def test_html_empty_elems_serialization(self):
        # issue 15970
        # from http://www.w3.org/TR/html401/index/elements.html
        for element in ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
                        'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']:
            for elem in [element, element.lower()]:
                expected = '<%s>' % elem
                serialized = serialize(ET.XML('<%s />' % elem), method='html')
                self.assertEqual(serialized, expected)
                serialized = serialize(ET.XML('<%s></%s>' % (elem,elem)),
                                       method='html')
                self.assertEqual(serialized, expected)
class XMLPullParserTest(unittest.TestCase):
def _feed(self, parser, data, chunk_size=None):
if chunk_size is None:
parser.feed(data)
else:
for i in range(0, len(data), chunk_size):
parser.feed(data[i:i+chunk_size])
def assert_event_tags(self, parser, expected):
events = parser.read_events()
self.assertEqual([(action, elem.tag) for action, elem in events],
expected)
def test_simple_xml(self):
for chunk_size in (None, 1, 5):
with self.subTest(chunk_size=chunk_size):
parser = ET.XMLPullParser()
self.assert_event_tags(parser, [])
self._feed(parser, "<!-- comment -->\n", chunk_size)
self.assert_event_tags(parser, [])
self._feed(parser,
"<root>\n <element key='value'>text</element",
chunk_size)
self.assert_event_tags(parser, [])
self._feed(parser, ">\n", chunk_size)
self.assert_event_tags(parser, [('end', 'element')])
self._feed(parser, "<element>text</element>tail\n", chunk_size)
self._feed(parser, "<empty-element/>\n", chunk_size)
self.assert_event_tags(parser, [
('end', 'element'),
('end', 'empty-element'),
])
self._feed(parser, "</root>\n", chunk_size)
self.assert_event_tags(parser, [('end', 'root')])
self.assertIsNone(parser.close())
def test_feed_while_iterating(self):
parser = ET.XMLPullParser()
it = parser.read_events()
self._feed(parser, "<root>\n <element key='value'>text</element>\n")
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'element'))
self._feed(parser, "</root>\n")
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'root'))
with self.assertRaises(StopIteration):
next(it)
def test_simple_xml_with_ns(self):
parser = ET.XMLPullParser()
self.assert_event_tags(parser, [])
self._feed(parser, "<!-- comment -->\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<root xmlns='namespace'>\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [('end', '{namespace}element')])
self._feed(parser, "<element>text</element>tail\n")
self._feed(parser, "<empty-element/>\n")
self.assert_event_tags(parser, [
('end', '{namespace}element'),
('end', '{namespace}empty-element'),
])
self._feed(parser, "</root>\n")
self.assert_event_tags(parser, [('end', '{namespace}root')])
self.assertIsNone(parser.close())
def test_ns_events(self):
parser = ET.XMLPullParser(events=('start-ns', 'end-ns'))
self._feed(parser, "<!-- comment -->\n")
self._feed(parser, "<root xmlns='namespace'>\n")
self.assertEqual(
list(parser.read_events()),
[('start-ns', ('', 'namespace'))])
self._feed(parser, "<element key='value'>text</element")
self._feed(parser, ">\n")
self._feed(parser, "<element>text</element>tail\n")
self._feed(parser, "<empty-element/>\n")
self._feed(parser, "</root>\n")
self.assertEqual(list(parser.read_events()), [('end-ns', None)])
self.assertIsNone(parser.close())
def test_events(self):
parser = ET.XMLPullParser(events=())
self._feed(parser, "<root/>\n")
self.assert_event_tags(parser, [])
parser = ET.XMLPullParser(events=('start', 'end'))
self._feed(parser, "<!-- comment -->\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<root>\n")
self.assert_event_tags(parser, [('start', 'root')])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [('start', 'element')])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [('end', 'element')])
self._feed(parser,
"<element xmlns='foo'>text<empty-element/></element>tail\n")
self.assert_event_tags(parser, [
('start', '{foo}element'),
('start', '{foo}empty-element'),
('end', '{foo}empty-element'),
('end', '{foo}element'),
])
self._feed(parser, "</root>")
self.assertIsNone(parser.close())
self.assert_event_tags(parser, [('end', 'root')])
parser = ET.XMLPullParser(events=('start',))
self._feed(parser, "<!-- comment -->\n")
self.assert_event_tags(parser, [])
self._feed(parser, "<root>\n")
self.assert_event_tags(parser, [('start', 'root')])
self._feed(parser, "<element key='value'>text</element")
self.assert_event_tags(parser, [('start', 'element')])
self._feed(parser, ">\n")
self.assert_event_tags(parser, [])
self._feed(parser,
"<element xmlns='foo'>text<empty-element/></element>tail\n")
self.assert_event_tags(parser, [
('start', '{foo}element'),
('start', '{foo}empty-element'),
])
self._feed(parser, "</root>")
self.assertIsNone(parser.close())
def test_events_sequence(self):
# Test that events can be some sequence that's not just a tuple or list
eventset = {'end', 'start'}
parser = ET.XMLPullParser(events=eventset)
self._feed(parser, "<foo>bar</foo>")
self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])
class DummyIter:
def __init__(self):
self.events = iter(['start', 'end', 'start-ns'])
def __iter__(self):
return self
def __next__(self):
return next(self.events)
parser = ET.XMLPullParser(events=DummyIter())
self._feed(parser, "<foo>bar</foo>")
self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])
def test_unknown_event(self):
    """Requesting an unsupported event name raises ValueError."""
    with self.assertRaises(ValueError):
        ET.XMLPullParser(events=('start', 'end', 'bogus'))
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(html.escape(SIMPLE_XMLFILE, True))
#
# badly formatted xi:include tags
XINCLUDE_BAD = {}
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
class XIncludeTest(unittest.TestCase):
    """XInclude processing via xml.etree.ElementInclude.

    Documents are served from the in-memory XINCLUDE / XINCLUDE_BAD dicts
    above rather than from disk.
    """

    def xinclude_loader(self, href, parse="xml", encoding=None):
        """Resolve *href* against the XINCLUDE sample dict.

        Returns a parsed Element for parse="xml", raw text otherwise;
        raises OSError for unknown resources (like the default loader).
        """
        try:
            data = XINCLUDE[href]
        except KeyError:
            raise OSError("resource not found")
        if parse == "xml":
            data = ET.XML(data)
        return data

    def none_loader(self, href, parser, encoding=None):
        # A loader that always fails to find its resource.
        return None

    def _my_loader(self, href, parse):
        # Used to avoid a test-dependency problem where the default loader
        # of ElementInclude uses the pyET parser for cET tests.
        if parse == 'xml':
            with open(href, 'rb') as f:
                return ET.parse(f).getroot()
        else:
            return None

    def test_xinclude_default(self):
        """Including a real file through the custom loader."""
        from xml.etree import ElementInclude
        doc = self.xinclude_loader('default.xml')
        ElementInclude.include(doc, self._my_loader)
        self.assertEqual(serialize(doc),
            '<document>\n'
            '  <p>Example.</p>\n'
            '  <root>\n'
            '   <element key="value">text</element>\n'
            '   <element>text</element>tail\n'
            '   <empty-element />\n'
            '</root>\n'
            '</document>')

    def test_xinclude(self):
        """Samples from appendix C of the XInclude specification."""
        from xml.etree import ElementInclude

        # Basic inclusion example (XInclude C.1)
        document = self.xinclude_loader("C1.xml")
        ElementInclude.include(document, self.xinclude_loader)
        self.assertEqual(serialize(document),
            '<document>\n'
            '  <p>120 Mz is adequate for an average home user.</p>\n'
            '  <disclaimer>\n'
            '  <p>The opinions represented herein represent those of the individual\n'
            '  and should not be interpreted as official policy endorsed by this\n'
            '  organization.</p>\n'
            '</disclaimer>\n'
            '</document>') # C1

        # Textual inclusion example (XInclude C.2)
        document = self.xinclude_loader("C2.xml")
        ElementInclude.include(document, self.xinclude_loader)
        self.assertEqual(serialize(document),
            '<document>\n'
            '  <p>This document has been accessed\n'
            '  324387 times.</p>\n'
            '</document>') # C2

        # Textual inclusion after sibling element (based on modified XInclude C.2)
        document = self.xinclude_loader("C2b.xml")
        ElementInclude.include(document, self.xinclude_loader)
        self.assertEqual(serialize(document),
            '<document>\n'
            '  <p>This document has been <em>accessed</em>\n'
            '  324387 times.</p>\n'
            '</document>') # C2b

        # Textual inclusion of XML example (XInclude C.3)
        document = self.xinclude_loader("C3.xml")
        ElementInclude.include(document, self.xinclude_loader)
        self.assertEqual(serialize(document),
            '<document>\n'
            '  <p>The following is the source of the "data.xml" resource:</p>\n'
            "  <example><?xml version='1.0'?>\n"
            '<data>\n'
            '  <item><![CDATA[Brooks & Shields]]></item>\n'
            '</data>\n'
            '</example>\n'
            '</document>') # C3

        # Fallback example (XInclude C.5)
        # Note! Fallback support is not yet implemented
        document = self.xinclude_loader("C5.xml")
        with self.assertRaises(OSError) as cm:
            ElementInclude.include(document, self.xinclude_loader)
        self.assertEqual(str(cm.exception), 'resource not found')
        self.assertEqual(serialize(document),
            '<div xmlns:ns0="http://www.w3.org/2001/XInclude">\n'
            '  <ns0:include href="example.txt" parse="text">\n'
            '    <ns0:fallback>\n'
            '      <ns0:include href="fallback-example.txt" parse="text">\n'
            '        <ns0:fallback><a href="mailto:bob@example.org">Report error</a></ns0:fallback>\n'
            '      </ns0:include>\n'
            '    </ns0:fallback>\n'
            '  </ns0:include>\n'
            '</div>') # C5

    def test_xinclude_failures(self):
        """Error messages for unloadable resources and malformed xi: tags."""
        from xml.etree import ElementInclude

        # Test failure to locate included XML file.
        document = ET.XML(XINCLUDE["C1.xml"])
        with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
            ElementInclude.include(document, loader=self.none_loader)
        self.assertEqual(str(cm.exception),
                "cannot load 'disclaimer.xml' as 'xml'")

        # Test failure to locate included text file.
        document = ET.XML(XINCLUDE["C2.xml"])
        with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
            ElementInclude.include(document, loader=self.none_loader)
        self.assertEqual(str(cm.exception),
                "cannot load 'count.txt' as 'text'")

        # Test bad parse type.
        document = ET.XML(XINCLUDE_BAD["B1.xml"])
        with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
            ElementInclude.include(document, loader=self.none_loader)
        self.assertEqual(str(cm.exception),
                "unknown parse type in xi:include tag ('BAD_TYPE')")

        # Test xi:fallback outside xi:include.
        document = ET.XML(XINCLUDE_BAD["B2.xml"])
        with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
            ElementInclude.include(document, loader=self.none_loader)
        self.assertEqual(str(cm.exception),
                "xi:fallback tag must be child of xi:include "
                "('{http://www.w3.org/2001/XInclude}fallback')")
# --------------------------------------------------------------------
# reported bugs
class BugsTest(unittest.TestCase):
    """Regression tests for historical ElementTree bug reports.

    NOTE(review): several string literals below were restored from
    HTML-entity-mangled text in the original paste (e.g. ``&#228;`` inside
    bytes literals, ``&amp;`` in XML fed to the parser) — confirm against
    upstream if behavior differs.
    """

    def test_bug_xmltoolkit21(self):
        # marshaller gives obscure errors for non-string values

        def check(elem):
            with self.assertRaises(TypeError) as cm:
                serialize(elem)
            self.assertEqual(str(cm.exception),
                    'cannot serialize 123 (type int)')

        elem = ET.Element(123)
        check(elem) # tag

        elem = ET.Element("elem")
        elem.text = 123
        check(elem) # text

        elem = ET.Element("elem")
        elem.tail = 123
        check(elem) # tail

        elem = ET.Element("elem")
        elem.set(123, "123")
        check(elem) # attribute key

        elem = ET.Element("elem")
        elem.set("123", 123)
        check(elem) # attribute value

    def test_bug_xmltoolkit25(self):
        # typo in ElementTree.findtext
        elem = ET.XML(SAMPLE_XML)
        tree = ET.ElementTree(elem)
        self.assertEqual(tree.findtext("tag"), 'text')
        self.assertEqual(tree.findtext("section/tag"), 'subtext')

    def test_bug_xmltoolkit28(self):
        # .//tag causes exceptions
        tree = ET.XML("<doc><table><tbody/></table></doc>")
        self.assertEqual(summarize_list(tree.findall(".//thead")), [])
        self.assertEqual(summarize_list(tree.findall(".//tbody")), ['tbody'])

    def test_bug_xmltoolkitX1(self):
        # dump() doesn't flush the output buffer
        tree = ET.XML("<doc><table><tbody/></table></doc>")
        with support.captured_stdout() as stdout:
            ET.dump(tree)
            self.assertEqual(stdout.getvalue(), '<doc><table><tbody /></table></doc>\n')

    def test_bug_xmltoolkit39(self):
        # non-ascii element and attribute names doesn't work
        tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
        self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')

        tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
                      b"<tag \xe4ttr='v&#228;lue' />")
        self.assertEqual(tree.attrib, {'\xe4ttr': 'v\xe4lue'})
        self.assertEqual(ET.tostring(tree, "utf-8"),
                b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')

        tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
                      b'<t\xe4g>text</t\xe4g>')
        self.assertEqual(ET.tostring(tree, "utf-8"),
                b'<t\xc3\xa4g>text</t\xc3\xa4g>')

        tree = ET.Element("t\u00e4g")
        self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')

        tree = ET.Element("tag")
        tree.set("\u00e4ttr", "v\u00e4lue")
        self.assertEqual(ET.tostring(tree, "utf-8"),
                b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')

    def test_bug_xmltoolkit54(self):
        # problems handling internally defined entities
        e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]>"
                   '<doc>&ldots;</doc>')
        # us-ascii output escapes the non-ASCII char as a decimal charref
        self.assertEqual(serialize(e, encoding="us-ascii"),
                b'<doc>&#33328;</doc>')
        self.assertEqual(serialize(e), '<doc>\u8230</doc>')

    def test_bug_xmltoolkit55(self):
        # make sure we're reporting the first error, not the last
        with self.assertRaises(ET.ParseError) as cm:
            ET.XML(b"<!DOCTYPE doc SYSTEM 'doc.dtd'>"
                   b'<doc>&ldots;&ndots;&rdots;</doc>')
        self.assertEqual(str(cm.exception),
                'undefined entity &ldots;: line 1, column 36')

    def test_bug_xmltoolkit60(self):
        # Handle crash in stream source.

        class ExceptionFile:
            def read(self, x):
                raise OSError

        self.assertRaises(OSError, ET.parse, ExceptionFile())

    def test_bug_xmltoolkit62(self):
        # Don't crash when using custom entities.
        ENTITIES = {'rsquo': '\u2019', 'lsquo': '\u2018'}
        parser = ET.XMLParser()
        parser.entity.update(ENTITIES)
        parser.feed("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named &lsquo;BCT9801BEG&rsquo;.</paragraph>
</subdoc-abstract>
</patent-application-publication>""")
        t = parser.close()
        self.assertEqual(t.find('.//paragraph').text,
                'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.')

    def test_bug_xmltoolkit63(self):
        # Check reference leak.
        def xmltoolkit63():
            tree = ET.TreeBuilder()
            tree.start("tag", {})
            tree.data("text")
            tree.end("tag")

        xmltoolkit63()
        count = sys.getrefcount(None)
        for i in range(1000):
            xmltoolkit63()
        self.assertEqual(sys.getrefcount(None), count)

    def test_bug_200708_newline(self):
        # Preserve newlines in attributes.  Serialization escapes them
        # as &#10; so they survive a round trip.
        e = ET.Element('SomeTag', text="def _f():\n  return 3\n")
        self.assertEqual(ET.tostring(e),
                b'<SomeTag text="def _f():&#10;  return 3&#10;" />')
        self.assertEqual(ET.XML(ET.tostring(e)).get("text"),
                'def _f():\n  return 3\n')
        self.assertEqual(ET.tostring(ET.XML(ET.tostring(e))),
                b'<SomeTag text="def _f():&#10;  return 3&#10;" />')

    def test_bug_200708_close(self):
        # Test default builder.
        parser = ET.XMLParser() # default
        parser.feed("<element>some text</element>")
        self.assertEqual(parser.close().tag, 'element')

        # Test custom builder.
        class EchoTarget:
            def close(self):
                return ET.Element("element") # simulate root
        parser = ET.XMLParser(EchoTarget())
        parser.feed("<element>some text</element>")
        self.assertEqual(parser.close().tag, 'element')

    def test_bug_200709_default_namespace(self):
        e = ET.Element("{default}elem")
        s = ET.SubElement(e, "{default}elem")
        self.assertEqual(serialize(e, default_namespace="default"), # 1
            '<elem xmlns="default"><elem /></elem>')

        e = ET.Element("{default}elem")
        s = ET.SubElement(e, "{default}elem")
        s = ET.SubElement(e, "{not-default}elem")
        self.assertEqual(serialize(e, default_namespace="default"), # 2
            '<elem xmlns="default" xmlns:ns1="not-default">'
              '<elem />'
              '<ns1:elem />'
            '</elem>')

        e = ET.Element("{default}elem")
        s = ET.SubElement(e, "{default}elem")
        s = ET.SubElement(e, "elem") # unprefixed name
        with self.assertRaises(ValueError) as cm:
            serialize(e, default_namespace="default") # 3
        self.assertEqual(str(cm.exception),
                'cannot use non-qualified names with default_namespace option')

    def test_bug_200709_register_namespace(self):
        e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
        self.assertEqual(ET.tostring(e),
            b'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />')
        ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
        e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
        self.assertEqual(ET.tostring(e),
            b'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />')

        # And the Dublin Core namespace is in the default list:
        e = ET.Element("{http://purl.org/dc/elements/1.1/}title")
        self.assertEqual(ET.tostring(e),
            b'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />')

    def test_bug_200709_element_comment(self):
        # Not sure if this can be fixed, really (since the serializer needs
        # ET.Comment, not cET.comment).
        a = ET.Element('a')
        a.append(ET.Comment('foo'))
        self.assertEqual(a[0].tag, ET.Comment)

        a = ET.Element('a')
        a.append(ET.PI('foo'))
        self.assertEqual(a[0].tag, ET.PI)

    def test_bug_200709_element_insert(self):
        a = ET.Element('a')
        b = ET.SubElement(a, 'b')
        c = ET.SubElement(a, 'c')
        d = ET.Element('d')
        a.insert(0, d)
        self.assertEqual(summarize_list(a), ['d', 'b', 'c'])
        a.insert(-1, d)
        self.assertEqual(summarize_list(a), ['d', 'b', 'd', 'c'])

    def test_bug_200709_iter_comment(self):
        a = ET.Element('a')
        b = ET.SubElement(a, 'b')
        comment_b = ET.Comment("TEST-b")
        b.append(comment_b)
        self.assertEqual(summarize_list(a.iter(ET.Comment)), [ET.Comment])

    # --------------------------------------------------------------------
    # reported on bugs.python.org

    def test_bug_1534630(self):
        bob = ET.TreeBuilder()
        e = bob.data("data")
        e = bob.start("tag", {})
        e = bob.end("tag")
        e = bob.close()
        self.assertEqual(serialize(e), '<tag />')

    def test_issue6233(self):
        e = ET.XML(b"<?xml version='1.0' encoding='utf-8'?>"
                   b'<body>t\xc3\xa3g</body>')
        self.assertEqual(ET.tostring(e, 'ascii'),
                b"<?xml version='1.0' encoding='ascii'?>\n"
                b'<body>t&#227;g</body>')
        e = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
                   b'<body>t\xe3g</body>')
        self.assertEqual(ET.tostring(e, 'ascii'),
                b"<?xml version='1.0' encoding='ascii'?>\n"
                b'<body>t&#227;g</body>')

    def test_issue3151(self):
        e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
        self.assertEqual(e.tag, '{${stuff}}localname')
        t = ET.ElementTree(e)
        self.assertEqual(ET.tostring(e), b'<ns0:localname xmlns:ns0="${stuff}" />')

    def test_issue6565(self):
        elem = ET.XML("<body><tag/></body>")
        self.assertEqual(summarize_list(elem), ['tag'])
        newelem = ET.XML(SAMPLE_XML)
        elem[:] = newelem[:]
        self.assertEqual(summarize_list(elem), ['tag', 'tag', 'section'])

    def test_issue10777(self):
        # Registering a namespace twice caused a "dictionary changed size during
        # iteration" bug.

        ET.register_namespace('test10777', 'http://myuri/')
        ET.register_namespace('test10777', 'http://myuri/')
# --------------------------------------------------------------------
class BasicElementTest(ElementTestCase, unittest.TestCase):
    """Core Element behaviour: argument checking, GC cycles, weakrefs,
    keyword args, and pickling across the C/Python implementations."""

    def test_augmentation_type_errors(self):
        # append/extend/insert must reject non-Element children.
        e = ET.Element('joe')
        self.assertRaises(TypeError, e.append, 'b')
        self.assertRaises(TypeError, e.extend, [ET.Element('bar'), 'foo'])
        self.assertRaises(TypeError, e.insert, 0, 'foo')

    def test_cyclic_gc(self):
        class Dummy:
            pass

        # Test the shortest cycle: d->element->d
        d = Dummy()
        d.dummyref = ET.Element('joe', attr=d)
        wref = weakref.ref(d)
        del d
        gc_collect()
        self.assertIsNone(wref())

        # A longer cycle: d->e->e2->d
        e = ET.Element('joe')
        d = Dummy()
        d.dummyref = e
        wref = weakref.ref(d)
        e2 = ET.SubElement(e, 'foo', attr=d)
        del d, e, e2
        gc_collect()
        self.assertIsNone(wref())

        # A cycle between Element objects as children of one another
        # e1->e2->e3->e1
        e1 = ET.Element('e1')
        e2 = ET.Element('e2')
        e3 = ET.Element('e3')
        e1.append(e2)
        e2.append(e2)
        e3.append(e1)
        wref = weakref.ref(e1)
        del e1, e2, e3
        gc_collect()
        self.assertIsNone(wref())

    def test_weakref(self):
        # Elements must be weak-referenceable, with the callback firing.
        flag = False
        def wref_cb(w):
            nonlocal flag
            flag = True
        e = ET.Element('e')
        wref = weakref.ref(e, wref_cb)
        self.assertEqual(wref().tag, 'e')
        del e
        self.assertEqual(flag, True)
        self.assertEqual(wref(), None)

    def test_get_keyword_args(self):
        # Element.get() accepts its default as a keyword argument.
        e1 = ET.Element('foo' , x=1, y=2, z=3)
        self.assertEqual(e1.get('x', default=7), 1)
        self.assertEqual(e1.get('w', default=7), 7)

    def test_pickle(self):
        # issue #16076: the C implementation wasn't pickleable.
        # Dump with one implementation, load with the other, both ways.
        for dumper, loader in product(self.modules, repeat=2):
            e = dumper.Element('foo', bar=42)
            e.text = "text goes here"
            e.tail = "opposite of head"
            dumper.SubElement(e, 'child').append(dumper.Element('grandchild'))
            e.append(dumper.Element('child'))
            e.findall('.//grandchild')[0].set('attr', 'other value')

            e2 = self.pickleRoundTrip(e, 'xml.etree.ElementTree',
                                      dumper, loader)

            self.assertEqual(e2.tag, 'foo')
            self.assertEqual(e2.attrib['bar'], 42)
            self.assertEqual(len(e2), 2)
            self.assertEqualElements(e, e2)

    def test_pickle_issue18997(self):
        # __getstate__ must expose the tag for both implementations.
        for dumper, loader in product(self.modules, repeat=2):
            XMLTEXT = """<?xml version="1.0"?>
                         <group><dogs>4</dogs>
                         </group>"""
            e1 = dumper.fromstring(XMLTEXT)
            if hasattr(e1, '__getstate__'):
                self.assertEqual(e1.__getstate__()['tag'], 'group')
            e2 = self.pickleRoundTrip(e1, 'xml.etree.ElementTree', dumper, loader)
            self.assertEqual(e2.tag, 'group')
            self.assertEqual(e2[0].tag, 'dogs')
class ElementTreeTypeTest(unittest.TestCase):
    """The public ET names are real types, and Element is subclassable."""

    def test_istype(self):
        # Each public name must be an actual class, not a factory function.
        for name in ('ParseError', 'QName', 'ElementTree',
                     'Element', 'TreeBuilder', 'XMLParser'):
            self.assertIsInstance(getattr(ET, name), type)

    def test_Element_subclass_trivial(self):
        class MyElement(ET.Element):
            pass

        elem = MyElement('foo')
        self.assertIsInstance(elem, ET.Element)
        self.assertIsInstance(elem, MyElement)
        self.assertEqual(elem.tag, 'foo')

        # test that attribute assignment works (issue 14849)
        elem.text = "joe"
        self.assertEqual(elem.text, "joe")

    def test_Element_subclass_constructor(self):
        class MyElement(ET.Element):
            def __init__(self, tag, attrib={}, **extra):
                # Decorate the tag so we can see our __init__ ran.
                super(MyElement, self).__init__(tag + '__', attrib, **extra)

        elem = MyElement('foo', {'a': 1, 'b': 2}, c=3, d=4)
        self.assertEqual(elem.tag, 'foo__')
        self.assertEqual(sorted(elem.items()),
                         [('a', 1), ('b', 2), ('c', 3), ('d', 4)])

    def test_Element_subclass_new_method(self):
        class MyElement(ET.Element):
            def newmethod(self):
                return self.tag

        self.assertEqual(MyElement('joe').newmethod(), 'joe')
class ElementFindTest(unittest.TestCase):
    """find()/findall()/findtext() path expressions, including the
    supported XPath subset (indexing, last(), predicates, namespaces)."""

    def test_find_simple(self):
        e = ET.XML(SAMPLE_XML)
        self.assertEqual(e.find('tag').tag, 'tag')
        self.assertEqual(e.find('section/tag').tag, 'tag')
        self.assertEqual(e.find('./tag').tag, 'tag')

        e[2] = ET.XML(SAMPLE_SECTION)
        self.assertEqual(e.find('section/nexttag').tag, 'nexttag')

        self.assertEqual(e.findtext('./tag'), 'text')
        self.assertEqual(e.findtext('section/tag'), 'subtext')

        # section/nexttag is found but has no text
        self.assertEqual(e.findtext('section/nexttag'), '')
        self.assertEqual(e.findtext('section/nexttag', 'default'), '')

        # tog doesn't exist and 'default' kicks in
        self.assertIsNone(e.findtext('tog'))
        self.assertEqual(e.findtext('tog', 'default'), 'default')

        # Issue #16922
        self.assertEqual(ET.XML('<tag><empty /></tag>').findtext('empty'), '')

    def test_find_xpath(self):
        LINEAR_XML = '''
        <body>
            <tag class='a'/>
            <tag class='b'/>
            <tag class='c'/>
            <tag class='d'/>
        </body>'''
        e = ET.XML(LINEAR_XML)

        # Test for numeric indexing and last()
        self.assertEqual(e.find('./tag[1]').attrib['class'], 'a')
        self.assertEqual(e.find('./tag[2]').attrib['class'], 'b')
        self.assertEqual(e.find('./tag[last()]').attrib['class'], 'd')
        self.assertEqual(e.find('./tag[last()-1]').attrib['class'], 'c')
        self.assertEqual(e.find('./tag[last()-2]').attrib['class'], 'b')

        # XPath positions are 1-based; these forms are rejected.
        self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[0]')
        self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[-1]')
        self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[last()-0]')
        self.assertRaisesRegex(SyntaxError, 'XPath', e.find, './tag[last()+1]')

    def test_findall(self):
        e = ET.XML(SAMPLE_XML)
        e[2] = ET.XML(SAMPLE_SECTION)
        self.assertEqual(summarize_list(e.findall('.')), ['body'])
        self.assertEqual(summarize_list(e.findall('tag')), ['tag', 'tag'])
        self.assertEqual(summarize_list(e.findall('tog')), [])
        self.assertEqual(summarize_list(e.findall('tog/foo')), [])
        self.assertEqual(summarize_list(e.findall('*')),
            ['tag', 'tag', 'section'])
        self.assertEqual(summarize_list(e.findall('.//tag')),
            ['tag'] * 4)
        self.assertEqual(summarize_list(e.findall('section/tag')), ['tag'])
        self.assertEqual(summarize_list(e.findall('section//tag')), ['tag'] * 2)
        self.assertEqual(summarize_list(e.findall('section/*')),
            ['tag', 'nexttag', 'nextsection'])
        self.assertEqual(summarize_list(e.findall('section//*')),
            ['tag', 'nexttag', 'nextsection', 'tag'])
        self.assertEqual(summarize_list(e.findall('section/.//*')),
            ['tag', 'nexttag', 'nextsection', 'tag'])
        self.assertEqual(summarize_list(e.findall('*/*')),
            ['tag', 'nexttag', 'nextsection'])
        self.assertEqual(summarize_list(e.findall('*//*')),
            ['tag', 'nexttag', 'nextsection', 'tag'])
        self.assertEqual(summarize_list(e.findall('*/tag')), ['tag'])
        self.assertEqual(summarize_list(e.findall('*/./tag')), ['tag'])
        self.assertEqual(summarize_list(e.findall('./tag')), ['tag'] * 2)
        self.assertEqual(summarize_list(e.findall('././tag')), ['tag'] * 2)

        # Attribute and child-existence predicates.
        self.assertEqual(summarize_list(e.findall('.//tag[@class]')),
            ['tag'] * 3)
        self.assertEqual(summarize_list(e.findall('.//tag[@class="a"]')),
            ['tag'])
        self.assertEqual(summarize_list(e.findall('.//tag[@class="b"]')),
            ['tag'] * 2)
        self.assertEqual(summarize_list(e.findall('.//tag[@id]')),
            ['tag'])
        self.assertEqual(summarize_list(e.findall('.//section[tag]')),
            ['section'])
        self.assertEqual(summarize_list(e.findall('.//section[element]')), [])
        self.assertEqual(summarize_list(e.findall('../tag')), [])
        self.assertEqual(summarize_list(e.findall('section/../tag')),
            ['tag'] * 2)
        # A trailing '//' behaves like '//*'.
        self.assertEqual(e.findall('section//'), e.findall('section//*'))

    def test_test_find_with_ns(self):
        e = ET.XML(SAMPLE_XML_NS)
        self.assertEqual(summarize_list(e.findall('tag')), [])
        self.assertEqual(
            summarize_list(e.findall("{http://effbot.org/ns}tag")),
            ['{http://effbot.org/ns}tag'] * 2)
        self.assertEqual(
            summarize_list(e.findall(".//{http://effbot.org/ns}tag")),
            ['{http://effbot.org/ns}tag'] * 3)

    def test_findall_different_nsmaps(self):
        root = ET.XML('''
            <a xmlns:x="X" xmlns:y="Y">
                <x:b><c/></x:b>
                <b/>
                <c><x:b/><b/></c><y:b/>
            </a>''')
        # The same prefix may map to different URIs in successive calls.
        nsmap = {'xx': 'X'}
        self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
        self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
        nsmap = {'xx': 'Y'}
        self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
        self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)

    def test_bad_find(self):
        e = ET.XML(SAMPLE_XML)
        with self.assertRaisesRegex(SyntaxError, 'cannot use absolute path'):
            e.findall('/tag')

    def test_find_through_ElementTree(self):
        e = ET.XML(SAMPLE_XML)
        self.assertEqual(ET.ElementTree(e).find('tag').tag, 'tag')
        self.assertEqual(ET.ElementTree(e).findtext('tag'), 'text')
        self.assertEqual(summarize_list(ET.ElementTree(e).findall('tag')),
            ['tag'] * 2)
        # this produces a warning
        self.assertEqual(summarize_list(ET.ElementTree(e).findall('//tag')),
            ['tag'] * 3)
class ElementIterTest(unittest.TestCase):
    """Element.iter()/itertext() document-order traversal and tag filters."""

    def _ilist(self, elem, tag=None):
        # Tags yielded by elem.iter(tag), in document order.
        return summarize_list(elem.iter(tag))

    def test_basic(self):
        doc = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
        self.assertEqual(self._ilist(doc), ['html', 'body', 'i'])
        self.assertEqual(self._ilist(doc.find('body')), ['body', 'i'])
        self.assertEqual(next(doc.iter()).tag, 'html')
        self.assertEqual(''.join(doc.itertext()), 'this is a paragraph...')
        self.assertEqual(''.join(doc.find('body').itertext()),
            'this is a paragraph.')
        self.assertEqual(next(doc.itertext()), 'this is a ')

        # iterparse should return an iterator
        sourcefile = serialize(doc, to_string=False)
        self.assertEqual(next(ET.iterparse(sourcefile))[0], 'end')

        # With an explitit parser too (issue #9708)
        sourcefile = serialize(doc, to_string=False)
        parser = ET.XMLParser(target=ET.TreeBuilder())
        self.assertEqual(next(ET.iterparse(sourcefile, parser=parser))[0],
                         'end')

        tree = ET.ElementTree(None)
        self.assertRaises(AttributeError, tree.iter)

        # Issue #16913: entity references interleaved with text/tails
        self.assertEqual(''.join(
            ET.XML("<root>a&amp;<sub>b&amp;</sub>c&amp;</root>").itertext()),
            'a&b&c&')

    def test_corners(self):
        # single root, no subelements
        a = ET.Element('a')
        self.assertEqual(self._ilist(a), ['a'])

        # one child
        b = ET.SubElement(a, 'b')
        self.assertEqual(self._ilist(a), ['a', 'b'])

        # one child and one grandchild
        c = ET.SubElement(b, 'c')
        self.assertEqual(self._ilist(a), ['a', 'b', 'c'])

        # two children, only first with grandchild
        d = ET.SubElement(a, 'd')
        self.assertEqual(self._ilist(a), ['a', 'b', 'c', 'd'])

        # replace first child by second
        a[0] = a[1]
        del a[1]
        self.assertEqual(self._ilist(a), ['a', 'd'])

    def test_iter_by_tag(self):
        doc = ET.XML('''
            <document>
                <house>
                    <room>bedroom1</room>
                    <room>bedroom2</room>
                </house>
                <shed>nothing here
                </shed>
                <house>
                    <room>bedroom8</room>
                </house>
            </document>''')

        self.assertEqual(self._ilist(doc, 'room'), ['room'] * 3)
        self.assertEqual(self._ilist(doc, 'house'), ['house'] * 2)

        # test that iter also accepts 'tag' as a keyword arg
        self.assertEqual(
            summarize_list(doc.iter(tag='room')),
            ['room'] * 3)

        # make sure both tag=None and tag='*' return all tags
        all_tags = ['document', 'house', 'room', 'room',
                    'shed', 'house', 'room']
        self.assertEqual(self._ilist(doc), all_tags)
        self.assertEqual(self._ilist(doc, '*'), all_tags)
class TreeBuilderTest(unittest.TestCase):
    """TreeBuilder targets: duck-typed builders, element factories,
    subclassing, and the doctype() callback."""

    # A document with a DOCTYPE, text, a child and a tail.
    sample1 = ('<!DOCTYPE html PUBLIC'
        ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
        ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
        '<html>text<div>subtext</div>tail</html>')

    sample2 = '''<toplevel>sometext</toplevel>'''

    def _check_sample1_element(self, e):
        # Structural checks for the tree built from sample1, shared below.
        self.assertEqual(e.tag, 'html')
        self.assertEqual(e.text, 'text')
        self.assertEqual(e.tail, None)
        self.assertEqual(e.attrib, {})
        children = list(e)
        self.assertEqual(len(children), 1)
        child = children[0]
        self.assertEqual(child.tag, 'div')
        self.assertEqual(child.text, 'subtext')
        self.assertEqual(child.tail, 'tail')
        self.assertEqual(child.attrib, {})

    def test_dummy_builder(self):
        # All builder callbacks (data/start/end) are optional; only
        # close() is used for the parser's return value.
        class BaseDummyBuilder:
            def close(self):
                return 42

        class DummyBuilder(BaseDummyBuilder):
            data = start = end = lambda *a: None

        parser = ET.XMLParser(target=DummyBuilder())
        parser.feed(self.sample1)
        self.assertEqual(parser.close(), 42)

        parser = ET.XMLParser(target=BaseDummyBuilder())
        parser.feed(self.sample1)
        self.assertEqual(parser.close(), 42)

        parser = ET.XMLParser(target=object())
        parser.feed(self.sample1)
        self.assertIsNone(parser.close())

    def test_treebuilder_elementfactory_none(self):
        # element_factory=None means the default Element class.
        parser = ET.XMLParser(target=ET.TreeBuilder(element_factory=None))
        parser.feed(self.sample1)
        e = parser.close()
        self._check_sample1_element(e)

    def test_subclass(self):
        class MyTreeBuilder(ET.TreeBuilder):
            def foobar(self, x):
                return x * 2

        tb = MyTreeBuilder()
        self.assertEqual(tb.foobar(10), 20)

        parser = ET.XMLParser(target=tb)
        parser.feed(self.sample1)

        e = parser.close()
        self._check_sample1_element(e)

    def test_element_factory(self):
        # The factory is called once per start tag.
        lst = []
        def myfactory(tag, attrib):
            nonlocal lst
            lst.append(tag)
            return ET.Element(tag, attrib)

        tb = ET.TreeBuilder(element_factory=myfactory)
        parser = ET.XMLParser(target=tb)
        parser.feed(self.sample2)
        parser.close()

        self.assertEqual(lst, ['toplevel'])

    def _check_element_factory_class(self, cls):
        # Build sample1 with *cls* as the element factory and verify
        # the resulting root is an instance of it.
        tb = ET.TreeBuilder(element_factory=cls)

        parser = ET.XMLParser(target=tb)
        parser.feed(self.sample1)
        e = parser.close()
        self.assertIsInstance(e, cls)
        self._check_sample1_element(e)

    def test_element_factory_subclass(self):
        class MyElement(ET.Element):
            pass
        self._check_element_factory_class(MyElement)

    def test_element_factory_pure_python_subclass(self):
        # Mimick SimpleTAL's behaviour (issue #16089): both versions of
        # TreeBuilder should be able to cope with a subclass of the
        # pure Python Element class.
        base = ET._Element_Py
        # Not from a C extension
        self.assertEqual(base.__module__, 'xml.etree.ElementTree')
        # Force some multiple inheritance with a C class to make things
        # more interesting.
        class MyElement(base, ValueError):
            pass
        self._check_element_factory_class(MyElement)

    def test_doctype(self):
        # A target may implement doctype() to receive DOCTYPE details.
        class DoctypeParser:
            _doctype = None

            def doctype(self, name, pubid, system):
                self._doctype = (name, pubid, system)

            def close(self):
                return self._doctype

        parser = ET.XMLParser(target=DoctypeParser())
        parser.feed(self.sample1)

        self.assertEqual(parser.close(),
            ('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
             'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
class XMLParserTest(unittest.TestCase):
    """XMLParser construction (positional/keyword args), subclassing,
    the deprecated doctype() hook, and non-BMP character handling."""

    sample1 = b'<file><line>22</line></file>'
    sample2 = (b'<!DOCTYPE html PUBLIC'
        b' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
        b' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
        b'<html>text</html>')
    # Includes a non-BMP character (U+1017B) in both attribute and text.
    sample3 = ('<?xml version="1.0" encoding="iso-8859-1"?>\n'
        '<money value="$\xa3\u20ac\U0001017b">$\xa3\u20ac\U0001017b</money>')

    def _check_sample_element(self, e):
        # Shared structural checks for the tree built from sample1.
        self.assertEqual(e.tag, 'file')
        self.assertEqual(e[0].tag, 'line')
        self.assertEqual(e[0].text, '22')

    def test_constructor_args(self):
        # Positional args. The first (html) is not supported, but should be
        # nevertheless correctly accepted.
        parser = ET.XMLParser(None, ET.TreeBuilder(), 'utf-8')
        parser.feed(self.sample1)
        self._check_sample_element(parser.close())

        # Now as keyword args.
        parser2 = ET.XMLParser(encoding='utf-8',
                               html=[{}],
                               target=ET.TreeBuilder())
        parser2.feed(self.sample1)
        self._check_sample_element(parser2.close())

    def test_subclass(self):
        class MyParser(ET.XMLParser):
            pass
        parser = MyParser()
        parser.feed(self.sample1)
        self._check_sample_element(parser.close())

    def test_subclass_doctype(self):
        # Overriding doctype() on an XMLParser subclass still works,
        # but triggers a DeprecationWarning.
        _doctype = None
        class MyParserWithDoctype(ET.XMLParser):
            def doctype(self, name, pubid, system):
                nonlocal _doctype
                _doctype = (name, pubid, system)

        parser = MyParserWithDoctype()
        with self.assertWarns(DeprecationWarning):
            parser.feed(self.sample2)
        parser.close()
        self.assertEqual(_doctype,
            ('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
             'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))

    def test_parse_string(self):
        # Feeding str (not bytes) must preserve non-BMP characters.
        parser = ET.XMLParser(target=ET.TreeBuilder())
        parser.feed(self.sample3)
        e = parser.close()
        self.assertEqual(e.tag, 'money')
        self.assertEqual(e.attrib['value'], '$\xa3\u20ac\U0001017b')
        self.assertEqual(e.text, '$\xa3\u20ac\U0001017b')
class NamespaceParseTest(unittest.TestCase):
    """findall() with fully-qualified {uri}tag paths plus a prefix map."""

    def test_find_with_namespace(self):
        namespaces = {'h': 'hello', 'f': 'foo'}
        doc = ET.fromstring(SAMPLE_XML_NS_ELEMS)
        # (path, expected match count) — qualified paths ignore the
        # prefixes in the map but the map must still be accepted.
        for path, expected in (('{hello}table', 1),
                               ('.//{hello}td', 2),
                               ('.//{foo}name', 1)):
            self.assertEqual(len(doc.findall(path, namespaces)), expected)
class ElementSlicingTest(unittest.TestCase):
    """Sequence-protocol indexing, slicing and slice deletion on children."""

    def _elem_tags(self, elemlist):
        # Tags of the given elements, in order.
        return [child.tag for child in elemlist]

    def _subelem_tags(self, elem):
        return self._elem_tags(list(elem))

    def _make_elem_with_children(self, numchildren):
        """Create an Element tagged 'a' whose children are tagged
        'a0', 'a1', ... in order."""
        parent = ET.Element('a')
        for idx in range(numchildren):
            ET.SubElement(parent, 'a%s' % idx)
        return parent

    def test_getslice_single_index(self):
        parent = self._make_elem_with_children(10)

        self.assertEqual(parent[1].tag, 'a1')
        self.assertEqual(parent[-2].tag, 'a8')

        self.assertRaises(IndexError, lambda: parent[12])

    def test_getslice_range(self):
        parent = self._make_elem_with_children(6)

        # Open-ended, exact, over-long, short and negative-stop slices.
        self.assertEqual(self._elem_tags(parent[3:]), ['a3', 'a4', 'a5'])
        self.assertEqual(self._elem_tags(parent[3:6]), ['a3', 'a4', 'a5'])
        self.assertEqual(self._elem_tags(parent[3:16]), ['a3', 'a4', 'a5'])
        self.assertEqual(self._elem_tags(parent[3:5]), ['a3', 'a4'])
        self.assertEqual(self._elem_tags(parent[3:-1]), ['a3', 'a4'])
        self.assertEqual(self._elem_tags(parent[:2]), ['a0', 'a1'])

    def test_getslice_steps(self):
        parent = self._make_elem_with_children(10)

        self.assertEqual(self._elem_tags(parent[8:10:1]), ['a8', 'a9'])
        self.assertEqual(self._elem_tags(parent[::3]), ['a0', 'a3', 'a6', 'a9'])
        self.assertEqual(self._elem_tags(parent[::8]), ['a0', 'a8'])
        self.assertEqual(self._elem_tags(parent[1::8]), ['a1', 'a9'])

    def test_getslice_negative_steps(self):
        parent = self._make_elem_with_children(4)

        self.assertEqual(self._elem_tags(parent[::-1]), ['a3', 'a2', 'a1', 'a0'])
        self.assertEqual(self._elem_tags(parent[::-2]), ['a3', 'a1'])

    def test_delslice(self):
        # (child count, slice to delete, tags remaining afterwards)
        cases = [
            (4, slice(0, 2), ['a2', 'a3']),
            (4, slice(0, None), []),
            (4, slice(None, None, -1), []),
            (4, slice(None, None, -2), ['a0', 'a2']),
            (4, slice(1, None, 2), ['a0', 'a2']),
            (2, slice(None, None, 2), ['a1']),
        ]
        for numchildren, sl, expected in cases:
            parent = self._make_elem_with_children(numchildren)
            del parent[sl]
            self.assertEqual(self._subelem_tags(parent), expected)
class IOTest(unittest.TestCase):
def tearDown(self):
support.unlink(TESTFN)
def test_encoding(self):
# Test encoding issues.
elem = ET.Element("tag")
elem.text = "abc"
self.assertEqual(serialize(elem), '<tag>abc</tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>abc</tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>abc</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>abc</tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.text = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag><&"\'></tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag><&"\'></tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag><&"\'></tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag><&\"'></tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag key="<&"\'>" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="<&"\'>" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="<&"\'>" />')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"<&"'>\" />" % enc).encode(enc))
elem = ET.Element("tag")
elem.text = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag>\xe5\xf6\xf6<></tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>åöö<></tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>åöö<></tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag key="\xe5\xf6\xf6<>" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="åöö<>" />')
for enc in ("iso-8859-1", "utf-16", "utf-16le", "utf-16be", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"åöö<>\" />" % enc).encode(enc))
def test_write_to_filename(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
tree.write(TESTFN)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_text_file(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'w', encoding='utf-8') as f:
tree.write(f, encoding='unicode')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'wb') as f:
tree.write(f)
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file_with_bom(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
# test BOM writing to buffered file
with open(TESTFN, 'wb') as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
# test BOM writing to non-buffered file
with open(TESTFN, 'wb', buffering=0) as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
def test_read_from_stringio(self):
tree = ET.ElementTree()
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
tree.parse(stream)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_stringio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
def test_read_from_bytesio(self):
tree = ET.ElementTree()
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
tree.parse(raw)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_bytesio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
tree.write(raw)
self.assertEqual(raw.getvalue(), b'''<site />''')
class dummy:
pass
def test_read_from_user_text_reader(self):
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = stream.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_user_text_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
writer = self.dummy()
writer.write = stream.write
tree.write(writer, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
def test_read_from_user_binary_reader(self):
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = raw.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
tree = ET.ElementTree()
def test_write_to_user_binary_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
tree.write(writer)
self.assertEqual(raw.getvalue(), b'''<site />''')
def test_write_to_user_binary_writer_with_bom(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
writer.seekable = lambda: True
writer.tell = raw.tell
tree.write(writer, encoding="utf-16")
self.assertEqual(raw.getvalue(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
def test_tostringlist_invariant(self):
root = ET.fromstring('<tag>foo</tag>')
self.assertEqual(
ET.tostring(root, 'unicode'),
''.join(ET.tostringlist(root, 'unicode')))
self.assertEqual(
ET.tostring(root, 'utf-16'),
b''.join(ET.tostringlist(root, 'utf-16')))
def test_short_empty_elements(self):
root = ET.fromstring('<tag>a<x />b<y></y>c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode'),
'<tag>a<x />b<y />c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode', short_empty_elements=True),
'<tag>a<x />b<y />c</tag>')
self.assertEqual(
ET.tostring(root, 'unicode', short_empty_elements=False),
'<tag>a<x></x>b<y></y>c</tag>')
class ParseErrorTest(unittest.TestCase):
def test_subclass(self):
self.assertIsInstance(ET.ParseError(), SyntaxError)
def _get_error(self, s):
try:
ET.fromstring(s)
except ET.ParseError as e:
return e
def test_error_position(self):
self.assertEqual(self._get_error('foo').position, (1, 0))
self.assertEqual(self._get_error('<tag>&foo;</tag>').position, (1, 5))
self.assertEqual(self._get_error('foobar<').position, (1, 6))
def test_error_code(self):
import xml.parsers.expat.errors as ERRORS
self.assertEqual(self._get_error('foo').code,
ERRORS.codes[ERRORS.XML_ERROR_SYNTAX])
class KeywordArgsTest(unittest.TestCase):
# Test various issues with keyword arguments passed to ET.Element
# constructor and methods
def test_issue14818(self):
x = ET.XML("<a>foo</a>")
self.assertEqual(x.find('a', None),
x.find(path='a', namespaces=None))
self.assertEqual(x.findtext('a', None, None),
x.findtext(path='a', default=None, namespaces=None))
self.assertEqual(x.findall('a', None),
x.findall(path='a', namespaces=None))
self.assertEqual(list(x.iterfind('a', None)),
list(x.iterfind(path='a', namespaces=None)))
self.assertEqual(ET.Element('a').attrib, {})
elements = [
ET.Element('a', dict(href="#", id="foo")),
ET.Element('a', attrib=dict(href="#", id="foo")),
ET.Element('a', dict(href="#"), id="foo"),
ET.Element('a', href="#", id="foo"),
ET.Element('a', dict(href="#", id="foo"), href="#", id="foo"),
]
for e in elements:
self.assertEqual(e.tag, 'a')
self.assertEqual(e.attrib, dict(href="#", id="foo"))
e2 = ET.SubElement(elements[0], 'foobar', attrib={'key1': 'value1'})
self.assertEqual(e2.attrib['key1'], 'value1')
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', "I'm not a dict")
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', attrib="I'm not a dict")
# --------------------------------------------------------------------
class NoAcceleratorTest(unittest.TestCase):
def setUp(self):
if not pyET:
raise unittest.SkipTest('only for the Python version')
# Test that the C accelerator was not imported for pyET
def test_correct_import_pyET(self):
# The type of methods defined in Python code is types.FunctionType,
# while the type of methods defined inside _elementtree is
# <class 'wrapper_descriptor'>
self.assertIsInstance(pyET.Element.__init__, types.FunctionType)
self.assertIsInstance(pyET.XMLParser.__init__, types.FunctionType)
# --------------------------------------------------------------------
class CleanContext(object):
"""Provide default namespace mapping and path cache."""
checkwarnings = None
def __init__(self, quiet=False):
if sys.flags.optimize >= 2:
# under -OO, doctests cannot be run and therefore not all warnings
# will be emitted
quiet = True
deprecations = (
# Search behaviour is broken if search path starts with "/".
("This search is broken in 1.3 and earlier, and will be fixed "
"in a future version. If you rely on the current behaviour, "
"change it to '.+'", FutureWarning),
# Element.getchildren() and Element.getiterator() are deprecated.
("This method will be removed in future versions. "
"Use .+ instead.", DeprecationWarning),
("This method will be removed in future versions. "
"Use .+ instead.", PendingDeprecationWarning))
self.checkwarnings = support.check_warnings(*deprecations, quiet=quiet)
def __enter__(self):
from xml.etree import ElementPath
self._nsmap = ET.register_namespace._namespace_map
# Copy the default namespace mapping
self._nsmap_copy = self._nsmap.copy()
# Copy the path cache (should be empty)
self._path_cache = ElementPath._cache
ElementPath._cache = self._path_cache.copy()
self.checkwarnings.__enter__()
def __exit__(self, *args):
from xml.etree import ElementPath
# Restore mapping and path cache
self._nsmap.clear()
self._nsmap.update(self._nsmap_copy)
ElementPath._cache = self._path_cache
self.checkwarnings.__exit__(*args)
def test_main(module=None):
# When invoked without a module, runs the Python ET tests by loading pyET.
# Otherwise, uses the given module as the ET.
global pyET
pyET = import_fresh_module('xml.etree.ElementTree',
blocked=['_elementtree'])
if module is None:
module = pyET
global ET
ET = module
test_classes = [
ModuleTest,
ElementSlicingTest,
BasicElementTest,
ElementTreeTest,
IOTest,
ParseErrorTest,
XIncludeTest,
ElementTreeTypeTest,
ElementFindTest,
ElementIterTest,
TreeBuilderTest,
XMLParserTest,
XMLPullParserTest,
BugsTest,
]
# These tests will only run for the pure-Python version that doesn't import
# _elementtree. We can't use skipUnless here, because pyET is filled in only
# after the module is loaded.
if pyET is not ET:
test_classes.extend([
NoAcceleratorTest,
])
try:
# XXX the C module should give the same warnings as the Python module
with CleanContext(quiet=(pyET is not ET)):
support.run_unittest(*test_classes)
finally:
# don't interfere with subsequent tests
ET = pyET = None
if __name__ == '__main__':
test_main()
| mit |
rgommers/statsmodels | statsmodels/tsa/tests/results/results_arima.py | 33 | 24118 | import os
import numpy as np
from numpy import genfromtxt
cur_dir = os.path.dirname(os.path.abspath(__file__))
forecast_results = genfromtxt(open(cur_dir+"/results_arima_forecasts.csv",
"rb"), names=True, delimiter=",", dtype=float)
#NOTE:
# stata gives no indication of no convergence for 112 CSS but gives a
# different answer than x12arima, gretl simply fails to converge
# redid stata with starting parameters from x12arima
# it looks like stata uses a different formula for the CSS likelihood
# they appear to be using a larger sample than R, gretl, or us.
# CSS results are therefore taken from R and gretl
class ARIMA111(object):
def __init__(self, method="mle"):
self.k_ar = 1
self.k_diff = 1
self.k_ma = 1
if method == "mle":
# from stata
from .arima111_results import results
# unpack stata results
self.__dict__.update(results)
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
#their bse are OPG
#self.bse = np.diag(self.cov_params) ** .5
# from gretl
self.arroots = [1.0640 + 0j]
self.maroots = [1.2971 + 0j]
self.hqic = 496.8653
self.aic_gretl = 491.5112
self.bic_gretl = 504.7442
#self.bse = [.205811, .0457010, .0897565]
self.tvalues = [4.280, 20.57, -8.590]
self.pvalues = [1.87e-5, 5.53e-94, 8.73e-18]
self.cov_params = [[0.0423583, -0.00167449, 0.00262911],
[-0.00167449, 0.00208858, -0.0035068],
[0.00262911, -0.0035068, 0.00805622]]
self.bse = np.diag(np.sqrt(self.cov_params))
# from stata
#forecast = genfromtxt(open(cur_dir+"/arima111_forecasts.csv"),
# delimiter=",", skip_header=1, usecols=[1,2,3,4,5])
#self.forecast = forecast[203:,1]
#self.fcerr = forecast[203:,2]
#self.fc_conf_int = forecast[203:,3:]
# from gretl
self.forecast = forecast_results['fc111c'][-25:]
self.forecasterr = forecast_results['fc111cse'][-25:]
self.forecast_dyn = forecast_results['fc111cdyn']
self.forecasterr_dyn = forecast_results['fc111cdynse']
else:
#from arima111_css_results import results
# coefs, bse, tvalues, and pvalues taken from R because gretl
# uses mean not constant
self.bse = [0.21583833, 0.03844939, 0.08566390]
self.params = [1.0087257, 0.9455393, -0.8021834]
self.sigma2 = 0.6355913
self.tvalues = [4.673524, 24.591788, -9.364311]
self.pvalues = [5.464467e-06, 0, 0]
self.cov_params = np.array([
[ 0.046586183, 0.002331183, -0.004647432 ],
[ 0.002331183, 0.001478356, -0.002726201 ],
[-0.004647432, -0.002726201, 0.007338304 ]])
# from gretl
self.llf = -239.6601
self.aic = 487.3202
self.bic = 500.5334
self.hqic = 492.6669
self.arroots = [1.0578 + 0j]
self.maroots = [1.2473 + 0j]
#cov_params = np.array([[0.00369569, -0.00271777, 0.00269806],
# [0, 0.00209573, -0.00224559],
# [0, 0, 0.00342769]])
#self.cov_params = cov_params + cov_params.T - \
# np.diag(np.diag(cov_params))
#self.bse = np.diag(np.sqrt(self.cov_params))
self.resid = [-0.015830, -0.236884, -0.093946, -0.281152,
-0.089983, -0.226336, -0.351666, -0.198703,
-0.258418, -0.259026, -0.149513, -0.325703,
-0.165703, -0.279229, -0.295711, -0.120018,
-0.289870, -0.154243, -0.348403, -0.273902,
-0.240894, -0.182791, -0.252930, -0.152441,
-0.296412, -0.128941, 0.024068, -0.243972,
-0.011436, -0.392437, -0.217022, -0.118190,
-0.133489, -0.045755, -0.169953, 0.025010,
-0.107754, -0.119661, 0.070794, -0.065586,
-0.080390, 0.007741, -0.016138, -0.235283,
-0.121907, -0.125546, -0.428463, -0.087713,
-0.298131, -0.277757, -0.261422, -0.248326,
-0.137826, -0.043771, 0.437100, -0.150051,
0.751890, 0.424180, 0.450514, 0.277089,
0.732583, 0.225086, -0.403648, -0.040509,
-0.132975, -0.112572, -0.696214, 0.003079,
-0.003491, -0.108758, 0.401383, -0.162302,
-0.141547, 0.175094, 0.245346, 0.607134, 0.519045,
0.248419, 0.920521, 1.097613, 0.755983, 1.271156,
1.216969, -0.121014, 0.340712, 0.732750, 0.068915,
0.603912, 0.060157, -0.803110, -1.044392, 1.040311,
-0.984497, -1.611668, -0.258198, -0.112970,
-0.091071, 0.226487, 0.097475, -0.311423, -0.061105,
-0.449488, 0.317277, -0.329734, -0.181248, 0.443263,
-2.223262, 0.096836, -0.033782, 0.456032, 0.476052,
0.197564, 0.263362, 0.021578, 0.216803, 0.284249,
0.343786, 0.196981, 0.773819, 0.169070, -0.343097,
0.918962, 0.096363, 0.298610, 1.571685, -0.236620,
-1.073822, -0.194208, -0.250742, -0.101530,
-0.076437, -0.056319, 0.059811, -0.041620,
-0.128404, -0.403446, 0.059654, -0.347208,
-0.095257, 0.217668, -0.015057, 0.087431, 0.275062,
-0.263580, -0.122746, 0.195629, 0.367272,
-0.184188, 0.146368, 0.127777, -0.587128,
-0.498538, 0.172490, -0.456741, -0.694000,
0.199392, -0.140634, -0.029636, 0.364818,
-0.097080, 0.510745, 0.230842, 0.595504, 0.709721,
0.012218, 0.520223, -0.445174, -0.168341,
-0.935465, -0.894203, 0.733417, -0.279707,
0.258861, 0.417969, -0.443542, -0.477955, 0.288992,
0.442126, 0.075826, 0.665759, 0.571509, -0.204055,
0.835901, -0.375693, 3.292828, -1.469299,
-0.122206, 0.617909, -2.250468, 0.570871, 1.166013,
0.079873, 0.463372, 1.981434, -0.142869, 3.023376,
-3.713161, -6.120150, -0.007487, 1.267027, 1.176930]
self.linear = [29.3658, 29.6069, 29.6339, 29.8312, 29.8400,
30.0663, 30.1617, 30.1187, 30.2384, 30.2990,
30.3595, 30.5457, 30.5457, 30.7192, 30.7757,
30.8100, 31.0399, 31.0942, 31.2984, 31.2939,
31.3609, 31.4628, 31.6329, 31.7324, 31.9464,
32.0089, 32.2559, 32.6940, 32.8614, 33.2924,
33.3170, 33.5182, 33.8335, 34.1458, 34.5700,
34.8750, 35.4078, 35.8197, 36.2292, 36.8656,
37.3804, 37.8923, 38.5161, 39.1353, 39.5219,
40.0255, 40.5285, 40.6877, 41.1981, 41.4778,
41.7614, 42.0483, 42.3378, 42.7438, 43.2629,
44.3501, 44.8481, 46.3758, 47.6495, 49.0229,
50.2674, 52.0749, 53.4036, 54.0405, 55.0330,
55.9126, 56.7962, 56.9969, 57.9035, 58.8088,
59.5986, 60.9623, 61.7415, 62.5249, 63.6547,
64.8929, 66.5810, 68.2516, 69.6795, 71.9024,
74.4440, 76.7288, 79.6830, 82.7210, 84.3593,
86.4672, 89.0311, 90.8961, 93.3398, 95.2031,
96.0444, 96.4597, 99.0845, 99.5117, 99.0582,
99.9130, 100.8911, 101.8735, 103.2025, 104.4114,
105.1611, 106.1495, 106.6827, 108.0297, 108.6812,
109.4567, 110.9233, 109.4032, 110.2338, 110.9440,
112.2239, 113.6024, 114.7366, 115.9784, 116.9832,
118.2158, 119.5562, 121.0030, 122.3262, 124.3309,
125.7431, 126.5810, 128.8036, 130.2014, 131.8283,
134.9366, 136.1738, 136.3942, 137.4507, 138.4015,
139.4764, 140.5563, 141.6402, 142.8416, 143.9284,
144.9034, 145.5403, 146.6472, 147.2953, 148.1823,
149.4151, 150.4126, 151.5249, 152.8636, 153.6227,
154.5044, 155.7327, 157.1842, 158.0536, 159.2722,
160.4871, 160.8985, 161.3275, 162.4567, 162.8940,
163.0006, 164.0406, 164.7296, 165.5352, 166.7971,
167.5893, 169.0692, 170.3045, 171.9903, 173.8878,
175.0798, 176.8452, 177.5683, 178.5355, 178.5942,
178.5666, 180.2797, 180.9411, 182.1820, 183.6435,
184.1780, 184.6110, 185.8579, 187.3242, 188.4342,
190.2285, 192.0041, 192.9641, 195.0757, 195.9072,
200.8693, 200.8222, 202.0821, 204.1505, 203.0031,
204.7540, 207.2581, 208.6696, 210.5136, 214.1399,
215.5866, 220.6022, 218.2942, 212.6785, 213.2020,
215.2081]
# forecasting isn't any different for css
# except you lose the first p+1 observations for in-sample
# these results are from x-12 arima
self.forecast = forecast_results['fc111c_css'][-25:]
self.forecasterr = forecast_results['fc111cse_css'][-25:]
self.forecast_dyn = forecast_results['fc111cdyn_css']
self.forecasterr_dyn = forecast_results['fc111cdynse_css']
class ARIMA211(object):
def __init__(self, method="mle"):
if method == 'mle':
# from stata
from .arima111_results import results
self.__dict__.update(results)
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
self.k_diff = 1
#their bse are OPG
#self.bse = np.diag(self.cov_params) ** .5
# from gretl
self.arroots = [1.027 + 0j, 5.7255+ 0j]
self.maroots = [1.1442+0j]
self.hqic = 496.5314
self.aic_gretl = 489.8388
self.bic_gretl = 506.3801
#self.bse = [0.248376, 0.102617, 0.0871312, 0.0696346]
self.tvalues = [3.468, 11.14, -1.941, 12.55]
self.pvalues = [.0005, 8.14e-29, .0522, 3.91e-36]
cov_params = np.array([
[0.0616906, -0.00250187, 0.0010129, 0.00260485],
[0, 0.0105302, -0.00867819, -0.00525614],
[ 0 ,0, 0.00759185, 0.00361962],
[ 0 ,0,0, 0.00484898]])
self.cov_params = cov_params + cov_params.T - \
np.diag(np.diag(cov_params))
self.bse = np.diag(np.sqrt(self.cov_params))
self.forecast = forecast_results['fc211c'][-25:]
self.forecasterr = forecast_results['fc211cse'][-25:]
self.forecast_dyn = forecast_results['fc211cdyn'][-25:]
self.forecasterr_dyn = forecast_results['fc211cdynse'][-25:]
else:
from .arima211_css_results import results
self.__dict__.update(results)
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
self.k_diff = 1
# from gretl
self.arroots = [1.0229 + 0j, 4.4501 + 0j]
self.maroots = [1.0604 + 0j]
self.hqic = 489.3225
self.aic_gretl = 482.6486
self.bic_gretl = 499.1402
self.tvalues = [.7206, 22.54, -19.04]
self.pvalues = [.4712, 1.52e-112, 2.19e-10, 8.00e-81]
cov_parmas = np.array([
[8.20496e-04, -0.0011992, 4.57078e-04, 0.00109907],
[0, 0.00284432, -0.0016752, -0.00220223],
[0, 0, 0.00119783, 0.00108868],
[0, 0, 0, 0.00245324]])
self.cov_params = cov_params + cov_params.T - \
np.diag(np.diag(cov_params))
self.bse = np.diag(np.sqrt(self.cov_params))
# forecasting isn't any different for css
# except you lose the first p+1 observations for in-sample
self.forecast = forecast_results['fc111c_css'][-25:]
self.forecasterr = forecast_results['fc111cse_css'][-25:]
self.forecast_dyn = forecast_results['fc111cdyn_css']
self.forecasterr_dyn = forecast_results['fc111cdynse_css']
class ARIMA112(object):
def __init__(self, method="mle"):
self.df_model = 3
self.k = 5
self.k_ar = 1
self.k_ma = 2
self.k_exog = 1
self.k_diff = 1
if method == "mle":
from .arima112_results import results
# from gretl
self.arroots = [1.0324 + 0j]
self.maroots = [1.1447 + 0j, -4.8613+0j]
self.hqic = 495.5852
self.aic_gretl = 488.8925
self.bic_gretl = 505.4338
self.tvalues = [3.454, 31.10, -7.994, -2.127]
self.pvalues = [0.0006, 2.1e-212, 1.31e-15, .0334]
cov_params = np.array([
[0.0620096, -0.00172172, 0.00181301, 0.00103271],
[0, 9.69682e-04, -9.70767e-04, -8.99814e-04],
[0, 0, 0.00698068, -0.00443871],
[0, 0, 0, 0.00713662]])
self.cov_params = cov_params + cov_params.T - \
np.diag(np.diag(cov_params))
self.bse = np.diag(np.sqrt(self.cov_params))
# from gretl
self.forecast = forecast_results['fc112c'][-25:]
self.forecasterr = forecast_results['fc112cse'][-25:]
self.forecast_dyn = forecast_results['fc112cdyn']
self.forecasterr_dyn = forecast_results['fc112cdynse']
# unpack stata results
self.__dict__ = results
self.resid = self.resid[1:]
self.params = self.params[:-1]
self.sigma2 = self.sigma**2
self.aic = self.icstats[4]
self.bic = self.icstats[5]
self.fittedvalues = self.xb[1:] # no idea why this initial value
self.linear = self.y[1:]
#their bse are OPG
#self.bse = np.diag(self.cov_params) ** .5
else: #NOTE: this looks like a "hard" problem
#unable to replicate stata's results even with their starting
#values
# unable to replicate x12 results in stata using their starting
# values. x-12 has better likelihood and we can replicate so
# use their results
#from arima112_css_results import results
# taken from R using X12-arima values as init params
self.bse = [0.07727588, 0.09356658, 0.10503567, 0.07727970]
self.params = [ 0.9053219, -0.692412, 1.0736728, 0.1720008]
self.sigma2 = 0.6820727
self.tvalues = [11.715452, -7.400215, 10.221983, 2.225692]
self.pvalues = [0, 3.791634e-12, 0, 2.716275e-02]
self.cov_params = np.array([
[ 0.0059715623, 0.001327824, -0.001592129, -0.0008061933],
[ 0.0013278238, 0.008754705, -0.008024634, -0.0045933413],
[-0.0015921293,-0.008024634, 0.011032492, 0.0072509641],
[-0.0008061933,-0.004593341, 0.007250964, 0.0059721516]])
# from x12arima via gretl
# gretl did not converge for this model...
self.llf = -246.7534
self.nobs = 202
#self.params = [.905322, -.692425, 1.07366, 0.172024]
#self.sigma2 = 0.682072819129
#self.bse = [0.0756430, 0.118440, 0.140691, 0.105266]
self.resid = resid = [-1.214477, -0.069772, -1.064510, -0.249555,
-0.874206, -0.322177, -1.003579, -0.310040, -0.890506,
-0.421211, -0.715219, -0.564119, -0.636560, -0.580912,
-0.717440, -0.424277, -0.747835, -0.424739, -0.805958,
-0.516877, -0.690127, -0.473072, -0.694766, -0.435627,
-0.736474, -0.388060, -0.429596, -0.557224, -0.342308,
-0.741842, -0.442199, -0.491319, -0.420884, -0.388057,
-0.466176, -0.257193, -0.429646, -0.349683, -0.205870,
-0.335547, -0.290300, -0.216572, -0.234272, -0.427951,
-0.255446, -0.338097, -0.579033, -0.213860, -0.556756,
-0.389907, -0.510060, -0.409759, -0.396778, -0.258727,
0.160063, -0.467109, 0.688004, -0.021120, 0.503044,
0.031500, 0.878365, -0.003548, -0.079327, 0.038289,
0.032773, -0.050780, -0.560124, 0.185655, -0.111981,
-0.020714, 0.363254, -0.218484, -0.006161, 0.165950,
0.252365, 0.599220, 0.488921, 0.347677, 1.079814,
1.102745, 0.959907, 1.570836, 1.454934, 0.343521,
1.125826, 1.154059, 0.666141, 1.269685, 0.551831,
-0.027476, -0.305192, 1.715665, -0.990662, -0.548239,
-0.011636, 0.197796, -0.050128, 0.480031, 0.061198,
-0.049562, 0.064436, -0.300420, 0.494730, -0.411527,
0.109242, 0.375255, -2.184482, 0.717733, -0.673064,
0.751681, -0.092543, 0.438016, -0.024881, 0.250085,
0.096010, 0.452618, 0.265491, 0.374299, 0.820424,
0.238176, -0.059646, 1.214061, 0.028679, 0.797567,
1.614444, -0.094717, -0.408067, 0.299198, -0.021561,
0.231915, 0.084190, 0.199192, 0.201132, 0.148509,
0.035431, -0.203352, 0.264744, -0.319785, 0.150305,
0.184628, 0.074637, 0.148340, 0.357372, -0.241250,
0.119294, 0.204413, 0.458730, -0.190477, 0.416587,
0.084216, -0.363361, -0.310339, 0.309728, -0.549677,
-0.449092, 0.183025, -0.259015, -0.000883, 0.267255,
-0.188068, 0.577697, 0.049310, 0.746401, 0.565829,
0.178270, 0.709983, -0.348012, 0.273262, -0.873288,
-0.403100, 0.720072, -0.428076, 0.488246, 0.248152,
-0.313214, -0.323137, 0.414843, 0.308909, 0.134180,
0.732275, 0.535639, -0.056128, 1.128355, -0.449151,
3.879123, -2.303860, 1.712549, -0.074407, -1.162052,
0.848316, 1.262031, 0.009320, 1.017563, 1.978597,
-0.001637, 3.782223, -4.119563, -3.666488, 0.345244,
0.869998, 0.635321]
self.linear = [30.5645, 29.4398, 30.6045, 29.7996, 30.6242,
30.1622, 30.8136, 30.2300, 30.8705, 30.4612, 30.9252,
30.7841, 31.0166, 31.0209, 31.1974, 31.1143, 31.4978,
31.3647, 31.7560, 31.5369, 31.8101, 31.7531, 32.0748,
32.0156, 32.3865, 32.2681, 32.7096, 33.0072, 33.1923,
33.6418, 33.5422, 33.8913, 34.1209, 34.4881, 34.8662,
35.1572, 35.7296, 36.0497, 36.5059, 37.1355, 37.5903,
38.1166, 38.7343, 39.3280, 39.6554, 40.2381, 40.6790,
40.8139, 41.4568, 41.5899, 42.0101, 42.2098, 42.5968,
42.9587, 43.5399, 44.6671, 44.9120, 46.8211, 47.5970,
49.2685, 50.1216, 52.3035, 53.0793, 53.9617, 54.8672,
55.8508, 56.6601, 56.8143, 58.0120, 58.7207, 59.6367,
61.0185, 61.6062, 62.5340, 63.6476, 64.9008, 66.6111,
68.1523, 69.5202, 71.8973, 74.2401, 76.4292, 79.4451,
82.2565, 83.5742, 86.0459, 88.4339, 90.2303, 92.8482,
94.4275, 95.3052, 95.7843, 99.0907, 98.4482, 98.8116,
99.6022, 100.8501, 101.6200, 103.2388, 104.1496,
105.0356, 106.0004, 106.5053, 108.1115, 108.3908,
109.5247, 110.8845, 108.7823, 110.8731, 110.6483,
112.7925, 113.3620, 115.0249, 115.7499, 117.1040,
118.0474, 119.6345, 120.8257, 122.2796, 124.2618,
125.4596, 126.2859, 128.8713, 129.7024, 131.7856,
134.7947, 135.5081, 135.9008, 137.2216, 138.0681,
139.3158, 140.3008, 141.4989, 142.6515, 143.7646,
144.7034, 145.3353, 146.6198, 147.0497, 148.2154,
149.3254, 150.3517, 151.4426, 152.8413, 153.3807,
154.4956, 155.6413, 157.1905, 157.7834, 159.3158,
160.2634, 160.7103, 161.1903, 162.5497, 162.6491,
163.0170, 164.1590, 164.7009, 165.6327, 166.8881,
167.5223, 169.2507, 170.1536, 172.1342, 173.7217,
174.8900, 176.7480, 177.1267, 178.4733, 178.1031,
178.5799, 180.4281, 180.7118, 182.3518, 183.5132,
184.0231, 184.4852, 185.9911, 187.2658, 188.3677,
190.2644, 191.8561, 192.6716, 195.1492, 195.3209,
201.7039, 198.9875, 202.7744, 203.0621, 202.7257,
204.6580, 207.3287, 208.1154, 210.5164, 213.9986,
214.8278, 221.0086, 215.8405, 212.3258, 213.5990,
215.7497]
self.yr = []
self.arroots = [-1.4442 + 0j]
self.maroots = [-1.1394 + 0j, -5.1019+0j]
self.hqic = 510.1902
self.aic = 503.5069
self.bic = 520.0234
#self.tvalues = [11.97, -5.846, 7.631, 1.634]
#self.pvalues = [5.21e-33, 5.03e-9, 2.32e-14, .1022]
#cov_params = np.array([
# [0.0620096, -0.00172172, 0.00181301, 0.00103271],
# [0, 9.69682e-04, -9.70767e-04, -8.99814e-04],
# [0, 0, 0.00698068, -0.00443871],
# [0, 0, 0, 0.00713662]])
#self.cov_params = cov_params + cov_params.T - \
# np.diag(np.diag(cov_params))
#self.bse = np.diag(np.sqrt(self.cov_params))
self.forecast = forecast_results['fc112c_css'][-25:]
self.forecasterr = forecast_results['fc112cse_css'][-25:]
self.forecast_dyn = forecast_results['fc112cdyn_css']
self.forecasterr_dyn = forecast_results['fc112cdynse_css']
| bsd-3-clause |
superdesk/Live-Blog | documentor/libraries/docutils-0.9.1-py3.2/docutils/parsers/rst/directives/images.py | 2 | 6813 | # $Id: images.py 7256 2011-12-14 23:53:38Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for figures and simple images.
"""
__docformat__ = 'reStructuredText'
import sys
import urllib.request, urllib.parse, urllib.error
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, states
from docutils.nodes import fully_normalize_name, whitespace_normalize_name
from docutils.parsers.rst.roles import set_classes
try: # check for the Python Imaging Library
import PIL
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
class Image(Directive):
    """
    The ``.. image::`` directive.

    Produces an ``image`` node from the URI argument and the recognized
    options; when a ``:target:`` option is present the image node is wrapped
    in a ``reference`` node pointing at that target.
    """
    align_h_values = ('left', 'center', 'right')
    align_v_values = ('top', 'middle', 'bottom')
    align_values = align_v_values + align_h_values
    def align(argument):
        # This is not callable as self.align. We cannot make it a
        # staticmethod because we're saving an unbound method in
        # option_spec below.
        return directives.choice(argument, Image.align_values)
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'alt': directives.unchanged,
                   'height': directives.length_or_unitless,
                   'width': directives.length_or_percentage_or_unitless,
                   'scale': directives.percentage,
                   'align': align,
                   'name': directives.unchanged,
                   'target': directives.unchanged_required,
                   'class': directives.class_option}
    def run(self):
        """
        Build and return the node list for this directive: any system
        messages followed by either a ``reference`` (wrapping the image)
        or the bare ``image`` node.
        """
        if 'align' in self.options:
            if isinstance(self.state, states.SubstitutionDef):
                # Inside a substitution definition only vertical alignment
                # makes sense (the image flows inline with text).
                # Check for align_v_values.
                if self.options['align'] not in self.align_v_values:
                    raise self.error(
                        'Error in "%s" directive: "%s" is not a valid value '
                        'for the "align" option within a substitution '
                        'definition. Valid values for "align" are: "%s".'
                        % (self.name, self.options['align'],
                           '", "'.join(self.align_v_values)))
            elif self.options['align'] not in self.align_h_values:
                raise self.error(
                    'Error in "%s" directive: "%s" is not a valid value for '
                    'the "align" option. Valid values for "align" are: "%s".'
                    % (self.name, self.options['align'],
                       '", "'.join(self.align_h_values)))
        messages = []
        reference = directives.uri(self.arguments[0])
        self.options['uri'] = reference
        reference_node = None
        if 'target' in self.options:
            block = states.escape2null(
                self.options['target']).splitlines()
            block = [line for line in block]
            # parse_target classifies the target as a direct URI or a
            # reference name (resolved later by the transforms).
            target_type, data = self.state.parse_target(
                block, self.block_text, self.lineno)
            if target_type == 'refuri':
                reference_node = nodes.reference(refuri=data)
            elif target_type == 'refname':
                reference_node = nodes.reference(
                    refname=fully_normalize_name(data),
                    name=whitespace_normalize_name(data))
                reference_node.indirect_reference_name = data
                self.state.document.note_refname(reference_node)
            else: # malformed target
                messages.append(data) # data is a system message
            del self.options['target']
        set_classes(self.options)
        image_node = nodes.image(self.block_text, **self.options)
        self.add_name(image_node)
        if reference_node:
            # Image becomes the (clickable) content of the reference.
            reference_node += image_node
            return messages + [reference_node]
        else:
            return messages + [image_node]
class Figure(Image):
    """
    The ``.. figure::`` directive: an image plus an optional caption
    (first content paragraph) and legend (remaining content).

    Adds ``figwidth``/``figclass`` options on top of :class:`Image`, and
    restricts ``align`` to horizontal values.
    """
    def align(argument):
        # Figures are block-level, so only horizontal alignment applies.
        return directives.choice(argument, Figure.align_h_values)
    def figwidth_value(argument):
        # ``:figwidth: image`` means "measure the image file"; anything else
        # is a normal length/percentage (defaulting to px).
        if argument.lower() == 'image':
            return 'image'
        else:
            return directives.length_or_percentage_or_unitless(argument, 'px')
    option_spec = Image.option_spec.copy()
    option_spec['figwidth'] = figwidth_value
    option_spec['figclass'] = directives.class_option
    option_spec['align'] = align
    has_content = True
    def run(self):
        """
        Build the ``figure`` node (wrapping the image from Image.run),
        attaching width/classes/align and parsing caption + legend from
        the directive content.
        """
        figwidth = self.options.pop('figwidth', None)
        figclasses = self.options.pop('figclass', None)
        align = self.options.pop('align', None)
        (image_node,) = Image.run(self)
        if isinstance(image_node, nodes.system_message):
            return [image_node]
        figure_node = nodes.figure('', image_node)
        if figwidth == 'image':
            if PIL and self.state.document.settings.file_insertion_enabled:
                imagepath = urllib.request.url2pathname(image_node['uri'])
                try:
                    # NOTE(review): passes a bytes path to PIL.Image.open —
                    # presumably for non-ASCII filesystem paths; confirm
                    # against the supported PIL versions before changing.
                    img = PIL.Image.open(
                        imagepath.encode(sys.getfilesystemencoding()))
                except (IOError, UnicodeEncodeError):
                    pass # TODO: warn?
                else:
                    self.state.document.settings.record_dependencies.add(
                        imagepath.replace('\\', '/'))
                    figure_node['width'] = img.size[0]
                    del img
        elif figwidth is not None:
            figure_node['width'] = figwidth
        if figclasses:
            figure_node['classes'] += figclasses
        if align:
            figure_node['align'] = align
        if self.content:
            node = nodes.Element() # anonymous container for parsing
            self.state.nested_parse(self.content, self.content_offset, node)
            first_node = node[0]
            if isinstance(first_node, nodes.paragraph):
                caption = nodes.caption(first_node.rawsource, '',
                                        *first_node.children)
                figure_node += caption
            elif not (isinstance(first_node, nodes.comment)
                      and len(first_node) == 0):
                error = self.state_machine.reporter.error(
                    'Figure caption must be a paragraph or empty comment.',
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return [figure_node, error]
            if len(node) > 1:
                figure_node += nodes.legend('', *node[1:])
        return [figure_node]
| agpl-3.0 |
jolyonb/edx-platform | common/lib/xmodule/xmodule/x_module.py | 1 | 77933 | from __future__ import absolute_import
import logging
import os
import sys
import time
from collections import namedtuple
from functools import partial
from pkg_resources import resource_exists, resource_isdir, resource_listdir, resource_string
import six
import yaml
from contracts import contract, new_contract
from lazy import lazy
from lxml import etree
from opaque_keys.edx.asides import AsideDefinitionKeyV2, AsideUsageKeyV2
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangolib.markup import HTML
from six import text_type
from six.moves import map
from web_fragments.fragment import Fragment
from webob import Response
from webob.multidict import MultiDict
from xblock.core import XBlock, XBlockAside
from xblock.fields import (
Dict,
Float,
Integer,
List,
Reference,
ReferenceList,
ReferenceValueDict,
Scope,
ScopeIds,
String,
UserScope
)
from xblock.runtime import IdGenerator, IdReader, Runtime
from xmodule import block_metadata_utils
from xmodule.errortracker import exc_info_to_str
from xmodule.exceptions import UndefinedContext
from xmodule.fields import RelativeTime
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.util.xmodule_django import add_webpack_to_fragment
log = logging.getLogger(__name__)
# Metric names / sample rate for xmodule timing stats (presumably consumed by
# DataDog, like DEPRECATION_VSCOMPAT_EVENT below — confirm against emitters).
XMODULE_METRIC_NAME = 'edxapp.xmodule'
XMODULE_DURATION_METRIC_NAME = XMODULE_METRIC_NAME + '.duration'
XMODULE_METRIC_SAMPLE_RATE = 0.1
# Stats event sent to DataDog in order to determine if old XML parsing can be deprecated.
DEPRECATION_VSCOMPAT_EVENT = 'deprecation.vscompat'
# xblock view names
# This is the view that will be rendered to display the XBlock in the LMS.
# It will also be used to render the block in "preview" mode in Studio, unless
# the XBlock also implements author_view.
STUDENT_VIEW = 'student_view'
# This is the view that will be rendered to display the XBlock in the LMS for unenrolled learners.
# Implementations of this view should assume that a user and user data are not available.
PUBLIC_VIEW = 'public_view'
# An optional view of the XBlock similar to student_view, but with possible inline
# editing capabilities. This view differs from studio_view in that it should be as similar to student_view
# as possible. When previewing XBlocks within Studio, Studio will prefer author_view to student_view.
AUTHOR_VIEW = 'author_view'
# The view used to render an editor in Studio. The editor rendering can be completely different
# from the LMS student_view, and it is only shown when the author selects "Edit".
STUDIO_VIEW = 'studio_view'
# Views that present a "preview" view of an xblock (as opposed to an editing view).
PREVIEW_VIEWS = [STUDENT_VIEW, PUBLIC_VIEW, AUTHOR_VIEW]
DEFAULT_PUBLIC_VIEW_MESSAGE = (
    u'This content is only accessible to enrolled learners. '
    u'Sign in or register, and enroll in this course to view it.'
)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class OpaqueKeyReader(IdReader):
"""
IdReader for :class:`DefinitionKey` and :class:`UsageKey`s.
"""
def get_definition_id(self, usage_id):
"""Retrieve the definition that a usage is derived from.
Args:
usage_id: The id of the usage to query
Returns:
The `definition_id` the usage is derived from
"""
raise NotImplementedError("Specific Modulestores must implement get_definition_id")
def get_block_type(self, def_id):
"""Retrieve the block_type of a particular definition
Args:
def_id: The id of the definition to query
Returns:
The `block_type` of the definition
"""
return def_id.block_type
def get_usage_id_from_aside(self, aside_id):
"""
Retrieve the XBlock `usage_id` associated with this aside usage id.
Args:
aside_id: The usage id of the XBlockAside.
Returns:
The `usage_id` of the usage the aside is commenting on.
"""
return aside_id.usage_key
def get_definition_id_from_aside(self, aside_id):
"""
Retrieve the XBlock `definition_id` associated with this aside definition id.
Args:
aside_id: The usage id of the XBlockAside.
Returns:
The `definition_id` of the usage the aside is commenting on.
"""
return aside_id.definition_key
def get_aside_type_from_usage(self, aside_id):
"""
Retrieve the XBlockAside `aside_type` associated with this aside
usage id.
Args:
aside_id: The usage id of the XBlockAside.
Returns:
The `aside_type` of the aside.
"""
return aside_id.aside_type
def get_aside_type_from_definition(self, aside_id):
"""
Retrieve the XBlockAside `aside_type` associated with this aside
definition id.
Args:
aside_id: The definition id of the XBlockAside.
Returns:
The `aside_type` of the aside.
"""
return aside_id.aside_type
class AsideKeyGenerator(IdGenerator):
    """
    An :class:`.IdGenerator` that can only mint keys for
    :class:`.XBlockAside`s; creating usages or definitions for regular
    blocks is a modulestore concern and is deliberately unimplemented.
    """
    def create_aside(self, definition_id, usage_id, aside_type):
        """
        Build the key pair for an :class:`.XBlockAside` of type
        ``aside_type`` commenting on the block identified by
        ``definition_id`` / ``usage_id``.

        Returns:
            (aside_definition_id, aside_usage_id)
        """
        return (
            AsideDefinitionKeyV2(definition_id, aside_type),
            AsideUsageKeyV2(usage_id, aside_type),
        )
    def create_usage(self, def_id):
        """
        Unsupported here: minting block usages belongs to modulestores.
        """
        raise NotImplementedError("Specific Modulestores must provide implementations of create_usage")
    def create_definition(self, block_type, slug=None):
        """
        Unsupported here: minting block definitions belongs to
        modulestores.  ``slug``, when given, is only a naming hint.
        """
        raise NotImplementedError("Specific Modulestores must provide implementations of create_definition")
def dummy_track(_event_type, _event):
    """No-op tracking callback used where no real event tracker is wired up."""
    return None
class HTMLSnippet(object):
    """
    A base class defining an interface for an object that is able to present an
    html snippet, along with associated javascript and css.

    The ``js``/``css`` dicts are class-level and shared by all instances of a
    subclass; ``get_javascript`` mutates ``cls.js`` in place via ``setdefault``.
    """
    js = {}
    js_module_name = None
    preview_view_js = {}
    studio_view_js = {}
    css = {}
    preview_view_css = {}
    studio_view_css = {}
    @classmethod
    def get_javascript(cls):
        """
        Return a dictionary containing some of the following keys:

            coffee: A list of coffeescript fragments that should be compiled
                and placed on the page

            js: A list of javascript fragments that should be included on
                the page

        All of these will be loaded onto the page in the CMS.
        """
        # cdodge: We've moved the xmodule.coffee script from an outside directory into the xmodule area of common
        # this means we need to make sure that all xmodules include this dependency which had been previously implicitly
        # fulfilled in a different area of code
        coffee = cls.js.setdefault('coffee', [])
        js = cls.js.setdefault('js', [])
        # Added xmodule.js separately to enforce 000 prefix for this only.
        cls.js.setdefault('xmodule_js', resource_string(__name__, 'js/src/xmodule.js'))
        return cls.js
    @classmethod
    def get_preview_view_js(cls):
        """JS for the preview (student-facing) view; XModules reuse get_javascript()."""
        if issubclass(cls, XModule):
            return cls.get_javascript()
        return cls.preview_view_js
    @classmethod
    def get_preview_view_js_bundle_name(cls):
        """Webpack bundle name for the preview view's JS."""
        if issubclass(cls, XModule):
            return cls.__name__
        return cls.__name__ + 'Preview'
    @classmethod
    def get_studio_view_js(cls):
        """JS for the Studio editing view; XModuleDescriptors reuse get_javascript()."""
        if issubclass(cls, XModuleDescriptor):
            return cls.get_javascript()
        return cls.studio_view_js
    @classmethod
    def get_studio_view_js_bundle_name(cls):
        """Webpack bundle name for the Studio view's JS."""
        if issubclass(cls, XModuleDescriptor):
            return cls.__name__
        return cls.__name__ + 'Studio'
    @classmethod
    def get_css(cls):
        """
        Return a dictionary containing some of the following keys:

            css: A list of css fragments that should be applied to the html
                contents of the snippet

            sass: A list of sass fragments that should be applied to the html
                contents of the snippet

            scss: A list of scss fragments that should be applied to the html
                contents of the snippet
        """
        return cls.css
    @classmethod
    def get_preview_view_css(cls):
        """CSS for the preview view; XModules reuse get_css()."""
        if issubclass(cls, XModule):
            return cls.get_css()
        return cls.preview_view_css
    @classmethod
    def get_studio_view_css(cls):
        """CSS for the Studio view; XModuleDescriptors reuse get_css()."""
        if issubclass(cls, XModuleDescriptor):
            return cls.get_css()
        return cls.studio_view_css
    def get_html(self):
        """
        Return the html used to display this snippet.  Subclasses must override.
        """
        raise NotImplementedError(
            "get_html() must be provided by specific modules - not present in {0}"
            .format(self.__class__))
def shim_xmodule_js(fragment, js_module_name):
    """
    Attach the XBlock -> XModule compatibility shim to the supplied
    :class:`web_fragments.fragment.Fragment`, and add the shim's webpack
    bundle to it.
    """
    # Imported lazily so django settings are only parsed when this is
    # actually used, not at module import time.
    import webpack_loader.utils
    needs_shim = not fragment.js_init_fn
    if needs_shim:
        fragment.initialize_js('XBlockToXModuleShim')
        fragment.json_init_args = {'xmodule-type': js_module_name}
    add_webpack_to_fragment(fragment, 'XModuleShim')
class XModuleFields(object):
    """
    Common fields for XModules.
    """
    # Author-editable title for the component; defaults to None — use
    # display_name_with_default when a usable fallback is required.
    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        scope=Scope.settings,
        # it'd be nice to have a useful default but it screws up other things; so,
        # use display_name_with_default for those
        default=None
    )
class XModuleMixin(XModuleFields, XBlock):
    """
    Fields and methods used by XModules internally.
    Adding this Mixin to an :class:`XBlock` allows it to cooperate with old-style :class:`XModules`
    """
    # Attributes for inspection of the descriptor
    # This indicates whether the xmodule is a problem-type.
    # It should respond to max_score() and grade(). It can be graded or ungraded
    # (like a practice problem).
    has_score = False
    # Whether this module can be displayed in read-only mode. It is safe to set this to True if
    # all user state is handled through the FieldData API.
    show_in_read_only_mode = False
    # Class level variable
    # True if this descriptor always requires recalculation of grades, for
    # example if the score can change via an external service, not just when the
    # student interacts with the module on the page. A specific example is
    # FoldIt, which posts grade-changing updates through a separate API.
    always_recalculate_grades = False
    # The default implementation of get_icon_class returns the icon_class
    # attribute of the class
    #
    # This attribute can be overridden by subclasses, and
    # the function can also be overridden if the icon class depends on the data
    # in the module
    icon_class = 'other'
    def __init__(self, *args, **kwargs):
        # xmodule_runtime is only set once the block is bound to a student
        # (see bind_for_student); until then student-facing calls must not run.
        self.xmodule_runtime = None
        self._asides = []
        super(XModuleMixin, self).__init__(*args, **kwargs)
    @property
    def runtime(self):
        # Student-facing runtime layered over the descriptor runtime.
        return CombinedSystem(self.xmodule_runtime, self._runtime)
    @runtime.setter
    def runtime(self, value):
        self._runtime = value
    @property
    def system(self):
        """
        Return the XBlock runtime (backwards compatibility alias provided for XModules).
        """
        return self.runtime
    @property
    def course_id(self):
        # The course key embedded in this block's usage key.
        return self.location.course_key
    @property
    def category(self):
        # Legacy name for the block type.
        return self.scope_ids.block_type
    @property
    def location(self):
        # Legacy name for the usage key.
        return self.scope_ids.usage_id
    @location.setter
    def location(self, value):
        assert isinstance(value, UsageKey)
        # Keep def_id and usage_id in lock-step with the new location.
        self.scope_ids = self.scope_ids._replace(
            def_id=value,
            usage_id=value,
        )
    @property
    def url_name(self):
        return block_metadata_utils.url_name_for_block(self)
    @property
    def display_name_with_default(self):
        """
        Return a display name for the module: use display_name if defined in
        metadata, otherwise convert the url name.
        """
        return block_metadata_utils.display_name_with_default(self)
    @property
    def display_name_with_default_escaped(self):
        """
        DEPRECATED: use display_name_with_default

        Return an html escaped display name for the module: use display_name if
        defined in metadata, otherwise convert the url name.

        Note: This newly introduced method should not be used. It was only
        introduced to enable a quick search/replace and the ability to slowly
        migrate and test switching to display_name_with_default, which is no
        longer escaped.
        """
        # xss-lint: disable=python-deprecated-display-name
        return block_metadata_utils.display_name_with_default_escaped(self)
    @property
    def tooltip_title(self):
        """
        Return the title for the sequence item containing this xmodule as its top level item.
        """
        return self.display_name_with_default
    @property
    def xblock_kvs(self):
        """
        Retrieves the internal KeyValueStore for this XModule.
        Should only be used by the persistence layer. Use with caution.
        """
        # if caller wants kvs, caller's assuming it's up to date; so, decache it
        self.save()
        return self._field_data._kvs # pylint: disable=protected-access
    @lazy
    def _unwrapped_field_data(self):
        """
        This property holds the value of _field_data before we wrap it in
        the LmsFieldData or OverrideFieldData classes (see bind_for_student).
        """
        return self._field_data
    def add_aside(self, aside):
        """
        Save a connected aside.
        """
        self._asides.append(aside)
    def get_asides(self):
        """
        Get the list of connected asides.
        """
        return self._asides
    def get_explicitly_set_fields_by_scope(self, scope=Scope.content):
        """
        Get a dictionary of the fields for the given scope which are set explicitly on this xblock. (Including
        any set to None.)
        """
        result = {}
        for field in self.fields.values():
            if field.scope == scope and field.is_set_on(self):
                try:
                    result[field.name] = field.read_json(self)
                except TypeError as exception:
                    # Re-raise with the block location and field name so the
                    # failure can be traced back to a specific block.
                    exception_message = "{message}, Block-location:{location}, Field-name:{field_name}".format(
                        message=text_type(exception),
                        location=text_type(self.location),
                        field_name=field.name
                    )
                    raise TypeError(exception_message)
        return result
    def has_children_at_depth(self, depth):
        r"""
        Returns true if self has children at the given depth. depth==0 returns
        false if self is a leaf, true otherwise.

                           SELF
                            |
                     [child at depth 0]
                     /           \
                [depth 1]    [depth 1]
                /       \
           [depth 2]   [depth 2]

        So the example above would return True for `has_children_at_depth(2)`, and False
        for depth > 2
        """
        if depth < 0:
            raise ValueError("negative depth argument is invalid")
        elif depth == 0:
            return bool(self.get_children())
        else:
            return any(child.has_children_at_depth(depth - 1) for child in self.get_children())
    def get_content_titles(self):
        r"""
        Returns list of content titles for all of self's children.

                         SEQUENCE
                            |
                         VERTICAL
                        /        \
                 SPLIT_TEST      DISCUSSION
                /        \
           VIDEO A      VIDEO B

        Essentially, this function returns a list of display_names (e.g. content titles)
        for all of the leaf nodes. In the diagram above, calling get_content_titles on
        SEQUENCE would return the display_names of `VIDEO A`, `VIDEO B`, and `DISCUSSION`.

        This is most obviously useful for sequence_modules, which need this list to display
        tooltips to users, though in theory this should work for any tree that needs
        the display_names of all its leaf nodes.
        """
        if self.has_children:
            return sum((child.get_content_titles() for child in self.get_children()), [])
        else:
            # xss-lint: disable=python-deprecated-display-name
            return [self.display_name_with_default_escaped]
    def get_children(self, usage_id_filter=None, usage_key_filter=None): # pylint: disable=arguments-differ
        """Returns a list of XBlock instances for the children of
        this module"""
        # Be backwards compatible with callers using usage_key_filter
        if usage_id_filter is None and usage_key_filter is not None:
            usage_id_filter = usage_key_filter
        # Drop children that failed to load (get_child returns None for those).
        return [
            child
            for child
            in super(XModuleMixin, self).get_children(usage_id_filter)
            if child is not None
        ]
    def get_child(self, usage_id):
        """
        Return the child XBlock identified by ``usage_id``, or ``None`` if there
        is an error while retrieving the block.
        """
        try:
            child = super(XModuleMixin, self).get_child(usage_id)
        except ItemNotFoundError:
            log.warning(u'Unable to load item %s, skipping', usage_id)
            return None
        if child is None:
            return None
        # Propagate the export filesystem so the child can export resources.
        child.runtime.export_fs = self.runtime.export_fs
        return child
    def get_required_module_descriptors(self):
        """Returns a list of XModuleDescriptor instances upon which this module depends, but are
        not children of this module"""
        return []
    def get_display_items(self):
        """
        Returns a list of descendent module instances that will display
        immediately inside this module.
        """
        items = []
        for child in self.get_children():
            items.extend(child.displayable_items())
        return items
    def displayable_items(self):
        """
        Returns list of displayable modules contained by this module. If this
        module is visible, should return [self].
        """
        return [self]
    def get_child_by(self, selector):
        """
        Return the first child XBlock that matches the specified selector
        (a predicate called with each child), or None if no child matches.
        """
        for child in self.get_children():
            if selector(child):
                return child
        return None
    def get_icon_class(self):
        """
        Return a css class identifying this module in the context of an icon
        """
        return self.icon_class
    def has_dynamic_children(self):
        """
        Returns True if this descriptor has dynamic children for a given
        student when the module is created.

        Returns False if the children of this descriptor are the same
        children that the module will return for any student.
        """
        return False
    # Functions used in the LMS
    def get_score(self):
        """
        Score the student received on the problem, or None if there is no
        score.

        Returns:
          dictionary
             {'score': integer, from 0 to get_max_score(),
              'total': get_max_score()}

          NOTE (vshnayder): not sure if this was the intended return value, but
          that's what it's doing now. I suspect that we really want it to just
          return a number. Would need to change (at least) capa to match if we did that.
        """
        return None
    def max_score(self):
        """ Maximum score. Two notes:

            * This is generic; in abstract, a problem could be 3/5 points on one
              randomization, and 5/7 on another

            * In practice, this is a Very Bad Idea, and (a) will break some code
              in place (although that code should get fixed), and (b) break some
              analytics we plan to put in place.
        """
        return None
    def get_progress(self):
        """ Return a progress.Progress object that represents how far the
        student has gone in this module. Must be implemented to get correct
        progress tracking behavior in nesting modules like sequence and
        vertical.

        If this module has no notion of progress, return None.
        """
        return None
    def bind_for_student(self, xmodule_runtime, user_id, wrappers=None):
        """
        Set up this XBlock to act as an XModule instead of an XModuleDescriptor.

        Arguments:
            xmodule_runtime (:class:`ModuleSystem'): the runtime to use when accessing student facing methods
            user_id: The user_id to set in scope_ids
            wrappers: These are a list functions that put a wrapper, such as
                      LmsFieldData or OverrideFieldData, around the field_data.
                      Note that the functions will be applied in the order in
                      which they're listed. So [f1, f2] -> f2(f1(field_data))
        """
        # pylint: disable=attribute-defined-outside-init
        # Skip rebinding if we're already bound a user, and it's this user.
        if self.scope_ids.user_id is not None and user_id == self.scope_ids.user_id:
            if getattr(xmodule_runtime, 'position', None):
                self.position = xmodule_runtime.position # update the position of the tab
            return
        # If we are switching users mid-request, save the data from the old user.
        self.save()
        # Update scope_ids to point to the new user.
        self.scope_ids = self.scope_ids._replace(user_id=user_id)
        # Clear out any cached instantiated children.
        self.clear_child_cache()
        # Clear out any cached field data scoped to the old user.
        for field in self.fields.values():
            if field.scope in (Scope.parent, Scope.children):
                continue
            if field.scope.user == UserScope.ONE:
                field._del_cached_value(self) # pylint: disable=protected-access
                # not the most elegant way of doing this, but if we're removing
                # a field from the module's field_data_cache, we should also
                # remove it from its _dirty_fields
                if field in self._dirty_fields:
                    del self._dirty_fields[field]
        # Set the new xmodule_runtime and field_data (which are user-specific)
        self.xmodule_runtime = xmodule_runtime
        if wrappers is None:
            wrappers = []
        wrapped_field_data = self._unwrapped_field_data
        for wrapper in wrappers:
            wrapped_field_data = wrapper(wrapped_field_data)
        self._field_data = wrapped_field_data
    @property
    def non_editable_metadata_fields(self):
        """
        Return the list of fields that should not be editable in Studio.

        When overriding, be sure to append to the superclasses' list.
        """
        # We are not allowing editing of xblock tag and name fields at this time (for any component).
        return [XBlock.tags, XBlock.name]
    @property
    def editable_metadata_fields(self):
        """
        Returns the metadata fields to be edited in Studio. These are fields with scope `Scope.settings`.

        Can be limited by extending `non_editable_metadata_fields`.
        """
        metadata_fields = {}
        # Only use the fields from this class, not mixins
        fields = getattr(self, 'unmixed_class', self.__class__).fields
        for field in fields.values():
            if field in self.non_editable_metadata_fields:
                continue
            if field.scope not in (Scope.settings, Scope.content):
                continue
            metadata_fields[field.name] = self._create_metadata_editor_info(field)
        return metadata_fields
    def _create_metadata_editor_info(self, field):
        """
        Creates the information needed by the metadata editor for a specific field.
        """
        def jsonify_value(field, json_choice):
            """
            Convert field value to JSON, if needed.
            """
            if isinstance(json_choice, dict):
                new_json_choice = dict(json_choice) # make a copy so below doesn't change the original
                if 'display_name' in json_choice:
                    new_json_choice['display_name'] = get_text(json_choice['display_name'])
                if 'value' in json_choice:
                    new_json_choice['value'] = field.to_json(json_choice['value'])
            else:
                new_json_choice = field.to_json(json_choice)
            return new_json_choice
        def get_text(value):
            """Localize a text value that might be None."""
            if value is None:
                return None
            else:
                return self.runtime.service(self, "i18n").ugettext(value)
        # gets the 'default_value' and 'explicitly_set' attrs
        metadata_field_editor_info = self.runtime.get_field_provenance(self, field)
        metadata_field_editor_info['field_name'] = field.name
        metadata_field_editor_info['display_name'] = get_text(field.display_name)
        metadata_field_editor_info['help'] = get_text(field.help)
        metadata_field_editor_info['value'] = field.read_json(self)
        # We support the following editors:
        # 1. A select editor for fields with a list of possible values (includes Booleans).
        # 2. Number editors for integers and floats.
        # 3. A generic string editor for anything else (editing JSON representation of the value).
        editor_type = "Generic"
        values = field.values
        if "values_provider" in field.runtime_options:
            values = field.runtime_options['values_provider'](self)
        if isinstance(values, (tuple, list)) and len(values) > 0:
            editor_type = "Select"
            values = [jsonify_value(field, json_choice) for json_choice in values]
        elif isinstance(field, Integer):
            editor_type = "Integer"
        elif isinstance(field, Float):
            editor_type = "Float"
        elif isinstance(field, List):
            editor_type = "List"
        elif isinstance(field, Dict):
            editor_type = "Dict"
        elif isinstance(field, RelativeTime):
            editor_type = "RelativeTime"
        elif isinstance(field, String) and field.name == "license":
            editor_type = "License"
        metadata_field_editor_info['type'] = editor_type
        metadata_field_editor_info['options'] = [] if values is None else values
        return metadata_field_editor_info
    def public_view(self, _context):
        """
        Default message for blocks that don't implement public_view
        """
        alert_html = HTML(
            u'<div class="page-banner"><div class="alert alert-warning">'
            u'<span class="icon icon-alert fa fa fa-warning" aria-hidden="true"></span>'
            u'<div class="message-content">{}</div></div></div>'
        )
        if self.display_name:
            display_text = _(
                u'{display_name} is only accessible to enrolled learners. '
                'Sign in or register, and enroll in this course to view it.'
            ).format(
                display_name=self.display_name
            )
        else:
            display_text = _(DEFAULT_PUBLIC_VIEW_MESSAGE)
        return Fragment(alert_html.format(display_text))
class ProxyAttribute(object):
    """
    A (python) descriptor that forwards attribute access to another attribute.

    ``ProxyAttribute('foo', 'bar')`` installed on a class makes reads, writes
    and deletes of ``obj.bar`` act on ``obj.foo.bar`` instead.  Accessing the
    descriptor on the class itself (rather than an instance) returns the
    descriptor object, as ordinary descriptors do.

    For example::

        class Foo(object):
            def __init__(self, value):
                self.foo_attr = value

        class Bar(object):
            foo = Foo('x')
            foo_attr = ProxyAttribute('foo', 'foo_attr')

        bar = Bar()
        assert bar.foo_attr == 'x'
        bar.foo_attr = 'y'
        assert bar.foo.foo_attr == 'y'
        del bar.foo_attr
        assert not hasattr(bar.foo, 'foo_attr')
    """
    def __init__(self, source, name):
        """
        :param source: name of the attribute holding the delegate object
        :param name: name of the attribute to proxy on that delegate
        """
        self._source = source
        self._name = name
    def _delegate(self, instance):
        # The object that actually stores the proxied attribute.
        return getattr(instance, self._source)
    def __get__(self, instance, owner):
        if instance is None:
            return self
        return getattr(self._delegate(instance), self._name)
    def __set__(self, instance, value):
        setattr(self._delegate(instance), self._name, value)
    def __delete__(self, instance):
        delattr(self._delegate(instance), self._name)
# Partially-applied ProxyAttribute factories: declare class attributes that
# forward to the paired XModule (`_xmodule`), the descriptor, or the
# student-facing runtime, keeping the two halves of the XModule split in sync.
module_attr = partial(ProxyAttribute, '_xmodule') # pylint: disable=invalid-name
descriptor_attr = partial(ProxyAttribute, 'descriptor') # pylint: disable=invalid-name
module_runtime_attr = partial(ProxyAttribute, 'xmodule_runtime') # pylint: disable=invalid-name
class XModuleToXBlockMixin(object):
    """
    Common code needed by XModule and XBlocks converted from XModules.
    """
    @property
    def ajax_url(self):
        """
        Returns the URL for the ajax handler.
        """
        return self.runtime.handler_url(self, 'xmodule_handler', '', '').rstrip('/?')
    @XBlock.handler
    def xmodule_handler(self, request, suffix=None):
        """
        XBlock handler that wraps `handle_ajax`
        """
        class FileObjForWebobFiles(object):
            """
            Turn Webob cgi.FieldStorage uploaded files into pure file objects.

            Webob represents uploaded files as cgi.FieldStorage objects, which
            have a .file attribute. We wrap the FieldStorage object, delegating
            attribute access to the .file attribute. But the files have no
            name, so we carry the FieldStorage .filename attribute as the .name.
            """
            def __init__(self, webob_file):
                self.file = webob_file.file
                self.name = webob_file.filename
            def __getattr__(self, name):
                # Delegate everything else (read, seek, ...) to the file object.
                return getattr(self.file, name)
        # WebOb requests have multiple entries for uploaded files. handle_ajax
        # expects a single entry as a list.
        request_post = MultiDict(request.POST)
        for key in set(six.iterkeys(request.POST)):
            if hasattr(request.POST[key], "file"):
                request_post[key] = list(map(FileObjForWebobFiles, request.POST.getall(key)))
        response_data = self.handle_ajax(suffix, request_post)
        return Response(response_data, content_type='application/json', charset='UTF-8')
@XBlock.needs("i18n")
class XModule(XModuleToXBlockMixin, HTMLSnippet, XModuleMixin):
""" Implements a generic learning module.
Subclasses must at a minimum provide a definition for get_html in order
to be displayed to users.
See the HTML module for a simple example.
"""
entry_point = "xmodule.v1"
has_score = descriptor_attr('has_score')
max_score = descriptor_attr('max_score')
show_in_read_only_mode = descriptor_attr('show_in_read_only_mode')
_field_data_cache = descriptor_attr('_field_data_cache')
_field_data = descriptor_attr('_field_data')
_dirty_fields = descriptor_attr('_dirty_fields')
def __init__(self, descriptor, *args, **kwargs):
"""
Construct a new xmodule
runtime: An XBlock runtime allowing access to external resources
descriptor: the XModuleDescriptor that this module is an instance of.
field_data: A dictionary-like object that maps field names to values
for those fields.
"""
# Set the descriptor first so that we can proxy to it
self.descriptor = descriptor
self._runtime = None
super(XModule, self).__init__(*args, **kwargs)
self.runtime.xmodule_instance = self
@property
def runtime(self):
return CombinedSystem(self._runtime, self.descriptor._runtime) # pylint: disable=protected-access
@runtime.setter
def runtime(self, value): # pylint: disable=arguments-differ
self._runtime = value
def __unicode__(self):
# xss-lint: disable=python-wrap-html
return u'<x_module(id={0})>'.format(self.id)
def handle_ajax(self, _dispatch, _data):
""" dispatch is last part of the URL.
data is a dictionary-like object with the content of the request"""
return u""
def get_child(self, usage_id):
if usage_id in self._child_cache:
return self._child_cache[usage_id]
# Take advantage of the children cache that the descriptor might have
child_descriptor = self.descriptor.get_child(usage_id)
child_block = None
if child_descriptor is not None:
child_block = self.system.get_module(child_descriptor)
self._child_cache[usage_id] = child_block
return child_block
def get_child_descriptors(self):
"""
Returns the descriptors of the child modules
Overriding this changes the behavior of get_children and
anything that uses get_children, such as get_display_items.
This method will not instantiate the modules of the children
unless absolutely necessary, so it is cheaper to call than get_children
These children will be the same children returned by the
descriptor unless descriptor.has_dynamic_children() is true.
"""
return self.descriptor.get_children()
def displayable_items(self):
    """
    Return the list of displayable modules contained by this module.

    If this module is itself visible, it returns its own descriptor as the
    single entry.
    """
    return [self.descriptor]
# ~~~~~~~~~~~~~~~ XBlock API Wrappers ~~~~~~~~~~~~~~~~
def student_view(self, context):
    """
    Return a fragment with the html from this XModule.

    Doesn't yet add any of the javascript to the fragment, nor the css.
    Also doesn't expect any javascript binding, yet.

    Makes no use of the context parameter.
    """
    rendered_html = self.get_html()
    return Fragment(rendered_html)
def policy_key(location):
    """
    Get the key for a location in a policy file. (Since the policy file is
    specific to a course, it doesn't need the full location url).
    """
    category = location.block_type
    name = location.block_id
    return u'{cat}/{name}'.format(cat=category, name=name)
Template = namedtuple("Template", "metadata data children")
class ResourceTemplates(object):
    """
    Gets the templates associated w/ a containing cls. The cls must have a
    'template_dir_name' attribute. It finds the templates directly in this
    directory under 'templates'.
    """
    template_packages = [__name__]

    @classmethod
    def templates(cls):
        """
        Return a list of dict objects, one per template that can seed a
        module of this type.

        Expects a class attribute `template_dir_name` that names the
        directory inside the 'templates' resource directory to pull
        templates from.
        """
        found = []
        dirname = cls.get_template_dir()
        if dirname is None:
            return found
        for pkg in cls.template_packages:
            if not resource_isdir(pkg, dirname):
                continue
            for template_file in resource_listdir(pkg, dirname):
                if not template_file.endswith('.yaml'):
                    log.warning("Skipping unknown template file %s", template_file)
                    continue
                raw = resource_string(pkg, os.path.join(dirname, template_file))
                template = yaml.safe_load(raw)
                template['template_id'] = template_file
                found.append(template)
        return found

    @classmethod
    def get_template_dir(cls):
        """Return this class's template resource directory, or None if absent."""
        if not getattr(cls, 'template_dir_name', None):
            return None
        dirname = os.path.join('templates', cls.template_dir_name)
        if resource_isdir(__name__, dirname):
            return dirname
        log.warning(u"No resource directory {dir} found when loading {cls_name} templates".format(
            dir=dirname,
            cls_name=cls.__name__,
        ))
        return None

    @classmethod
    def get_template(cls, template_id):
        """
        Get a single template by the given id (the file name identifying it
        within the class's template_dir_name). Returns None when not found.
        """
        dirname = cls.get_template_dir()
        if dirname is None:
            return None
        path = os.path.join(dirname, template_id)
        for pkg in cls.template_packages:
            if resource_exists(pkg, path):
                template = yaml.safe_load(resource_string(pkg, path))
                template['template_id'] = template_id
                return template
class XModuleDescriptorToXBlockMixin(object):
    """
    Common code needed by XModuleDescriptor and XBlocks converted from XModules.
    """
    # VS[compat]. Backwards compatibility code that can go away after
    # importing 2012 courses.
    # A set of metadata key conversions that we want to make
    metadata_translations = {
        'slug': 'url_name',
        'name': 'display_name',
    }

    @classmethod
    def _translate(cls, key):
        """Map a legacy metadata key to its modern name; identity otherwise."""
        return cls.metadata_translations.get(key, key)

    # ================================= XML PARSING ============================
    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Interpret the parsed XML in `node`, creating an XModuleDescriptor.
        """
        # It'd be great to not reserialize and deserialize the xml
        serialized = etree.tostring(node)
        return cls.from_xml(serialized, runtime, id_generator)

    @classmethod
    def from_xml(cls, xml_data, system, id_generator):
        """
        Creates an instance of this descriptor from the supplied xml_data.
        This may be overridden by subclasses.

        Args:
            xml_data (str): A string of xml that will be translated into data and children
                for this module

            system (:class:`.XMLParsingSystem):

            id_generator (:class:`xblock.runtime.IdGenerator`): Used to generate the
                usage_ids and definition_ids when loading this xml
        """
        raise NotImplementedError('Modules must implement from_xml to be parsable from xml')

    def add_xml_to_node(self, node):
        """
        Export this :class:`XModuleDescriptor` as XML, by setting attributes on the provided
        `node`.
        """
        exported_node = etree.fromstring(self.export_to_xml(self.runtime.export_fs))
        node.tag = exported_node.tag
        node.text = exported_node.text
        node.tail = exported_node.tail

        for attr_name, attr_value in exported_node.items():
            # if url_name is set in ExportManager then do not override it here.
            if attr_name == 'url_name' and attr_value == 'course' and attr_name in node.attrib:
                continue
            node.set(attr_name, attr_value)

        node.extend(list(exported_node))

    def export_to_xml(self, resource_fs):
        """
        Returns an xml string representing this module, and all modules
        underneath it. May also write required resources out to resource_fs.

        Assumes that modules have single parentage (that no module appears twice
        in the same course), and that it is thus safe to nest modules as xml
        children as appropriate.

        The returned XML should be able to be parsed back into an identical
        XModuleDescriptor using the from_xml method with the same system, org,
        and course
        """
        raise NotImplementedError('Modules must implement export_to_xml to enable xml export')
@XBlock.needs("i18n")
class XModuleDescriptor(XModuleDescriptorToXBlockMixin, HTMLSnippet, ResourceTemplates, XModuleMixin):
    """
    An XModuleDescriptor is a specification for an element of a course. This
    could be a problem, an organizational element (a group of content), or a
    segment of video, for example.

    XModuleDescriptors are independent and agnostic to the current student state
    on a problem. They handle the editing interface used by instructors to
    create a problem, and can generate XModules (which do know about student
    state).
    """
    entry_point = "xmodule.v1"
    # The XModule class that this descriptor instantiates when bound to a student.
    module_class = XModule

    # ============================= STRUCTURAL MANIPULATION ===================
    def __init__(self, *args, **kwargs):
        """
        Construct a new XModuleDescriptor. The only required arguments are the
        system, used for interaction with external resources, and the
        definition, which specifies all the data needed to edit and display the
        problem (but none of the associated metadata that handles recordkeeping
        around the problem).

        This allows for maximal flexibility to add to the interface while
        preserving backwards compatibility.

        runtime: A DescriptorSystem for interacting with external resources

        field_data: A dictionary-like object that maps field names to values
            for those fields.

        XModuleDescriptor.__init__ takes the same arguments as xblock.core:XBlock.__init__
        """
        super(XModuleDescriptor, self).__init__(*args, **kwargs)
        # update_version is the version which last updated this xblock v prev being the penultimate updater
        # leaving off original_version since it complicates creation w/o any obv value yet and is computable
        # by following previous until None
        # definition_locator is only used by mongostores which separate definitions from blocks
        self.previous_version = self.update_version = self.definition_locator = None
        # Set when the descriptor is bound to a student; None while unbound.
        self.xmodule_runtime = None

    def editor_saved(self, user, old_metadata, old_content):
        """
        This method is called when "Save" is pressed on the Studio editor.

        Note that after this method is called, the modulestore update_item method will
        be called on this xmodule. Therefore, any modifications to the xmodule that are
        performed in editor_saved will automatically be persisted (calling update_item
        from implementors of this method is not necessary).

        Args:
            user: the user who requested the save (as obtained from the request)
            old_metadata (dict): the values of the fields with Scope.settings before the save was performed
            old_content (dict): the values of the fields with Scope.content before the save was performed.
                This will include 'data'.
        """
        pass

    # =============================== BUILTIN METHODS ==========================
    def __eq__(self, other):
        # Structural equality: same scope ids, same field set, same field values.
        # NOTE(review): no matching __hash__ is defined here; presumably the
        # XBlock base class supplies one -- confirm before relying on hashing.
        return (hasattr(other, 'scope_ids') and
                self.scope_ids == other.scope_ids and
                list(self.fields.keys()) == list(other.fields.keys()) and
                all(getattr(self, field.name) == getattr(other, field.name)
                    for field in self.fields.values()))

    def __repr__(self):
        return (
            "{0.__class__.__name__}("
            "{0.runtime!r}, "
            "{0._field_data!r}, "
            "{0.scope_ids!r}"
            ")".format(self)
        )

    # ~~~~~~~~~~~~~~~ XModule Indirection ~~~~~~~~~~~~~~~~
    @property
    def _xmodule(self):
        """
        Returns the XModule corresponding to this descriptor. Expects that the system
        already supports all of the attributes needed by xmodules
        """
        if self.xmodule_runtime is None:
            raise UndefinedContext()
        assert self.xmodule_runtime.error_descriptor_class is not None
        if self.xmodule_runtime.xmodule_instance is None:
            try:
                # Instantiating module_class sets xmodule_runtime.xmodule_instance
                # as a side effect (see XModule.__init__).
                self.xmodule_runtime.construct_xblock_from_class(
                    self.module_class,
                    descriptor=self,
                    scope_ids=self.scope_ids,
                    field_data=self._field_data,
                    for_parent=self.get_parent() if self.has_cached_parent else None
                )
                self.xmodule_runtime.xmodule_instance.save()
            except Exception:  # pylint: disable=broad-except
                # xmodule_instance is set by the XModule.__init__. If we had an error after that,
                # we need to clean it out so that we can set up the ErrorModule instead
                self.xmodule_runtime.xmodule_instance = None

                if isinstance(self, self.xmodule_runtime.error_descriptor_class):
                    # Already the error descriptor: re-raise to avoid an infinite
                    # fallback loop.
                    log.exception('Error creating an ErrorModule from an ErrorDescriptor')
                    raise

                log.exception('Error creating xmodule')
                descriptor = self.xmodule_runtime.error_descriptor_class.from_descriptor(
                    self,
                    error_msg=exc_info_to_str(sys.exc_info())
                )
                descriptor.xmodule_runtime = self.xmodule_runtime
                self.xmodule_runtime.xmodule_instance = descriptor._xmodule  # pylint: disable=protected-access
        return self.xmodule_runtime.xmodule_instance

    # Attributes proxied through to the bound XModule instance (module_attr is
    # defined earlier in this file); they raise if the descriptor is unbound.
    course_id = module_attr('course_id')
    displayable_items = module_attr('displayable_items')
    get_display_items = module_attr('get_display_items')
    get_icon_class = module_attr('get_icon_class')
    get_progress = module_attr('get_progress')
    get_score = module_attr('get_score')
    handle_ajax = module_attr('handle_ajax')
    student_view = module_attr(STUDENT_VIEW)
    public_view = module_attr(PUBLIC_VIEW)
    get_child_descriptors = module_attr('get_child_descriptors')
    xmodule_handler = module_attr('xmodule_handler')

    # ~~~~~~~~~~~~~~~ XBlock API Wrappers ~~~~~~~~~~~~~~~~
    def studio_view(self, _context):
        """
        Return a fragment with the html from this XModuleDescriptor's editing view

        Doesn't yet add any of the javascript to the fragment, nor the css.
        Also doesn't expect any javascript binding, yet.

        Makes no use of the context parameter
        """
        return Fragment(self.get_html())
class ConfigurableFragmentWrapper(object):
    """
    Runtime mixin that allows for composition of many `wrap_xblock` wrappers.
    """

    def __init__(self, wrappers=None, wrappers_asides=None, **kwargs):
        """
        :param wrappers: A list of wrappers, where each wrapper is:

            def wrapper(block, view, frag, context):
                ...
                return wrapped_frag
        """
        super(ConfigurableFragmentWrapper, self).__init__(**kwargs)
        # Keep the caller's list objects (no copy) so later mutation is visible.
        self.wrappers = wrappers if wrappers is not None else []
        self.wrappers_asides = wrappers_asides if wrappers_asides is not None else []

    def wrap_xblock(self, block, view, frag, context):
        """
        See :func:`Runtime.wrap_child`
        """
        # Thread the fragment through each wrapper in registration order.
        for wrap in self.wrappers:
            frag = wrap(block, view, frag, context)
        return frag

    def wrap_aside(self, block, aside, view, frag, context):  # pylint: disable=unused-argument
        """
        See :func:`Runtime.wrap_child`
        """
        for wrap in self.wrappers_asides:
            frag = wrap(aside, view, frag, context)
        return frag
# This function exists to give applications (LMS/CMS) a place to monkey-patch until
# we can refactor modulestore to split out the FieldData half of its interface from
# the Runtime part of its interface. This function mostly matches the
# Runtime.handler_url interface.
#
# The monkey-patching happens in cms/djangoapps/xblock_config/apps.py and lms/djangoapps/lms_xblock/apps.py
def descriptor_global_handler_url(block, handler_name, suffix='', query='', thirdparty=False):  # pylint: disable=unused-argument
    """
    See :meth:`xblock.runtime.Runtime.handler_url`.
    """
    # Deliberate stub: LMS/CMS replace this at startup (see the comment block
    # above). If it ever fires, the application forgot to monkey-patch it.
    raise NotImplementedError("Applications must monkey-patch this function before using handler_url for studio_view")
# This function exists to give applications (LMS/CMS) a place to monkey-patch until
# we can refactor modulestore to split out the FieldData half of its interface from
# the Runtime part of its interface. This function matches the Runtime.local_resource_url interface
#
# The monkey-patching happens in cms/djangoapps/xblock_config/apps.py and lms/djangoapps/lms_xblock/apps.py
def descriptor_global_local_resource_url(block, uri):  # pylint: disable=invalid-name, unused-argument
    """
    See :meth:`xblock.runtime.Runtime.local_resource_url`.
    """
    # Deliberate stub: LMS/CMS replace this at startup (see the comment block
    # above). If it ever fires, the application forgot to monkey-patch it.
    raise NotImplementedError("Applications must monkey-patch this function before using local_resource_url for studio_view")
class MetricsMixin(object):
    """
    Mixin that times the `render` and `handle` calls of a DescriptorSystem or
    ModuleSystem and logs the duration at DEBUG level.

    Fix: the original caught every exception with a bare ``except:`` solely to
    set a ``status`` local, and built ``course_id``/``tags`` lists on every
    call -- all dead code (nothing read them after the metrics emission was
    removed). A plain try/finally preserves the observable behavior: the
    wrapped call's return value and any raised exception pass through
    unchanged, and the same log line is emitted either way.
    """

    def render(self, block, view_name, context=None):
        """
        Delegate to the next `render` in the MRO, logging how long it took.

        Args:
            block (XBlock): the block being rendered.
            view_name (str): the name of the view to render.
            context (dict): optional rendering context.

        Returns:
            Whatever the wrapped runtime's `render` returns; exceptions
            propagate unchanged.
        """
        start_time = time.time()
        try:
            return super(MetricsMixin, self).render(block, view_name, context=context)
        finally:
            # Log even when rendering raised, matching the original behavior.
            duration = time.time() - start_time
            log.debug(
                "%.3fs - render %s.%s (%s)",
                duration,
                block.__class__.__name__,
                view_name,
                getattr(block, 'location', ''),
            )

    def handle(self, block, handler_name, request, suffix=''):
        """
        Delegate to the next `handle` in the MRO, logging how long it took.

        Args:
            block (XBlock): the block whose handler is invoked.
            handler_name (str): the name of the handler.
            request: the incoming request object.
            suffix (str): optional handler suffix.

        Returns:
            Whatever the wrapped runtime's `handle` returns; exceptions
            propagate unchanged.
        """
        start_time = time.time()
        try:
            return super(MetricsMixin, self).handle(block, handler_name, request, suffix=suffix)
        finally:
            duration = time.time() - start_time
            log.debug(
                "%.3fs - handle %s.%s (%s)",
                duration,
                block.__class__.__name__,
                handler_name,
                getattr(block, 'location', ''),
            )
class DescriptorSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
    """
    Base class for :class:`Runtime`s to be used with :class:`XModuleDescriptor`s
    """
    # pylint: disable=bad-continuation
    def __init__(
            self, load_item, resources_fs, error_tracker, get_policy=None, disabled_xblock_types=lambda: [], **kwargs
    ):
        """
        load_item: Takes a Location and returns an XModuleDescriptor

        resources_fs: A Filesystem object that contains all of the
            resources needed for the course

        error_tracker: A hook for tracking errors in loading the descriptor.
            Used for example to get a list of all non-fatal problems on course
            load, and display them to the user.

            A function of (error_msg). errortracker.py provides a
            handy make_error_tracker() function.

            Patterns for using the error handler:
               try:
                   x = access_some_resource()
                   check_some_format(x)
               except SomeProblem as err:
                   msg = 'Grommet {0} is broken: {1}'.format(x, str(err))
                   log.warning(msg)  # don't rely on tracker to log
                   # NOTE: we generally don't want content errors logged as errors
                   self.system.error_tracker(msg)
                   # work around
                   return 'Oops, couldn't load grommet'

               OR, if not in an exception context:

               if not check_something(thingy):
                   msg = "thingy {0} is broken".format(thingy)
                   log.critical(msg)
                   self.system.error_tracker(msg)

            NOTE: To avoid duplication, do not call the tracker on errors
            that you're about to re-raise---let the caller track them.

        get_policy: a function that takes a usage id and returns a dict of
            policy to apply.

        local_resource_url: an implementation of :meth:`xblock.runtime.Runtime.local_resource_url`
        """
        # Default id handling unless the caller supplies its own.
        kwargs.setdefault('id_reader', OpaqueKeyReader())
        kwargs.setdefault('id_generator', AsideKeyGenerator())
        super(DescriptorSystem, self).__init__(**kwargs)

        # This is used by XModules to write out separate files during xml export
        self.export_fs = None

        self.load_item = load_item
        self.resources_fs = resources_fs
        self.error_tracker = error_tracker
        if get_policy:
            self.get_policy = get_policy
        else:
            # Default: no policy for any usage id.
            self.get_policy = lambda u: {}

        # Callable returning block types whose class should not be loaded.
        self.disabled_xblock_types = disabled_xblock_types

    def get_block(self, usage_id, for_parent=None):
        """See documentation for `xblock.runtime:Runtime.get_block`"""
        return self.load_item(usage_id, for_parent=for_parent)

    def load_block_type(self, block_type):
        """
        Returns a subclass of :class:`.XBlock` that corresponds to the specified `block_type`.
        """
        # Disabled types fall back to the runtime's default class.
        if block_type in self.disabled_xblock_types():
            return self.default_class
        return super(DescriptorSystem, self).load_block_type(block_type)

    def get_field_provenance(self, xblock, field):
        """
        For the given xblock, return a dict for the field's current state:
        {
            'default_value': what json'd value will take effect if field is unset: either the field default or
            inherited value,
            'explicitly_set': boolean for whether the current value is set v default/inherited,
        }
        :param xblock:
        :param field:
        """
        # pylint: disable=protected-access
        # in runtime b/c runtime contains app-specific xblock behavior. Studio's the only app
        # which needs this level of introspection right now. runtime also is 'allowed' to know
        # about the kvs, dbmodel, etc.
        result = {}
        result['explicitly_set'] = xblock._field_data.has(xblock, field.name)
        try:
            result['default_value'] = xblock._field_data.default(xblock, field.name)
        except KeyError:
            # The field data has no default for this field: fall back to the
            # field's own declared default, serialized to json.
            result['default_value'] = field.to_json(field.default)
        return result

    def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):
        # Currently, Modulestore is responsible for instantiating DescriptorSystems
        # This means that LMS/CMS don't have a way to define a subclass of DescriptorSystem
        # that implements the correct handler url. So, for now, instead, we will reference a
        # global function that the application can override.
        return descriptor_global_handler_url(block, handler_name, suffix, query, thirdparty)

    def local_resource_url(self, block, uri):
        """
        See :meth:`xblock.runtime.Runtime:local_resource_url` for documentation.
        """
        # Currently, Modulestore is responsible for instantiating DescriptorSystems
        # This means that LMS/CMS don't have a way to define a subclass of DescriptorSystem
        # that implements the correct local_resource_url. So, for now, instead, we will reference a
        # global function that the application can override.
        return descriptor_global_local_resource_url(block, uri)

    def applicable_aside_types(self, block):
        """
        See :meth:`xblock.runtime.Runtime:applicable_aside_types` for documentation.
        """
        potential_set = set(super(DescriptorSystem, self).applicable_aside_types(block))
        if getattr(block, 'xmodule_runtime', None) is not None:
            # When the block is bound to a student, only asides that both
            # runtimes agree on are applicable.
            if hasattr(block.xmodule_runtime, 'applicable_aside_types'):
                application_set = set(block.xmodule_runtime.applicable_aside_types(block))
                return list(potential_set.intersection(application_set))
        return list(potential_set)

    def resource_url(self, resource):
        """
        See :meth:`xblock.runtime.Runtime:resource_url` for documentation.
        """
        raise NotImplementedError("edX Platform doesn't currently implement XBlock resource urls")

    def add_block_as_child_node(self, block, node):
        # Serialize `block` as XML and attach it as a child of `node`; the
        # placeholder "unknown" tag is rewritten by add_xml_to_node.
        child = etree.SubElement(node, "unknown")
        child.set('url_name', block.url_name)
        block.add_xml_to_node(child)

    def publish(self, block, event_type, event):
        # A stub publish method that doesn't emit any events from XModuleDescriptors.
        pass

    def service(self, block, service_name):
        """
        Runtime-specific override for the XBlock service manager.  If a service is not currently
        instantiated and is declared as a critical requirement, an attempt is made to load the
        module.

        Arguments:
            block (an XBlock): this block's class will be examined for service
                decorators.
            service_name (string): the name of the service requested.

        Returns:
            An object implementing the requested service, or None.
        """
        # getting the service from parent module. making sure of block service declarations.
        service = super(DescriptorSystem, self).service(block=block, service_name=service_name)

        # Passing the block to service if it is callable e.g. ModuleI18nService. It is the responsibility of calling
        # service to handle the passing argument.
        if callable(service):
            return service(block)

        return service
new_contract('DescriptorSystem', DescriptorSystem)
class XMLParsingSystem(DescriptorSystem):
    """DescriptorSystem that can build XBlocks/XModuleDescriptors from XML."""

    def __init__(self, process_xml, **kwargs):
        """
        process_xml: Takes an xml string, and returns a XModuleDescriptor
            created from that xml
        """
        super(XMLParsingSystem, self).__init__(**kwargs)
        self.process_xml = process_xml

    def _usage_id_from_node(self, node, parent_id, id_generator=None):
        """Create a new usage id from an XML dom node.

        Args:
            node (lxml.etree.Element): The DOM node to interpret.
            parent_id: The usage ID of the parent block
            id_generator (IdGenerator): The :class:`.IdGenerator` to use
                for creating ids
        Returns:
            UsageKey: the usage key for the new xblock
        """
        return self.xblock_from_node(node, parent_id, id_generator).scope_ids.usage_id

    def xblock_from_node(self, node, parent_id, id_generator=None):
        """
        Create an XBlock instance from XML data.

        Args:
            xml_data (string): A string containing valid xml.
            system (XMLParsingSystem): The :class:`.XMLParsingSystem` used to connect the block
                to the outside world.
            id_generator (IdGenerator): An :class:`~xblock.runtime.IdGenerator` that
                will be used to construct the usage_id and definition_id for the block.

        Returns:
            XBlock: The fully instantiated :class:`~xblock.core.XBlock`.
        """
        id_generator = id_generator or self.id_generator
        # leave next line commented out - useful for low-level debugging
        # log.debug('[_usage_id_from_node] tag=%s, class=%s' % (node.tag, xblock_class))

        block_type = node.tag
        # remove xblock-family from elements
        node.attrib.pop('xblock-family', None)

        url_name = node.get('url_name')  # difference from XBlock.runtime
        def_id = id_generator.create_definition(block_type, url_name)
        usage_id = id_generator.create_usage(def_id)

        keys = ScopeIds(None, block_type, def_id, usage_id)
        block_class = self.mixologist.mix(self.load_block_type(block_type))

        # Pull aside elements out of `node` before the block parses its xml,
        # so the block never sees them as ordinary children.
        aside_children = self.parse_asides(node, def_id, usage_id, id_generator)
        asides_tags = [x.tag for x in aside_children]

        block = block_class.parse_xml(node, self, keys, id_generator)
        self._convert_reference_fields_to_keys(block)  # difference from XBlock.runtime
        block.parent = parent_id
        block.save()

        # Re-attach only the asides that were actually present in the xml.
        asides = self.get_asides(block)
        for asd in asides:
            if asd.scope_ids.block_type in asides_tags:
                block.add_aside(asd)

        return block

    def parse_asides(self, node, def_id, usage_id, id_generator):
        """pull the asides out of the xml payload and instantiate them"""
        aside_children = []
        for child in node.iterchildren():
            # get xblock-family from node
            xblock_family = child.attrib.pop('xblock-family', None)
            if xblock_family:
                xblock_family = self._family_id_to_superclass(xblock_family)
                if issubclass(xblock_family, XBlockAside):
                    aside_children.append(child)
        # now process them & remove them from the xml payload
        for child in aside_children:
            self._aside_from_xml(child, def_id, usage_id, id_generator)
            node.remove(child)
        return aside_children

    def _make_usage_key(self, course_key, value):
        """
        Makes value into a UsageKey inside the specified course.
        If value is already a UsageKey, returns that.
        """
        if isinstance(value, UsageKey):
            return value
        usage_key = UsageKey.from_string(value)
        return usage_key.map_into_course(course_key)

    def _convert_reference_fields_to_keys(self, xblock):
        """
        Find all fields of type reference and convert the payload into UsageKeys
        """
        course_key = xblock.scope_ids.usage_id.course_key

        for field in six.itervalues(xblock.fields):
            if field.is_set_on(xblock):
                field_value = getattr(xblock, field.name)
                if field_value is None:
                    continue
                elif isinstance(field, Reference):
                    setattr(xblock, field.name, self._make_usage_key(course_key, field_value))
                elif isinstance(field, ReferenceList):
                    setattr(xblock, field.name, [self._make_usage_key(course_key, ele) for ele in field_value])
                elif isinstance(field, ReferenceValueDict):
                    # Convert the dict values in place, then re-set the field so
                    # the change is recorded as a write.
                    for key, subvalue in six.iteritems(field_value):
                        assert isinstance(subvalue, six.string_types)
                        field_value[key] = self._make_usage_key(course_key, subvalue)
                    setattr(xblock, field.name, field_value)
class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
    """
    This is an abstraction such that x_modules can function independent
    of the courseware (e.g. import into other types of courseware, LMS,
    or if we want to have a sandbox server for user-contributed content)

    ModuleSystem objects are passed to x_modules to provide access to system
    functionality.

    Note that these functions can be closures over e.g. a django request
    and user, or other environment-specific info.
    """

    @contract(descriptor_runtime='DescriptorSystem')
    def __init__(
            self, static_url, track_function, get_module, render_template,
            replace_urls, descriptor_runtime, user=None, filestore=None,
            debug=False, hostname="", xqueue=None, publish=None, node_path="",
            anonymous_student_id='', course_id=None,
            cache=None, can_execute_unsafe_code=None, replace_course_urls=None,
            replace_jump_to_id_urls=None, error_descriptor_class=None, get_real_user=None,
            field_data=None, get_user_role=None, rebind_noauth_module_to_user=None,
            user_location=None, get_python_lib_zip=None, **kwargs):
        """
        Create a closure around the system environment.

        static_url - the base URL to static assets

        track_function - function of (event_type, event), intended for logging
                         or otherwise tracking the event.
                         TODO: Not used, and has inconsistent args in different
                         files.  Update or remove.

        get_module - function that takes a descriptor and returns a corresponding
                         module instance object.  If the current user does not have
                         access to that location, returns None.

        render_template - a function that takes (template_file, context), and
                         returns rendered html.

        user - The user to base the random number generator seed off of for this
                         request

        filestore - A filestore ojbect.  Defaults to an instance of OSFS based
                         at settings.DATA_DIR.

        xqueue - Dict containing XqueueInterface object, as well as parameters
                         for the specific StudentModule:
                         xqueue = {'interface': XQueueInterface object,
                                   'callback_url': Callback into the LMS,
                                   'queue_name': Target queuename in Xqueue}

        replace_urls - TEMPORARY - A function like static_replace.replace_urls
                         that capa_module can use to fix up the static urls in
                         ajax results.

        descriptor_runtime - A `DescriptorSystem` to use for loading xblocks by id

        anonymous_student_id - Used for tracking modules with student id

        course_id - the course_id containing this module

        publish(event) - A function that allows XModules to publish events (such as grade changes)

        cache - A cache object with two methods:
            .get(key) returns an object from the cache or None.
            .set(key, value, timeout_secs=None) stores a value in the cache with a timeout.

        can_execute_unsafe_code - A function returning a boolean, whether or
            not to allow the execution of unsafe, unsandboxed code.

        get_python_lib_zip - A function returning a bytestring or None.  The
            bytestring is the contents of a zip file that should be importable
            by other Python code running in the module.

        error_descriptor_class - The class to use to render XModules with errors

        get_real_user - function that takes `anonymous_student_id` and returns real user_id,
        associated with `anonymous_student_id`.

        get_user_role - A function that returns user role. Implementation is different
            for LMS and Studio.

        field_data - the `FieldData` to use for backing XBlock storage.

        rebind_noauth_module_to_user - rebinds module bound to AnonymousUser to a real user...used in LTI
           modules, which have an anonymous handler, to set legitimate users' data
        """
        # Usage_store is unused, and field_data is often supplanted with an
        # explicit field_data during construct_xblock.
        kwargs.setdefault('id_reader', getattr(descriptor_runtime, 'id_reader', OpaqueKeyReader()))
        kwargs.setdefault('id_generator', getattr(descriptor_runtime, 'id_generator', AsideKeyGenerator()))
        super(ModuleSystem, self).__init__(field_data=field_data, **kwargs)

        self.STATIC_URL = static_url
        self.xqueue = xqueue
        self.track_function = track_function
        self.filestore = filestore
        self.get_module = get_module
        self.render_template = render_template
        self.DEBUG = self.debug = debug
        self.HOSTNAME = self.hostname = hostname
        # Per-user RNG seed; 0 for anonymous users.
        self.seed = user.id if user is not None else 0
        self.replace_urls = replace_urls
        self.node_path = node_path
        self.anonymous_student_id = anonymous_student_id
        self.course_id = course_id
        self.user_is_staff = user is not None and user.is_staff

        if publish:
            self.publish = publish

        self.cache = cache or DoNothingCache()
        self.can_execute_unsafe_code = can_execute_unsafe_code or (lambda: False)
        self.get_python_lib_zip = get_python_lib_zip or (lambda: None)
        self.replace_course_urls = replace_course_urls
        self.replace_jump_to_id_urls = replace_jump_to_id_urls
        self.error_descriptor_class = error_descriptor_class
        self.xmodule_instance = None

        self.get_real_user = get_real_user
        self.user_location = user_location

        self.get_user_role = get_user_role
        self.descriptor_runtime = descriptor_runtime
        self.rebind_noauth_module_to_user = rebind_noauth_module_to_user

        if user:
            self.user_id = user.id

    def get(self, attr):
        """	provide uniform access to attributes (like etree)."""
        return self.__dict__.get(attr)

    def set(self, attr, val):
        """provide uniform access to attributes (like etree)"""
        self.__dict__[attr] = val

    def __repr__(self):
        kwargs = self.__dict__.copy()

        # Remove value set transiently by XBlock
        kwargs.pop('_view_name')

        return "{}{}".format(self.__class__.__name__, kwargs)

    @property
    def ajax_url(self):
        """
        The url prefix to be used by XModules to call into handle_ajax
        """
        assert self.xmodule_instance is not None
        return self.handler_url(self.xmodule_instance, 'xmodule_handler', '', '').rstrip('/?')

    def get_block(self, block_id, for_parent=None):
        # Load the descriptor via the descriptor runtime, then bind it to a
        # module for the current user via get_module.
        return self.get_module(self.descriptor_runtime.get_block(block_id, for_parent=for_parent))

    def resource_url(self, resource):
        raise NotImplementedError("edX Platform doesn't currently implement XBlock resource urls")

    def publish(self, block, event_type, event):
        # Default no-op; replaced by the `publish` constructor argument when given.
        pass

    def service(self, block, service_name):
        """
        Runtime-specific override for the XBlock service manager.  If a service is not currently
        instantiated and is declared as a critical requirement, an attempt is made to load the
        module.

        Arguments:
            block (an XBlock): this block's class will be examined for service
                decorators.
            service_name (string): the name of the service requested.

        Returns:
            An object implementing the requested service, or None.
        """
        # getting the service from parent module. making sure of block service declarations.
        service = super(ModuleSystem, self).service(block=block, service_name=service_name)

        # Passing the block to service if it is callable e.g. ModuleI18nService. It is the responsibility of calling
        # service to handle the passing argument.
        if callable(service):
            return service(block)

        return service
class CombinedSystem(object):
    """
    This class is a shim to allow both pure XBlocks and XModuleDescriptors
    that have been bound as XModules to access both the attributes of ModuleSystem
    and of DescriptorSystem as a single runtime.
    """

    # Only these two attributes exist per instance; everything else is
    # delegated via __getattr__ to one of the wrapped systems.
    __slots__ = ('_module_system', '_descriptor_system')

    # This system doesn't override a number of methods that are provided by ModuleSystem and DescriptorSystem,
    # namely handler_url, local_resource_url, query, and resource_url.
    #
    # At runtime, the ModuleSystem and/or DescriptorSystem will define those methods
    #
def __init__(self, module_system, descriptor_system):
    # NOTE(review): an older comment here claimed these were written to
    # __dict__ to avoid getattr/setattr recursion, but with __slots__ there
    # is no instance __dict__; these are plain slot assignments. __getattr__
    # is only invoked for *missing* attributes, so no recursion occurs.
    self._module_system = module_system
    self._descriptor_system = descriptor_system
def _get_student_block(self, block):
    """
    If block is an XModuleDescriptor that has been bound to a student, return
    the corresponding XModule, instead of the XModuleDescriptor.

    Otherwise, return block.
    """
    if isinstance(block, XModuleDescriptor) and block.xmodule_runtime:
        return block._xmodule  # pylint: disable=protected-access
    else:
        return block
def render(self, block, view_name, context=None):
"""
Render a block by invoking its view.
Finds the view named `view_name` on `block`. The default view will be
used if a specific view hasn't be registered. If there is no default
view, an exception will be raised.
The view is invoked, passing it `context`. The value returned by the
view is returned, with possible modifications by the runtime to
integrate it into a larger whole.
"""
context = context or {}
if view_name in PREVIEW_VIEWS:
block = self._get_student_block(block)
return self.__getattr__('render')(block, view_name, context)
def service(self, block, service_name):
"""Return a service, or None.
Services are objects implementing arbitrary other interfaces. They are
requested by agreed-upon names, see [XXX TODO] for a list of possible
services. The object returned depends on the service requested.
XBlocks must announce their intention to request services with the
`XBlock.needs` or `XBlock.wants` decorators. Use `needs` if you assume
that the service is available, or `wants` if your code is flexible and
can accept a None from this method.
Runtimes can override this method if they have different techniques for
finding and delivering services.
Arguments:
block (an XBlock): this block's class will be examined for service
decorators.
service_name (string): the name of the service requested.
Returns:
An object implementing the requested service, or None.
"""
service = None
if self._module_system:
service = self._module_system.service(block, service_name)
if service is None:
service = self._descriptor_system.service(block, service_name)
return service
def __getattr__(self, name):
"""
If the ModuleSystem doesn't have an attribute, try returning the same attribute from the
DescriptorSystem, instead. This allows XModuleDescriptors that are bound as XModules
to still function as XModuleDescriptors.
"""
# First we try a lookup in the module system...
try:
return getattr(self._module_system, name)
except AttributeError:
return getattr(self._descriptor_system, name)
def __setattr__(self, name, value):
"""
If the ModuleSystem is set, set the attr on it.
Always set the attr on the DescriptorSystem.
"""
if name in self.__slots__:
return super(CombinedSystem, self).__setattr__(name, value)
if self._module_system:
setattr(self._module_system, name, value)
setattr(self._descriptor_system, name, value)
def __delattr__(self, name):
"""
If the ModuleSystem is set, delete the attribute from it.
Always delete the attribute from the DescriptorSystem.
"""
if self._module_system:
delattr(self._module_system, name)
delattr(self._descriptor_system, name)
def __repr__(self):
return "CombinedSystem({!r}, {!r})".format(self._module_system, self._descriptor_system)
class DoNothingCache(object):
    """Null-object cache used by ModuleSystem when no real cache is supplied.

    Implements the minimal get/set cache interface but never stores anything:
    every lookup misses and every write is discarded.
    """

    def get(self, _key):
        # Always a cache miss.
        return None

    def set(self, key, value, timeout=None):
        # Discard the value; nothing is retained.
        return None
| agpl-3.0 |
wesleifreitas/subbrute | dnslib/lex.py | 20 | 10289 | # -*- coding: utf-8 -*-
from __future__ import print_function
import collections,string
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Lexer(object):

    """
    Simple Lexer base class. Provides basic lexer framework and
    helper functionality (read/peek/pushback etc)

    Each state is implemented using a method (lexXXXX) which should
    match a single token and return a (token,lexYYYY) tuple, with lexYYYY
    representing the next state. If token is None this is not emitted
    and if lexYYYY is None or the lexer reaches the end of the
    input stream the lexer exits.

    The 'parse' method returns a generator that will return tokens
    (the class also acts as an iterator)

    The default start state is 'lexStart'

    Input can either be a string/bytes or file object.

    The approach is based loosely on Rob Pike's Go lexer presentation
    (using generators rather than channels).

    >>> p = Lexer("a bcd efgh")
    >>> p.read()
    'a'
    >>> p.read()
    ' '
    >>> p.peek(3)
    'bcd'
    >>> p.read(5)
    'bcd e'
    >>> p.pushback('e')
    >>> p.read(4)
    'efgh'
    """

    # Characters that introduce an escape sequence in readescaped().
    escape_chars = '\\'
    # Single-letter escapes and their translations.
    escape = {'n':'\n','t':'\t','r':'\r'}

    def __init__(self,f,debug=False):
        # Accept an already-open file-like object, a str, or bytes
        # (bytes are decoded and wrapped in StringIO).
        if hasattr(f,'read'):
            self.f = f
        elif type(f) == str:
            self.f = StringIO(f)
        elif type(f) == bytes:
            self.f = StringIO(f.decode())
        else:
            raise ValueError("Invalid input")
        self.debug = debug
        # Pushback queue: characters returned via pushback() are served
        # before anything else is read from the underlying stream.
        self.q = collections.deque()
        self.state = self.lexStart
        self.escaped = False
        self.eof = False

    def __iter__(self):
        return self.parse()

    def next_token(self):
        """Run the current state once and advance to the state it returns."""
        if self.debug:
            print("STATE",self.state)
        (tok,self.state) = self.state()
        return tok

    def parse(self):
        """Generator yielding tokens until a terminal state or EOF."""
        while self.state is not None and not self.eof:
            tok = self.next_token()
            if tok:
                yield tok

    def read(self,n=1):
        """Read up to n characters, draining the pushback queue first."""
        s = ""
        while self.q and n > 0:
            s += self.q.popleft()
            n -= 1
        s += self.f.read(n)
        if s == '':
            self.eof = True
        if self.debug:
            print("Read: >%s<" % repr(s))
        return s

    def peek(self,n=1):
        """Return the next n characters without consuming them."""
        s = ""
        i = 0
        while len(self.q) > i and n > 0:
            s += self.q[i]
            i += 1
            n -= 1
        r = self.f.read(n)
        if n > 0 and r == '':
            self.eof = True
        # Freshly-read characters are queued so a later read() sees them.
        self.q.extend(r)
        if self.debug:
            print("Peek : >%s<" % repr(s + r))
        return s + r

    def pushback(self,s):
        """Return characters to the front of the input."""
        p = collections.deque(s)
        p.extend(self.q)
        self.q = p

    def readescaped(self):
        """Read one character, translating backslash escape sequences.

        Sets self.escaped so callers can tell a literal character from an
        escaped one (e.g. an escaped quote inside a quoted string).
        Supports \\NNN octal, \\xHH hex, and the single-letter escapes in
        self.escape.
        """
        c = self.read(1)
        if c in self.escape_chars:
            self.escaped = True
            n = self.peek(3)
            if n.isdigit():
                n = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % n)
                return chr(int(n,8))
            elif n[0] in 'x':
                x = self.read(3)
                if self.debug:
                    print("Escape: >%s<" % x)
                return chr(int(x[1:],16))
            else:
                c = self.read(1)
                if self.debug:
                    print("Escape: >%s<" % c)
                return self.escape.get(c,c)
        else:
            self.escaped = False
            return c

    def lexStart(self):
        # Default start state: emit nothing and terminate immediately;
        # subclasses override this.
        return (None,None)
class WordLexer(Lexer):
    """
    Example lexer which will split input stream into words (respecting
    quotes)

    To emit SPACE tokens: self.spacetok = ('SPACE',None)
    To emit NL tokens: self.nltok = ('NL',None)

    >>> l = WordLexer(r'abc "def\100\x3d\. ghi" jkl')
    >>> list(l)
    [('ATOM', 'abc'), ('ATOM', 'def@=. ghi'), ('ATOM', 'jkl')]
    >>> l = WordLexer(r"1 '2 3 4' 5")
    >>> list(l)
    [('ATOM', '1'), ('ATOM', '2 3 4'), ('ATOM', '5')]
    >>> l = WordLexer("abc# a comment")
    >>> list(l)
    [('ATOM', 'abc'), ('COMMENT', 'a comment')]
    """
    # Character classes that drive the state transitions below.
    wordchars = set(string.ascii_letters) | set(string.digits) | \
                set(string.punctuation)
    quotechars = set('"\'')
    commentchars = set('#')
    spacechars = set(' \t\x0b\x0c')
    nlchars = set('\r\n')
    # Set these to tuples (e.g. ('SPACE',None)) to have whitespace/newline
    # runs emitted as tokens instead of being swallowed.
    spacetok = None
    nltok = None

    def lexStart(self):
        return (None,self.lexSpace)

    def lexSpace(self):
        """Consume a run of whitespace and dispatch on the next character."""
        s = []
        if self.spacetok:
            tok = lambda n : (self.spacetok,n) if s else (None,n)
        else:
            tok = lambda n : (None,n)
        while not self.eof:
            c = self.peek()
            if c in self.spacechars:
                s.append(self.read())
            elif c in self.nlchars:
                return tok(self.lexNL)
            elif c in self.commentchars:
                return tok(self.lexComment)
            elif c in self.quotechars:
                return tok(self.lexQuote)
            elif c in self.wordchars:
                # Fixed: an unreachable second `return (self.spacetok,
                # self.lexWord)` used to follow this return (dead code).
                return tok(self.lexWord)
            elif c:
                raise ValueError("Invalid input [%d]: %s" % (
                                    self.f.tell(),c))
        return (None,None)

    def lexNL(self):
        """Consume a run of newline characters."""
        while True:
            c = self.read()
            if c not in self.nlchars:
                self.pushback(c)
                return (self.nltok,self.lexSpace)

    def lexComment(self):
        """Consume '#' to end-of-line; leading whitespace is trimmed."""
        s = []
        tok = lambda n : (('COMMENT',''.join(s)),n) if s else (None,n)
        start = False
        _ = self.read()
        while not self.eof:
            c = self.read()
            if c == '\n':
                self.pushback(c)
                return tok(self.lexNL)
            elif start or c not in string.whitespace:
                start = True
                s.append(c)
        return tok(None)

    def lexWord(self):
        """Consume a run of word characters into an ATOM token."""
        s = []
        tok = lambda n : (('ATOM',''.join(s)),n) if s else (None,n)
        while not self.eof:
            c = self.peek()
            # NOTE(review): only '"' triggers lexQuote here even though
            # quotechars also contains "'" — presumably intentional (a bare
            # apostrophe inside a word stays in the ATOM); confirm before
            # changing.
            if c == '"':
                return tok(self.lexQuote)
            elif c in self.commentchars:
                return tok(self.lexComment)
            elif c.isspace():
                return tok(self.lexSpace)
            elif c in self.wordchars:
                s.append(self.read())
            elif c:
                raise ValueError('Invalid input [%d]: %s' % (
                                    self.f.tell(),c))
        return tok(None)

    def lexQuote(self):
        """Consume a quoted string (escape sequences honoured) into an ATOM."""
        s = []
        tok = lambda n : (('ATOM',''.join(s)),n)
        q = self.read(1)
        while not self.eof:
            c = self.readescaped()
            # Only an *unescaped* matching quote terminates the string.
            if c == q and not self.escaped:
                break
            else:
                s.append(c)
        return tok(self.lexSpace)
class RandomLexer(Lexer):

    """
    Test lexing from infinite stream.

    Extract strings of letters/numbers from /dev/urandom

    >>> import itertools,sys
    >>> if sys.version[0] == '2':
    ...     f = open("/dev/urandom")
    ... else:
    ...     f = open("/dev/urandom",encoding="ascii",errors="replace")
    >>> r = RandomLexer(f)
    >>> i = iter(r)
    >>> len(list(itertools.islice(i,10)))
    10
    """

    # Minimum run lengths required before a token is emitted.
    minalpha = 4
    mindigits = 3

    def lexStart(self):
        # Emit nothing; begin scanning for interesting runs.
        return (None, self.lexRandom)

    def lexRandom(self):
        # Skip characters until the next letter or digit run begins.
        while not self.eof:
            ch = self.peek(1)
            if ch.isalpha():
                return (None, self.lexAlpha)
            if ch.isdigit():
                return (None, self.lexDigits)
            self.read(1)
        return (None, None)

    def lexDigits(self):
        return self._lex_run(str.isdigit, self.mindigits, 'NUMBER')

    def lexAlpha(self):
        return self._lex_run(str.isalpha, self.minalpha, 'STRING')

    def _lex_run(self, pred, minlen, toktype):
        # Consume a maximal run of characters satisfying `pred`; emit a
        # (toktype, text) token only if the run reaches `minlen` characters.
        run = []
        ch = self.read(1)
        while pred(ch):
            run.append(ch)
            ch = self.read(1)
        self.pushback(ch)
        if len(run) >= minlen:
            return ((toktype, "".join(run)), self.lexRandom)
        return (None, self.lexRandom)
if __name__ == '__main__':

    import argparse,doctest,sys

    # CLI harness: either lex stdin with WordLexer (--lex) or run the
    # module doctests.
    p = argparse.ArgumentParser(description="Lex Tester")
    p.add_argument("--lex","-l",action='store_true',default=False,
                    help="Lex input (stdin)")
    p.add_argument("--nl",action='store_true',default=False,
                    help="Output NL tokens")
    p.add_argument("--space",action='store_true',default=False,
                    help="Output Whitespace tokens")
    p.add_argument("--wordchars",help="Wordchars")
    p.add_argument("--quotechars",help="Quotechars")
    p.add_argument("--commentchars",help="Commentchars")
    p.add_argument("--spacechars",help="Spacechars")
    p.add_argument("--nlchars",help="NLchars")
    args = p.parse_args()

    if args.lex:
        l = WordLexer(sys.stdin)
        # Allow the character classes to be overridden from the command line.
        if args.wordchars:
            l.wordchars = set(args.wordchars)
        if args.quotechars:
            l.quotechars = set(args.quotechars)
        if args.commentchars:
            l.commentchars = set(args.commentchars)
        if args.spacechars:
            l.spacechars = set(args.spacechars)
        if args.nlchars:
            l.nlchars = set(args.nlchars)
        if args.space:
            l.spacetok = ('SPACE',)
        if args.nl:
            l.nltok = ('NL',)
        for tok in l:
            print(tok)
    else:
        try:
            # Test if we have /dev/urandom
            open("/dev/urandom")
            doctest.testmod()
        except IOError:
            # Don't run stream test
            doctest.run_docstring_examples(Lexer, globals())
            doctest.run_docstring_examples(WordLexer, globals())
| gpl-3.0 |
pigeonflight/strider-plone | docker/appengine/lib/django-1.3/django/db/backends/oracle/creation.py | 153 | 11808 | import sys, time
from django.db.backends.creation import BaseDatabaseCreation
# Prefix prepended to the production names to derive test database/user/
# tablespace names (see the _test_database_* helpers below).
TEST_DATABASE_PREFIX = 'test_'
# Default password for the auto-created Oracle test user.
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
    """Oracle-specific test-database creation/destruction.

    NOTE: Python 2-only code (`print` statements, `except E, e`, `raw_input`).
    Unlike other backends, Oracle "test databases" are really a test
    user plus dedicated tablespaces created inside the existing instance.
    """

    # This dictionary maps Field objects to their associated Oracle column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed.

    data_types = {
        'AutoField':                    'NUMBER(11)',
        'BooleanField':                 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
        'CharField':                    'NVARCHAR2(%(max_length)s)',
        'CommaSeparatedIntegerField':   'VARCHAR2(%(max_length)s)',
        'DateField':                    'DATE',
        'DateTimeField':                'TIMESTAMP',
        'DecimalField':                 'NUMBER(%(max_digits)s, %(decimal_places)s)',
        'FileField':                    'NVARCHAR2(%(max_length)s)',
        'FilePathField':                'NVARCHAR2(%(max_length)s)',
        'FloatField':                   'DOUBLE PRECISION',
        'IntegerField':                 'NUMBER(11)',
        'BigIntegerField':              'NUMBER(19)',
        'IPAddressField':               'VARCHAR2(15)',
        'NullBooleanField':             'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
        'OneToOneField':                'NUMBER(11)',
        'PositiveIntegerField':         'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'PositiveSmallIntegerField':    'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'SlugField':                    'NVARCHAR2(%(max_length)s)',
        'SmallIntegerField':            'NUMBER(11)',
        'TextField':                    'NCLOB',
        'TimeField':                    'TIMESTAMP',
        'URLField':                     'VARCHAR2(%(max_length)s)',
    }

    def __init__(self, connection):
        # Holds the production USER/PASSWORD so they can be restored after
        # the test run (see _create_test_db/_destroy_test_db).
        self.remember = {}
        super(DatabaseCreation, self).__init__(connection)

    def _create_test_db(self, verbosity=1, autoclobber=False):
        """Create the test tablespaces and test user, prompting (or
        clobbering, when autoclobber) if they already exist."""
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        # Save the production credentials before they are replaced below.
        self.remember['user'] = self.connection.settings_dict['USER']
        self.remember['passwd'] = self.connection.settings_dict['PASSWORD']

        cursor = self.connection.cursor()
        if self._test_database_create():
            try:
                self._execute_test_db_creation(cursor, parameters, verbosity)
            except Exception, e:
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test database '%s'..." % self.connection.alias
                        self._execute_test_db_destruction(cursor, parameters, verbosity)
                        self._execute_test_db_creation(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)

        if self._test_user_create():
            if verbosity >= 1:
                print "Creating test user..."
            try:
                self._create_test_user(cursor, parameters, verbosity)
            except Exception, e:
                sys.stderr.write("Got an error creating the test user: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test user..."
                        self._destroy_test_user(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print "Creating test user..."
                        self._create_test_user(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test user: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)

        # Point the connection at the freshly-created test user.
        self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict["USER"] = TEST_USER
        self.connection.settings_dict["PASSWORD"] = TEST_PASSWD

        return self.connection.settings_dict['NAME']

    def _destroy_test_db(self, test_database_name, verbosity=1):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        """
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        # Restore the production credentials saved in _create_test_db.
        self.connection.settings_dict["USER"] = self.remember['user']
        self.connection.settings_dict["PASSWORD"] = self.remember['passwd']

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        cursor = self.connection.cursor()
        time.sleep(1) # To avoid "database is being accessed by other users" errors.
        if self._test_user_create():
            if verbosity >= 1:
                print 'Destroying test user...'
            self._destroy_test_user(cursor, parameters, verbosity)
        if self._test_database_create():
            if verbosity >= 1:
                print 'Destroying test database tables...'
            self._execute_test_db_destruction(cursor, parameters, verbosity)
        self.connection.close()

    def _execute_test_db_creation(self, cursor, parameters, verbosity):
        if verbosity >= 2:
            print "_create_test_db(): dbname = %s" % parameters['dbname']
        statements = [
            """CREATE TABLESPACE %(tblspace)s
               DATAFILE '%(tblspace)s.dbf' SIZE 20M
               REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
            """,
            """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
               TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
               REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
            """,
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _create_test_user(self, cursor, parameters, verbosity):
        if verbosity >= 2:
            print "_create_test_user(): username = %s" % parameters['user']
        statements = [
            """CREATE USER %(user)s
               IDENTIFIED BY %(password)s
               DEFAULT TABLESPACE %(tblspace)s
               TEMPORARY TABLESPACE %(tblspace_temp)s
            """,
            """GRANT CONNECT, RESOURCE TO %(user)s""",
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _execute_test_db_destruction(self, cursor, parameters, verbosity):
        if verbosity >= 2:
            print "_execute_test_db_destruction(): dbname=%s" % parameters['dbname']
        statements = [
            'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _destroy_test_user(self, cursor, parameters, verbosity):
        if verbosity >= 2:
            print "_destroy_test_user(): user=%s" % parameters['user']
            print "Be patient.  This can take some time..."
        statements = [
            'DROP USER %(user)s CASCADE',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _execute_statements(self, cursor, statements, parameters, verbosity):
        # Interpolate the shared parameters into each SQL template and run it;
        # failures are reported and re-raised.
        for template in statements:
            stmt = template % parameters
            if verbosity >= 2:
                print stmt
            try:
                cursor.execute(stmt)
            except Exception, err:
                sys.stderr.write("Failed (%s)\n" % (err))
                raise

    def _test_database_name(self):
        # Derived name unless overridden by settings['TEST_NAME'].
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_NAME']:
                name = self.connection.settings_dict['TEST_NAME']
        except AttributeError:
            pass
        return name

    def _test_database_create(self):
        return self.connection.settings_dict.get('TEST_CREATE', True)

    def _test_user_create(self):
        return self.connection.settings_dict.get('TEST_USER_CREATE', True)

    def _test_database_user(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
        try:
            if self.connection.settings_dict['TEST_USER']:
                name = self.connection.settings_dict['TEST_USER']
        except KeyError:
            pass
        return name

    def _test_database_passwd(self):
        name = PASSWORD
        try:
            if self.connection.settings_dict['TEST_PASSWD']:
                name = self.connection.settings_dict['TEST_PASSWD']
        except KeyError:
            pass
        return name

    def _test_database_tblspace(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_TBLSPACE']:
                name = self.connection.settings_dict['TEST_TBLSPACE']
        except KeyError:
            pass
        return name

    def _test_database_tblspace_tmp(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
        try:
            if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
                name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
        except KeyError:
            pass
        return name

    def _get_test_db_name(self):
        """
        We need to return the 'production' DB name to get the test DB creation
        machinery to work. This isn't a great deal in this case because DB
        names as handled by Django haven't real counterparts in Oracle.
        """
        return self.connection.settings_dict['NAME']

    def test_db_signature(self):
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            settings_dict['NAME'],
            self._test_database_user(),
        )
| mit |
wilmoz/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py | 652 | 6143 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions and exceptions used by WebSocket opening handshake
processors.
"""
from mod_pywebsocket import common
from mod_pywebsocket import http_header_util
class AbortedByUserException(Exception):
    """Raised by a handler to abandon the connection on purpose.

    Raised from do_extra_handshake: the connection is dropped and no further
    WebSocket or HTTP(S) handler runs.  Raised from transfer_data_handler:
    the connection is closed without performing the closing handshake, again
    with no further handler invoked.
    """
    pass
class HandshakeException(Exception):
    """Raised when the WebSocket opening handshake cannot be processed.

    The optional `status` carries an HTTP status code associated with the
    failure (None when no specific status applies).
    """

    def __init__(self, name, status=None):
        # Record the status first; the message itself is handled by Exception.
        self.status = status
        super(HandshakeException, self).__init__(name)
class VersionException(Exception):
    """Raised when the client's requested WebSocket version is unsupported.

    `supported_versions` is a str naming the hybi versions the server does
    accept (e.g. '8, 13'), suitable for echoing back to the client.
    """

    def __init__(self, name, supported_versions=''):
        """Construct an instance.

        Args:
            supported_versions: a str object to show supported hybi versions.
                (e.g. '8, 13')
        """
        self.supported_versions = supported_versions
        super(VersionException, self).__init__(name)
def get_default_port(is_secure):
    """Return the default WebSocket port for the given security setting."""
    return (common.DEFAULT_WEB_SOCKET_SECURE_PORT
            if is_secure
            else common.DEFAULT_WEB_SOCKET_PORT)
def validate_subprotocol(subprotocol):
    """Validate a value in the Sec-WebSocket-Protocol field.

    Raises HandshakeException when the value is empty or is not a single
    HTTP token.

    See the Section 4.1., 4.2.2., and 4.3. of RFC 6455.
    """

    if not subprotocol:
        raise HandshakeException('Invalid subprotocol name: empty')

    # Parameter should be encoded HTTP token.
    state = http_header_util.ParsingState(subprotocol)
    token = http_header_util.consume_token(state)
    rest = http_header_util.peek(state)
    # If |rest| is not None, |subprotocol| is not one token or invalid. If
    # |rest| is None, |token| must not be None because |subprotocol| is
    # concatenation of |token| and |rest| and is not None.
    if rest is not None:
        raise HandshakeException('Invalid non-token string in subprotocol '
                                 'name: %r' % rest)
def parse_host_header(request):
    """Split the Host header into a (host, port) pair.

    When no explicit port is present, the scheme's default WebSocket port is
    used. Raises HandshakeException if the port is not a valid integer.
    """
    fields = request.headers_in[common.HOST_HEADER].split(':', 1)
    if len(fields) == 1:
        return fields[0], get_default_port(request.is_https())
    try:
        return fields[0], int(fields[1])
    # Fixed: was the Python 2-only "except ValueError, e" spelling, which is
    # a SyntaxError on Python 3; "as" is valid on 2.6+ and 3.x.
    except ValueError as e:
        raise HandshakeException('Invalid port number format: %r' % e)
def format_header(name, value):
    """Serialize a single HTTP header field, terminated by CRLF."""
    return '{0}: {1}\r\n'.format(name, value)
def get_mandatory_header(request, key):
    """Return the value of header `key`, raising HandshakeException if absent."""
    value = request.headers_in.get(key)
    if value is not None:
        return value
    raise HandshakeException('Header %s is not defined' % key)
def validate_mandatory_header(request, key, expected_value, fail_status=None):
    """Require header `key` to exist and to equal `expected_value`
    (compared case-insensitively); otherwise raise HandshakeException with
    `fail_status` attached."""
    value = get_mandatory_header(request, key)

    if value.lower() == expected_value.lower():
        return
    raise HandshakeException(
        'Expected %r for header %s but found %r (case-insensitive)' %
        (expected_value, key, value), status=fail_status)
def check_request_line(request):
    """Verify the request line is a GET over HTTP/1.1, else raise
    HandshakeException."""
    # 5.1 1. The three character UTF-8 string "GET".
    # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
    method = request.method
    if method != 'GET':
        raise HandshakeException('Method is not GET: %r' % method)
    protocol = request.protocol
    if protocol != 'HTTP/1.1':
        raise HandshakeException('Version is not HTTP/1.1: %r' % protocol)
def parse_token_list(data):
    """Parses a header value which follows 1#token and returns parsed elements
    as a list of strings.

    Leading LWSes must be trimmed.

    Raises HandshakeException on a malformed list (missing comma separator)
    or when no token is present at all.
    """

    state = http_header_util.ParsingState(data)

    token_list = []

    while True:
        token = http_header_util.consume_token(state)
        if token is not None:
            token_list.append(token)

        # Skip optional whitespace after the token.
        http_header_util.consume_lwses(state)

        if http_header_util.peek(state) is None:
            break

        if not http_header_util.consume_string(state, ','):
            raise HandshakeException(
                'Expected a comma but found %r' % http_header_util.peek(state))

        http_header_util.consume_lwses(state)

    if len(token_list) == 0:
        raise HandshakeException('No valid token found')

    return token_list
# vi:sts=4 sw=4 et
| mpl-2.0 |
hexahedria/biaxial-rnn-music-composition | model.py | 1 | 17458 | import theano, theano.tensor as T
import numpy as np
import theano_lstm
from out_to_in_op import OutputFormToInputFormOp
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss, MultiDropout
def has_hidden(layer):
    """
    Whether a layer has a trainable
    initial hidden state.
    """
    # Probe for the attribute instead of using hasattr; equivalent for
    # plain attributes.
    try:
        layer.initial_hidden_state
        return True
    except AttributeError:
        return False
def matrixify(vector, n):
    """Tile a symbolic vector into a matrix of n identical rows.

    Pads a leading axis onto `vector` and repeats it n times along it.
    """
    # Cast n to int32 if necessary to prevent error on 32 bit systems
    return T.repeat(T.shape_padleft(vector),
                    n if (theano.configdefaults.local_bitwidth() == 64) else T.cast(n,'int32'),
                    axis=0)
def initial_state(layer, dimensions = None):
    """
    Initalizes the recurrence relation with an initial hidden state
    if needed, else replaces with a "None" to tell Theano that
    the network **will** return something, but it does not need
    to send it to the next step of the recurrence

    When `dimensions` is given, the state vector is tiled to that many rows
    (one per parallel sequence) via matrixify.
    """
    if dimensions is None:
        return layer.initial_hidden_state if has_hidden(layer) else None
    else:
        return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None
def initial_state_with_taps(layer, dimensions = None):
    """Build a theano.scan outputs_info entry for `layer`: its initial state
    wrapped as dict(initial=..., taps=[-1]), or None when the layer carries
    no hidden state."""
    state = initial_state(layer, dimensions)
    return None if state is None else dict(initial=state, taps=[-1])
class PassthroughLayer(Layer):
    """
    Empty "layer" used to get the final output of the LSTM

    Implements the Layer interface with an identity activation and no
    trainable parameters.
    """

    def __init__(self):
        self.is_recursive = False

    def create_variables(self):
        # No parameters to create.
        pass

    def activate(self, x):
        # Identity: pass the input through unchanged.
        return x

    @property
    def params(self):
        return []

    @params.setter
    def params(self, param_list):
        # Nothing to set; accepts and ignores any parameter list.
        pass
def get_last_layer(result):
    """Return result[-1] when given a list; otherwise return result itself."""
    if not isinstance(result, list):
        return result
    return result[-1]
def ensure_list(result):
    """Wrap a non-list value in a singleton list; lists pass through as-is."""
    return result if isinstance(result, list) else [result]
class Model(object):
    def __init__(self, t_layer_sizes, p_layer_sizes, dropout=0):
        """Build the two stacked-LSTM networks (time axis and note axis) and
        compile the training/prediction functions.

        t_layer_sizes: hidden sizes for the time-axis LSTM stack.
        p_layer_sizes: hidden sizes for the note-axis (pitch) LSTM stack.
        dropout: dropout probability applied to hidden layers (0 disables).
        """
        self.t_layer_sizes = t_layer_sizes
        self.p_layer_sizes = p_layer_sizes

        # From our architecture definition, size of the notewise input
        self.t_input_size = 80

        # time network maps from notewise input size to various hidden sizes
        self.time_model = StackedCells( self.t_input_size, celltype=LSTM, layers = t_layer_sizes)
        self.time_model.layers.append(PassthroughLayer())

        # pitch network takes last layer of time model and state of last note, moving upward
        # and eventually ends with a two-element sigmoid layer
        p_input_size = t_layer_sizes[-1] + 2
        self.pitch_model = StackedCells( p_input_size, celltype=LSTM, layers = p_layer_sizes)
        self.pitch_model.layers.append(Layer(p_layer_sizes[-1], 2, activation = T.nnet.sigmoid))

        self.dropout = dropout

        # Sampling temperature-like exponent applied to P(play) at
        # prediction time (see _predict_step_note).
        self.conservativity = T.fscalar()
        self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))

        self.setup_train()
        self.setup_predict()
        self.setup_slow_walk()
    @property
    def params(self):
        """All trainable parameters: time-model params followed by
        pitch-model params."""
        return self.time_model.params + self.pitch_model.params

    @params.setter
    def params(self, param_list):
        # Split the flat list back into the two stacks, preserving the
        # ordering used by the getter.
        ntimeparams = len(self.time_model.params)
        self.time_model.params = param_list[:ntimeparams]
        self.pitch_model.params = param_list[ntimeparams:]
    @property
    def learned_config(self):
        """Everything needed to restore the model: both parameter lists plus
        the trainable initial hidden states of every stateful layer."""
        return [self.time_model.params, self.pitch_model.params, [l.initial_hidden_state for mod in (self.time_model, self.pitch_model) for l in mod.layers if has_hidden(l)]]

    @learned_config.setter
    def learned_config(self, learned_list):
        self.time_model.params = learned_list[0]
        self.pitch_model.params = learned_list[1]
        # Copy saved initial-state values into the matching layers, in the
        # same iteration order the getter used.
        for l, val in zip((l for mod in (self.time_model, self.pitch_model) for l in mod.layers if has_hidden(l)), learned_list[2]):
            l.initial_hidden_state.set_value(val.get_value())
    def setup_train(self):
        """Build the symbolic training graph and compile update functions.

        Scans the time model across time for all (batch, note) sequences in
        parallel, then scans the pitch model across notes, and minimizes the
        negative log-likelihood of the observed play/articulate decisions
        with adadelta. Produces self.update_fun / self.update_thought_fun.
        """

        # dimensions: (batch, time, notes, input_data) with input_data as in architecture
        self.input_mat = T.btensor4()
        # dimensions: (batch, time, notes, onOrArtic) with 0:on, 1:artic
        self.output_mat = T.btensor4()

        # Smallest positive float32 increment; keeps log() away from zero.
        self.epsilon = np.spacing(np.float32(1.0))

        def step_time(in_data, *other):
            # `other` is the scan state: hidden states, then dropout masks
            # (when dropout is enabled).
            other = list(other)
            split = -len(self.t_layer_sizes) if self.dropout else len(other)
            hiddens = other[:split]
            masks = [None] + other[split:] if self.dropout else []
            new_states = self.time_model.forward(in_data, prev_hiddens=hiddens, dropout=masks)
            return new_states

        def step_note(in_data, *other):
            # Same shape of state handling as step_time, for the pitch model.
            other = list(other)
            split = -len(self.p_layer_sizes) if self.dropout else len(other)
            hiddens = other[:split]
            masks = [None] + other[split:] if self.dropout else []
            new_states = self.pitch_model.forward(in_data, prev_hiddens=hiddens, dropout=masks)
            return new_states

        # We generate an output for each input, so it doesn't make sense to use the last output as an input.
        # Note that we assume the sentinel start value is already present
        # TEMP CHANGE: NO SENTINEL
        input_slice = self.input_mat[:,0:-1]
        n_batch, n_time, n_note, n_ipn = input_slice.shape

        # time_inputs is a matrix (time, batch/note, input_per_note)
        time_inputs = input_slice.transpose((1,0,2,3)).reshape((n_time,n_batch*n_note,n_ipn))
        num_time_parallel = time_inputs.shape[1]

        # apply dropout
        if self.dropout > 0:
            time_masks = theano_lstm.MultiDropout( [(num_time_parallel, shape) for shape in self.t_layer_sizes], self.dropout)
        else:
            time_masks = []

        time_outputs_info = [initial_state_with_taps(layer, num_time_parallel) for layer in self.time_model.layers]
        time_result, _ = theano.scan(fn=step_time, sequences=[time_inputs], non_sequences=time_masks, outputs_info=time_outputs_info)

        self.time_thoughts = time_result

        # Now time_result is a list of matrix [layer](time, batch/note, hidden_states) for each layer but we only care about
        # the hidden state of the last layer.
        # Transpose to be (note, batch/time, hidden_states)
        last_layer = get_last_layer(time_result)
        n_hidden = last_layer.shape[2]
        time_final = get_last_layer(time_result).reshape((n_time,n_batch,n_note,n_hidden)).transpose((2,1,0,3)).reshape((n_note,n_batch*n_time,n_hidden))

        # note_choices_inputs represents the last chosen note. Starts with [0,0], doesn't include last note.
        # In (note, batch/time, 2) format
        # Shape of start is thus (1, N, 2), concatenated with all but last element of output_mat transformed to (x, N, 2)
        start_note_values = T.alloc(np.array(0,dtype=np.int8), 1, time_final.shape[1], 2 )
        correct_choices = self.output_mat[:,1:,0:-1,:].transpose((2,0,1,3)).reshape((n_note-1,n_batch*n_time,2))
        note_choices_inputs = T.concatenate([start_note_values, correct_choices], axis=0)

        # Together, this and the output from the last LSTM goes to the new LSTM, but rotated, so that the batches in
        # one direction are the steps in the other, and vice versa.
        note_inputs = T.concatenate( [time_final, note_choices_inputs], axis=2 )
        num_timebatch = note_inputs.shape[1]

        # apply dropout
        if self.dropout > 0:
            pitch_masks = theano_lstm.MultiDropout( [(num_timebatch, shape) for shape in self.p_layer_sizes], self.dropout)
        else:
            pitch_masks = []

        note_outputs_info = [initial_state_with_taps(layer, num_timebatch) for layer in self.pitch_model.layers]
        note_result, _ = theano.scan(fn=step_note, sequences=[note_inputs], non_sequences=pitch_masks, outputs_info=note_outputs_info)

        self.note_thoughts = note_result

        # Now note_result is a list of matrix [layer](note, batch/time, onOrArticProb) for each layer but we only care about
        # the hidden state of the last layer.
        # Transpose to be (batch, time, note, onOrArticProb)
        note_final = get_last_layer(note_result).reshape((n_note,n_batch,n_time,2)).transpose(1,2,0,3)

        # The cost of the entire procedure is the negative log likelihood of the events all happening.
        # For the purposes of training, if the ouputted probability is P, then the likelihood of seeing a 1 is P, and
        # the likelihood of seeing 0 is (1-P). So the likelihood is (1-P)(1-x) + Px = 2Px - P - x + 1
        # Since they are all binary decisions, and are all probabilities given all previous decisions, we can just
        # multiply the likelihoods, or, since we are logging them, add the logs.

        # Note that we mask out the articulations for those notes that aren't played, because it doesn't matter
        # whether or not those are articulated.
        # The padright is there because self.output_mat[:,:,:,0] -> 3D matrix with (b,x,y), but we need 3d tensor with
        # (b,x,y,1) instead
        active_notes = T.shape_padright(self.output_mat[:,1:,:,0])
        mask = T.concatenate([T.ones_like(active_notes),active_notes], axis=3)

        loglikelihoods = mask * T.log( 2*note_final*self.output_mat[:,1:] - note_final - self.output_mat[:,1:] + 1 + self.epsilon )
        self.cost = T.neg(T.sum(loglikelihoods))

        updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta")
        self.update_fun = theano.function(
            inputs=[self.input_mat, self.output_mat],
            outputs=self.cost,
            updates=updates,
            allow_input_downcast=True)

        # Same computation but also exposing the intermediate hidden states,
        # without applying parameter updates' side effects beyond the scan.
        self.update_thought_fun = theano.function(
            inputs=[self.input_mat, self.output_mat],
            outputs= ensure_list(self.time_thoughts) + ensure_list(self.note_thoughts) + [self.cost],
            allow_input_downcast=True)
    def _predict_step_note(self, in_data_from_time, *states):
        """One scan step over notes during prediction.

        Samples the play/articulate decision for the current note from the
        pitch model, conditioned on the time-model activations for this note
        and the previous note's sampled choice.

        ``states`` is [ *hiddens, last_note_choice ].
        Returns the new per-layer hidden states plus the sampled (int8) choice.
        """
        # States is [ *hiddens, last_note_choice ]
        hiddens = list(states[:-1])
        in_data_from_prev = states[-1]
        in_data = T.concatenate([in_data_from_time, in_data_from_prev])
        # correct for dropout: at predict time we scale activations instead of
        # applying random masks (masks[0] = None leaves the input layer alone)
        if self.dropout > 0:
            masks = [1 - self.dropout for layer in self.pitch_model.layers]
            masks[0] = None
        else:
            masks = []
        new_states = self.pitch_model.forward(in_data, prev_hiddens=hiddens, dropout=masks)
        # Now new_states is a per-layer set of activations.
        probabilities = get_last_layer(new_states)
        # Thus, probabilities is a vector of two probabilities, P(play), and P(artic | play).
        # Raising P(play) to self.conservativity biases the sampling before drawing.
        shouldPlay = self.srng.uniform() < (probabilities[0] ** self.conservativity)
        # Articulation can only be 1 when the note is played.
        shouldArtic = shouldPlay * (self.srng.uniform() < probabilities[1])
        chosen = T.cast(T.stack(shouldPlay, shouldArtic), "int8")
        return ensure_list(new_states) + [chosen]
    def setup_predict(self):
        """Build the sampling graph and compile ``predict_fun`` and
        ``predict_thought_fun``.

        Unlike training, each time step must run the time model, then sample
        the per-note choices sequentially (inner scan feeding each sampled
        note into the next), and finally feed the assembled output back in as
        the next time step's input.
        """
        # In prediction mode, note steps are contained in the time steps. So the passing gets a little bit hairy.
        self.predict_seed = T.bmatrix()
        self.steps_to_simulate = T.iscalar()
        def step_time(*states):
            # States is [ *hiddens, prev_result, time]
            hiddens = list(states[:-2])
            in_data = states[-2]
            time = states[-1]
            # correct for dropout (scale activations; no random masks at predict time)
            if self.dropout > 0:
                masks = [1 - self.dropout for layer in self.time_model.layers]
                masks[0] = None
            else:
                masks = []
            new_states = self.time_model.forward(in_data, prev_hiddens=hiddens, dropout=masks)
            # Now new_states is a list of matrix [layer](notes, hidden_states) for each layer
            time_final = get_last_layer(new_states)
            start_note_values = theano.tensor.alloc(np.array(0,dtype=np.int8), 2)
            # This gets a little bit complicated. In the training case, we can pass in a combination of the
            # time net's activations with the known choices. But in the prediction case, those choices don't
            # exist yet. So instead of iterating over the combination, we iterate over only the activations,
            # and then combine in the previous outputs in the step. And then since we are passing outputs to
            # previous inputs, we need an additional outputs_info for the initial "previous" output of zero.
            note_outputs_info = ([ initial_state_with_taps(layer) for layer in self.pitch_model.layers ] +
                                 [ dict(initial=start_note_values, taps=[-1]) ])
            notes_result, updates = theano.scan(fn=self._predict_step_note, sequences=[time_final], outputs_info=note_outputs_info)
            # Now notes_result is a list of matrix [layer/output](notes, onOrArtic)
            output = get_last_layer(notes_result)
            next_input = OutputFormToInputFormOp()(output, time + 1) # TODO: Fix time
            #next_input = T.cast(T.alloc(0, 3, 4),'int64')
            return (ensure_list(new_states) + [ next_input, time + 1, output ]), updates
        # start_sentinel = startSentinel()
        num_notes = self.predict_seed.shape[0]
        # Outer scan over time: per-layer hiddens, the fed-back input (seeded
        # by predict_seed), the time counter, and the sampled output (None ->
        # no initial state; it is an output only).
        time_outputs_info = ([ initial_state_with_taps(layer, num_notes) for layer in self.time_model.layers ] +
                             [ dict(initial=self.predict_seed, taps=[-1]),
                               dict(initial=0, taps=[-1]),
                               None ])
        time_result, updates = theano.scan( fn=step_time,
                                            outputs_info=time_outputs_info,
                                            n_steps=self.steps_to_simulate )
        self.predict_thoughts = time_result
        # Last scan output is the sampled result per step (see step_time's return).
        self.predicted_output = time_result[-1]
        self.predict_fun = theano.function(
            inputs=[self.steps_to_simulate, self.conservativity, self.predict_seed],
            outputs=self.predicted_output,
            updates=updates,
            allow_input_downcast=True)
        self.predict_thought_fun = theano.function(
            inputs=[self.steps_to_simulate, self.conservativity, self.predict_seed],
            outputs=ensure_list(self.predict_thoughts),
            updates=updates,
            allow_input_downcast=True)
    def setup_slow_walk(self):
        """Build and compile ``slow_walk_fun``: advance the prediction by
        exactly one time step per call, keeping the recurrent state in shared
        variables (seeded via ``start_slow_walk``)."""
        # Shared state: current input, time counter, and one hidden state per
        # recurrent time-model layer. The (2,2) shapes are placeholders that
        # start_slow_walk() overwrites with properly sized values.
        self.walk_input = theano.shared(np.ones((2,2), dtype='int8'))
        self.walk_time = theano.shared(np.array(0, dtype='int64'))
        self.walk_hiddens = [theano.shared(np.ones((2,2), dtype=theano.config.floatX)) for layer in self.time_model.layers if has_hidden(layer)]
        # correct for dropout (scale activations; no random masks at predict time)
        if self.dropout > 0:
            masks = [1 - self.dropout for layer in self.time_model.layers]
            masks[0] = None
        else:
            masks = []
        new_states = self.time_model.forward(self.walk_input, prev_hiddens=self.walk_hiddens, dropout=masks)
        # Now new_states is a list of matrix [layer](notes, hidden_states) for each layer
        time_final = get_last_layer(new_states)
        start_note_values = theano.tensor.alloc(np.array(0,dtype=np.int8), 2)
        note_outputs_info = ([ initial_state_with_taps(layer) for layer in self.pitch_model.layers ] +
                             [ dict(initial=start_note_values, taps=[-1]) ])
        notes_result, updates = theano.scan(fn=self._predict_step_note, sequences=[time_final], outputs_info=note_outputs_info)
        # Now notes_result is a list of matrix [layer/output](notes, onOrArtic)
        output = get_last_layer(notes_result)
        next_input = OutputFormToInputFormOp()(output, self.walk_time + 1) # TODO: Fix time
        #next_input = T.cast(T.alloc(0, 3, 4),'int64')
        # Expose intermediate activations plus the new input and sampled output.
        slow_walk_results = (new_states[:-1] + notes_result[:-1] + [ next_input, output ])
        # Persist the advanced clock and the fed-back input ...
        updates.update({
            self.walk_time: self.walk_time+1,
            self.walk_input: next_input
        })
        # ... and the new hidden state of every recurrent time-model layer.
        updates.update({hidden:newstate for hidden, newstate, layer in zip(self.walk_hiddens, new_states, self.time_model.layers) if has_hidden(layer)})
        self.slow_walk_fun = theano.function(
            inputs=[self.conservativity],
            outputs=slow_walk_results,
            updates=updates,
            allow_input_downcast=True)
    def start_slow_walk(self, seed):
        """Reset the slow-walk shared state: time counter to 0, input to
        ``seed``, and each recurrent layer's hidden state to its learned
        initial state, broadcast over the seed's first (note) axis."""
        seed = np.array(seed)
        num_notes = seed.shape[0]
        self.walk_time.set_value(0)
        self.walk_input.set_value(seed)
        # Tile each layer's learned initial hidden state to one row per note.
        for layer, hidden in zip((l for l in self.time_model.layers if has_hidden(l)),self.walk_hiddens):
            hidden.set_value(np.repeat(np.reshape(layer.initial_hidden_state.get_value(), (1,-1)), num_notes, axis=0))
| bsd-2-clause |
FelixSchwarz/soapfish | soapfish/testutil/simpletype_testcase.py | 1 | 1555 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from lxml import etree
from pythonic_testcase import PythonicTestCase
from .. import xsd
__all__ = ['SimpleTypeTestCase']
class SimpleTypeTestCase(PythonicTestCase):
    """Base class for test cases exercising a single xsd simple type.

    Subclasses set ``xsd_type``; the helpers below wrap that type in a
    one-element complex type so parsing and assignment can be exercised.
    """
    xsd_type = None

    # --- custom assertions ---------------------------------------------------
    def assert_parse(self, expected_value, string_value):
        parsed = self._parse(string_value)
        self.assert_equals(expected_value, parsed)

    def assert_can_set(self, value):
        container = self._new_container()
        container.foo = value
        return container.foo

    def assert_can_not_set(self, value):
        container = self._new_container()
        try:
            container.foo = value
        except ValueError:
            return
        self.fail('did accept forbidden value %r' % value)

    # --- internal helpers ----------------------------------------------------
    def _new_container(self):
        # Wrap self.xsd_type in a fresh one-field complex type instance.
        class Container(xsd.ComplexType):
            foo = xsd.Element(self.xsd_type)
        return Container()

    def _parse(self, string_value):
        class Container(xsd.ComplexType):
            foo = xsd.Element(self.xsd_type)
        text = '' if string_value is None else string_value
        xml = '<container><foo>%s</foo></container>' % text
        return Container.parsexml(xml).foo

    def _normalize(self, xml):
        parser = etree.XMLParser(remove_blank_text=True)
        return etree.tostring(etree.XML(xml, parser=parser))
| bsd-3-clause |
charris/numpy | numpy/testing/_private/parameterized.py | 7 | 16161 | """
tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
Unless stated otherwise in the source files, all code is copyright 2010 David
Wolever <david@wolever.net>. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of David Wolever.
"""
import re
import inspect
import warnings
from functools import wraps
from types import MethodType
from collections import namedtuple
from unittest import TestCase
_param = namedtuple("param", "args kwargs")

class param(_param):
    """A single parameter set for a test case.

    Stores positional arguments in ``args`` and keyword arguments in
    ``kwargs``; instances are what ``@parameterized`` iterates over, e.g.::

        @parameterized([param("foo", bar=16)])
        def test_stuff(foo, bar=16):
            pass
    """
    def __new__(cls, *args , **kwargs):
        # Pack the call's own *args/**kwargs into the two namedtuple slots.
        return _param.__new__(cls, args, kwargs)

    @classmethod
    def explicit(cls, args=None, kwargs=None):
        """Create a ``param`` from an explicit args sequence and kwargs dict,
        either of which may be omitted."""
        return cls(*(args or ()), **(kwargs or {}))

    @classmethod
    def from_decorator(cls, args):
        """Coerce a raw ``@parameterized`` entry (a ``param``, a bare string,
        or a tuple of positional arguments) into a ``param`` instance."""
        if isinstance(args, param):
            return args
        if isinstance(args, str):
            args = (args, )
        try:
            return cls(*args)
        except TypeError as e:
            # Re-raise anything other than the "not iterable" unpacking error.
            if "after * must be" not in str(e):
                raise
            raise TypeError(
                "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
                %(args, args),
            )

    def __repr__(self):
        return "param(*%r, **%r)" %self
def parameterized_argument_value_pairs(func, p):
    """Return tuples of parameterized arguments and their values.

    This is useful if you are writing your own doc_func
    function and need to know the values for each parameter name::

        >>> def func(a, foo=None, bar=42, **kwargs): pass
        >>> p = param(1, foo=7, extra=99)
        >>> parameterized_argument_value_pairs(func, p)
        [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]

    If the function's first argument is named ``self`` then it will be
    ignored::

        >>> def func(self, a): pass
        >>> p = param(1)
        >>> parameterized_argument_value_pairs(func, p)
        [("a", 1)]

    Additionally, empty ``*args`` or ``**kwargs`` will be ignored::

        >>> def func(foo, *args): pass
        >>> p = param(1)
        >>> parameterized_argument_value_pairs(func, p)
        [("foo", 1)]
        >>> p = param(1, 16)
        >>> parameterized_argument_value_pairs(func, p)
        [("foo", 1), ("*args", (16, ))]
    """
    # BUG FIX: inspect.getargspec() was removed in Python 3.11 (deprecated
    # since 3.0); getfullargspec() is the drop-in replacement. Note the
    # attribute rename: getargspec's ``keywords`` is getfullargspec's ``varkw``.
    argspec = inspect.getfullargspec(func)
    arg_offset = 1 if argspec.args[:1] == ["self"] else 0
    named_args = argspec.args[arg_offset:]

    # Pair the supplied positional values with the leading named parameters.
    result = list(zip(named_args, p.args))
    # Remaining named parameters take their value from kwargs or the default.
    named_args = argspec.args[len(result) + arg_offset:]
    varargs = p.args[len(result):]
    result.extend([
        (name, p.kwargs.get(name, default))
        for (name, default)
        in zip(named_args, argspec.defaults or [])
    ])

    # Whatever kwargs are left over go into the **-catchall entry (sorted for
    # deterministic output).
    seen_arg_names = {n for (n, _) in result}
    keywords = dict(sorted([
        (name, p.kwargs[name])
        for name in p.kwargs
        if name not in seen_arg_names
    ]))

    if varargs:
        result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))

    if keywords:
        result.append(("**%s" %(argspec.varkw, ), keywords))

    return result
def short_repr(x, n=64):
    """Return ``repr(x)`` as text, shortened to roughly ``n`` characters.

    Long representations keep the first and last ``n // 2`` characters with
    ``...`` in between. Byte reprs are decoded (utf-8, falling back to
    latin1) so the result is always a str.
    """
    text = repr(x)
    if isinstance(text, bytes):
        try:
            text = str(text, "utf-8")
        except UnicodeDecodeError:
            text = str(text, "latin1")
    if len(text) <= n:
        return text
    half = n // 2
    return text[:half] + "..." + text[len(text) - half:]
def default_doc_func(func, num, p):
    """Build a parameterized case's docstring: the wrapped function's first
    docstring line with ``[with arg=value, ...]`` appended (re-attaching any
    trailing period), followed by the rest of the original docstring."""
    doc = func.__doc__
    if doc is None:
        return None

    pairs = parameterized_argument_value_pairs(func, p)
    # Assumes that the function passed is a bound method.
    descs = ["%s=%s" % (name, short_repr(value)) for name, value in pairs]

    # Only the first docstring line gets the argument description; keep the
    # remainder verbatim.
    first, nl, rest = doc.lstrip().partition("\n")
    suffix = ""
    if first.endswith("."):
        first, suffix = first[:-1], "."

    args = "%s[with %s]" % (" " if first else "", ", ".join(descs))
    return "".join([first.rstrip(), args, suffix, nl, rest])
def default_name_func(func, num, p):
    """Name a generated test case ``<funcname>_<num>``, plus a sanitized
    form of the first parameter when that parameter is a string."""
    suffix = "_%s" %(num, )
    if p.args and isinstance(p.args[0], str):
        suffix += "_" + parameterized.to_safe_name(p.args[0])
    return func.__name__ + suffix
# force nose for numpy purposes.
_test_runner_override = 'nose'
# False = "not yet guessed"; detect_runner() caches its result here.
_test_runner_guess = False
_test_runners = {"unittest", "unittest2", "nose", "nose2", "pytest"}
_test_runner_aliases = {
    "_pytest": "pytest",
}

def set_test_runner(name):
    """Force runner detection to report ``name``.

    Raises TypeError when ``name`` is not one of the known runners.
    """
    global _test_runner_override
    if name in _test_runners:
        _test_runner_override = name
        return
    raise TypeError(
        "Invalid test runner: %r (must be one of: %s)"
        %(name, ", ".join(_test_runners)),
    )
def detect_runner():
    """ Guess which test runner we're using by traversing the stack and looking
        for the first matching module. This *should* be reasonably safe, as
        it's done during test discovery where the test runner should be the
        stack frame immediately outside. """
    if _test_runner_override is not None:
        return _test_runner_override

    global _test_runner_guess
    # Walk the stack only once; the result is cached in _test_runner_guess
    # (False means "not computed yet", None means "no known runner found").
    if _test_runner_guess is False:
        stack = inspect.stack()
        # Search outermost-first so the discovering runner wins.
        for record in reversed(stack):
            frame = record[0]
            module = frame.f_globals.get("__name__").partition(".")[0]
            if module in _test_runner_aliases:
                module = _test_runner_aliases[module]
            if module in _test_runners:
                _test_runner_guess = module
                break
        else:
            _test_runner_guess = None
    return _test_runner_guess
class parameterized:
    """ Parameterize a test case::

            class TestInt:
                @parameterized([
                    ("A", 10),
                    ("F", 15),
                    param("10", 42, base=42)
                ])
                def test_int(self, input, expected, base=16):
                    actual = int(input, base=base)
                    assert_equal(actual, expected)

            @parameterized([
                (2, 3, 5),
                (3, 5, 8),
            ])
            def test_add(a, b, expected):
                assert_equal(a + b, expected)
    """

    def __init__(self, input, doc_func=None):
        # ``input`` may be a list of param-like entries or a callable
        # returning one; normalize to a zero-argument callable.
        self.get_input = self.input_as_callable(input)
        self.doc_func = doc_func or default_doc_func

    def __call__(self, test_func):
        """Turn ``test_func`` into a nose-style generator yielding one
        (callable, *args, kwargs) tuple per parameter set."""
        self.assert_not_in_testcase_subclass()

        @wraps(test_func)
        def wrapper(test_self=None):
            test_cls = test_self and type(test_self)

            original_doc = wrapper.__doc__
            for num, args in enumerate(wrapper.parameterized_input):
                p = param.from_decorator(args)
                unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
                try:
                    # Temporarily expose the per-case docstring on the wrapper.
                    wrapper.__doc__ = nose_tuple[0].__doc__
                    # Nose uses `getattr(instance, test_func.__name__)` to get
                    # a method bound to the test instance (as opposed to a
                    # method bound to the instance of the class created when
                    # tests were being enumerated). Set a value here to make
                    # sure nose can get the correct test method.
                    if test_self is not None:
                        setattr(test_cls, test_func.__name__, unbound_func)
                    yield nose_tuple
                finally:
                    # Restore the class and wrapper state after each case.
                    if test_self is not None:
                        delattr(test_cls, test_func.__name__)
                    wrapper.__doc__ = original_doc

        wrapper.parameterized_input = self.get_input()
        wrapper.parameterized_func = test_func
        # Rename the original so it is not collected as a test itself.
        test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )

        return wrapper

    def param_as_nose_tuple(self, test_self, func, num, p):
        """Build the (callable, *args, kwargs) tuple nose expects for one
        parameter set, returning it together with the unbound callable."""
        # The lambda re-splits the trailing kwargs dict appended below.
        nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
        nose_func.__doc__ = self.doc_func(func, num, p)
        # Track the unbound function because we need to setattr the unbound
        # function onto the class for nose to work (see comments above), and
        # Python 3 doesn't let us pull the function out of a bound method.
        unbound_func = nose_func
        if test_self is not None:
            nose_func = MethodType(nose_func, test_self)
        return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )

    def assert_not_in_testcase_subclass(self):
        # ``@parameterized`` generators are silently ignored by unittest;
        # fail loudly instead.
        parent_classes = self._terrible_magic_get_defining_classes()
        if any(issubclass(cls, TestCase) for cls in parent_classes):
            raise Exception("Warning: '@parameterized' tests won't work "
                            "inside subclasses of 'TestCase' - use "
                            "'@parameterized.expand' instead.")

    def _terrible_magic_get_defining_classes(self):
        """ Returns the list of parent classes of the class currently being defined.
            Will likely only work if called from the ``parameterized`` decorator.
            This function is entirely @brandon_rhodes's fault, as he suggested
            the implementation: http://stackoverflow.com/a/8793684/71522 """
        stack = inspect.stack()
        if len(stack) <= 4:
            return []
        frame = stack[4]
        code_context = frame[4] and frame[4][0].strip()
        if not (code_context and code_context.startswith("class ")):
            return []
        _, _, parents = code_context.partition("(")
        parents, _, _ = parents.partition(")")
        # Evaluate the base-class expression in the defining frame's scope.
        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)

    @classmethod
    def input_as_callable(cls, input):
        """Normalize ``input`` to a zero-argument callable returning the
        validated list of params (callables are re-validated on each call)."""
        if callable(input):
            return lambda: cls.check_input_values(input())
        input_values = cls.check_input_values(input)
        return lambda: input_values

    @classmethod
    def check_input_values(cls, input_values):
        # Explicitly convert non-list inputs to a list so that:
        # 1. A helpful exception will be raised if they aren't iterable, and
        # 2. Generators are unwrapped exactly once (otherwise `nosetests
        #    --processes=n` has issues; see:
        #    https://github.com/wolever/nose-parameterized/pull/31)
        if not isinstance(input_values, list):
            input_values = list(input_values)
        return [ param.from_decorator(p) for p in input_values ]

    @classmethod
    def expand(cls, input, name_func=None, doc_func=None, **legacy):
        """ A "brute force" method of parameterizing test cases. Creates new
            test cases and injects them into the namespace that the wrapped
            function is being defined in. Useful for parameterizing tests in
            subclasses of 'UnitTest', where Nose test generators don't work.

            >>> @parameterized.expand([("foo", 1, 2)])
            ... def test_add1(name, input, expected):
            ...     actual = add1(input)
            ...     assert_equal(actual, expected)
            ...
            >>> locals()
            ... 'test_add1_foo_0': <function ...> ...
            >>>
            """
        # Accept the deprecated keyword spellings for name/doc functions.
        if "testcase_func_name" in legacy:
            warnings.warn("testcase_func_name= is deprecated; use name_func=",
                          DeprecationWarning, stacklevel=2)
            if not name_func:
                name_func = legacy["testcase_func_name"]

        if "testcase_func_doc" in legacy:
            warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
                          DeprecationWarning, stacklevel=2)
            if not doc_func:
                doc_func = legacy["testcase_func_doc"]

        doc_func = doc_func or default_doc_func
        name_func = name_func or default_name_func

        def parameterized_expand_wrapper(f, instance=None):
            # Inject one standalone test function per parameter set into the
            # namespace where the decorated function is being defined.
            stack = inspect.stack()
            frame = stack[1]
            frame_locals = frame[0].f_locals

            parameters = cls.input_as_callable(input)()
            for num, p in enumerate(parameters):
                name = name_func(f, num, p)
                frame_locals[name] = cls.param_as_standalone_func(p, f, name)
                frame_locals[name].__doc__ = doc_func(f, num, p)

            # The original function must not be collected as a test itself.
            f.__test__ = False
        return parameterized_expand_wrapper

    @classmethod
    def param_as_standalone_func(cls, p, func, name):
        """Bind one parameter set to ``func`` as a plain test function."""
        @wraps(func)
        def standalone_func(*a):
            return func(*(a + p.args), **p.kwargs)
        standalone_func.__name__ = name

        # place_as is used by py.test to determine what source file should be
        # used for this test.
        standalone_func.place_as = func

        # Remove __wrapped__ because py.test will try to look at __wrapped__
        # to determine which parameters should be used with this test case,
        # and obviously we don't need it to do any parameterization.
        try:
            del standalone_func.__wrapped__
        except AttributeError:
            pass
        return standalone_func

    @classmethod
    def to_safe_name(cls, s):
        # Collapse anything that is not a valid identifier character to "_".
        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
| bsd-3-clause |
Yong-Lee/django | tests/template_tests/filter_tests/test_dictsortreversed.py | 342 | 1066 | from django.template.defaultfilters import dictsortreversed
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):

    def test_sort(self):
        data = [
            {'age': 23, 'name': 'Barbara-Ann'},
            {'age': 63, 'name': 'Ra Ra Rasputin'},
            {'name': 'Jonny B Goode', 'age': 18},
        ]
        expected = [
            [('age', 63), ('name', 'Ra Ra Rasputin')],
            [('age', 23), ('name', 'Barbara-Ann')],
            [('age', 18), ('name', 'Jonny B Goode')],
        ]
        result = dictsortreversed(data, 'age')
        self.assertEqual([sorted(item.items()) for item in result], expected)

    def test_invalid_values(self):
        """
        If dictsortreversed is passed something other than a list of
        dictionaries, fail silently.
        """
        for invalid in ([1, 2, 3], 'Hello!', {'a': 1}, 1):
            self.assertEqual(dictsortreversed(invalid, 'age'), '')
grap/OpenUpgrade | openerp/models.py | 2 | 291294 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Object Relational Mapping module:
* Hierarchical structure
* Constraints consistency and validation
* Object metadata depends on its status
* Optimised processing by complex query (multiple actions at once)
* Default field values
* Permissions optimisation
* Persistant object: DB postgresql
* Data conversion
* Multi-level caching system
* Two different inheritance mechanisms
* Rich set of field types:
- classical (varchar, integer, boolean, ...)
- relational (one2many, many2one, many2many)
- functional
"""
import datetime
import functools
import itertools
import logging
import operator
import pytz
import re
import time
from collections import defaultdict, MutableMapping
from inspect import getmembers, currentframe
from operator import itemgetter
import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
import openerp
from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .osv import fields
from .osv.query import Query
from .tools import frozendict, lazy_property, ormcache
from .tools.config import config
from .tools.func import frame_codeinfo
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT, pickle
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')  # dedicated logger for database schema changes

# Valid "order" spec: comma-separated (optionally quoted) column names, each
# with an optional asc/desc direction; the lookbehind rejects a trailing comma.
regex_order = re.compile('^(\s*([a-z0-9:_]+|"[a-z0-9:_]+")(\s+(desc|asc))?\s*(,|$))+(?<!,)$', re.I)
# Valid model names: lowercase alphanumerics, underscores and dots only.
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
# Old-style (v7) on_change specification: "method(arg, ...)".
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")

# Threshold related to recomputing stored fields during auto-init
# (usage is further down this module, not visible in this chunk).
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in other not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.
    """
    return regex_object_name.match(name) is not None
def raise_on_invalid_object_name(name):
    """Log and raise a ValueError-style ORM error when ``name`` is not a
    valid model name (see check_object_name)."""
    if check_object_name(name):
        return
    msg = "The _name attribute %s is not valid." % name
    _logger.error(msg)
    raise except_orm('ValueError', msg)
# Map from SQL "ON DELETE" action keywords to the one-letter codes PostgreSQL
# stores in pg_constraint.confdeltype.
POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}
def intersect(la, lb):
    """Return the elements of ``la`` that also occur in ``lb``, preserving
    ``la``'s order (and duplicates)."""
    return [item for item in la if item in lb]
def same_name(f, g):
    """ Test whether functions ``f`` and ``g`` are identical or have the same name """
    if f == g:
        return True
    # Fall back to comparing __name__; distinct sentinels (0 vs 1) make two
    # objects without a __name__ compare unequal.
    return getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # Normalize "f.id" -> "f/.id" and "f:id" -> "f/id" so that both spellings
    # split into the expected path components.
    normalized = re.sub(r'([^/]):id', r'\1/id',
                        re.sub(r'([^/])\.id', r'\1/.id', fieldname))
    return normalized.split('/')
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if not size:
        return 'VARCHAR'
    if not isinstance(size, int):
        raise TypeError("VARCHAR parameter should be an int, got %s"
                        % type(size))
    if size > 0:
        return 'VARCHAR(%d)' % size
    # Negative sizes fall through to the unbounded declaration.
    return 'VARCHAR'
# Direct mapping from simple field classes to their PostgreSQL column type.
# Field classes not listed here (float, char, selection, function, ...) need
# per-case logic and are handled in get_pg_type() below.
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        # Simple one-to-one mapping.
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # Explicit support for "falsy" digits (0, False) to indicate a
        # NUMERIC field with no fixed precision. The values will be saved
        # in the database with all significant digits.
        # FLOAT8 type is still the default when there is no precision because
        # it is faster for most operations (sums, etc.)
        if f.digits is not None:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        # Integer-keyed selections (or the legacy size == -1 marker) are
        # stored as integers; everything else as varchar.
        if (f.selection and isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        # Function fields recurse with the type of the value they compute.
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the model's
    ``_register`` attribute to False.

    """

    # {addon_module_name: [model classes declared by that module]}
    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            # First non-registered class (BaseModel itself and the abstract
            # bases): skip discovery, but re-enable it for subclasses.
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the ``openerp.addons`` namespace
            # or not. For instance, module ``sale`` can be imported as
            # ``openerp.addons.sale`` (the right way) or ``sale`` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instanciate for this module.
        if not self._custom:
            self.module_to_models.setdefault(self._module, []).append(self)

        # check for new-api conversion error: leave comma after field definition
        for key, val in attrs.iteritems():
            if type(val) is tuple and len(val) == 1 and isinstance(val[0], Field):
                _logger.error("Trailing comma after field definition: %s.%s", self, key)

        # transform columns into new-style fields (enables field inheritance)
        for name, column in self._columns.iteritems():
            if name in self.__dict__:
                _logger.warning("In class %s, field %r overriding an existing value", self, name)
            setattr(self, name, column.to_field())
class NewId(object):
    """ Pseudo-ids for new records. """
    def __nonzero__(self):
        # New records are always falsy (Python 2 truth protocol; a Python 3
        # port would need __bool__ instead).
        return False
IdType = (int, long, basestring, NewId)
# maximum number of prefetched records
PREFETCH_MAX = 200
# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
class BaseModel(object):
""" Base class for OpenERP models.
OpenERP models are created by inheriting from this class' subclasses:
* :class:`Model` for regular database-persisted models
* :class:`TransientModel` for temporary data, stored in the database but
automatically vacuumed every so often
* :class:`AbstractModel` for abstract super classes meant to be shared by
multiple inheriting model
The system automatically instantiates every model once per database. Those
instances represent the available models on each database, and depend on
which modules are installed on that database. The actual class of each
instance is built from the Python classes that create and inherit from the
corresponding model.
Every model instance is a "recordset", i.e., an ordered collection of
records of the model. Recordsets are returned by methods like
:meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
explicit representation: a record is represented as a recordset of one
record.
To create a class that should not be instantiated, the _register class
attribute may be set to False.
"""
__metaclass__ = MetaModel
_auto = True # create database backend
_register = False # Set to false if the model shouldn't be automatically discovered.
_name = None
_columns = {}
_constraints = []
_custom = False
_defaults = {}
_rec_name = None
_parent_name = 'parent_id'
_parent_store = False
_parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
_description = None
_needaction = False
_translate = True # set to False to disable translations export for this model
# dict of {field:method}, with method returning the (name_get of records, {id: fold})
# to include in the _read_group, if grouped on this field
_group_by_full = {}
# Transience
_transient = False # True in a TransientModel
# structure:
# { 'parent_model': 'm2o_field', ... }
_inherits = {}
# Mapping from inherits'd field name to triple (m, r, f, n) where m is the
# model from which it is inherits'd, r is the (local) field towards m, f
# is the _column object itself, and n is the original (i.e. top-most)
# parent model.
# Example:
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
# field_column_obj, origina_parent_model), ... }
_inherit_fields = {}
_table = None
_log_create = False
_sql_constraints = []
# model dependencies, for models backed up by sql views:
# {model_name: field_names, ...}
_depends = {}
CONCURRENCY_CHECK_FIELD = '__last_update'
    def log(self, cr, uid, id, message, secondary=False, context=None):
        # Deprecated no-op kept for API compatibility: res.log was replaced by
        # the OpenChatter notification system; only emits a warning.
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
def view_init(self, cr, uid, fields_list, context=None):
    """Hook called when a view on the model is opened.

    Override this method to do specific things when a view on the object
    is opened.  ``fields_list`` is presumably the list of field names
    involved in the view (see the call from ``default_get``); the default
    implementation does nothing.
    """
    pass
def _field_create(self, cr, context=None):
    """ Create entries in ir_model_fields for all the model's fields.

    If necessary, also create an entry in ir_model, and if called from the
    modules loading scheme (by receiving 'module' in the context), also
    create entries in ir_model_data (for the model and the fields).

    - create an entry in ir_model (if there is not already one),
    - create an entry in ir_model_data (if there is not already one, and if
      'module' is in the context),
    - update ir_model_fields with the fields found in _columns
      (TODO there is some redundancy as _columns is updated from
      ir_model_fields in __init__).
    """
    if context is None:
        context = {}
    # ensure the ir_model row for this model exists, creating it if needed
    cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
    if not cr.rowcount:
        cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
        model_id = cr.fetchone()[0]
        cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
    else:
        model_id = cr.fetchone()[0]
    # when loaded as part of a module, also register an xmlid for the model
    if 'module' in context:
        name_id = 'model_'+self._name.replace('.', '_')
        cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
        if not cr.rowcount:
            cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                (name_id, context['module'], 'ir.model', model_id)
            )

    # OpenUpgrade edit start: In rare cases, an old module defined a field
    # on a model that is not defined in another module earlier in the
    # chain of inheritance. Then we need to assign the ir.model.fields'
    # xmlid to this other module, otherwise the column would be dropped
    # when uninstalling the first module.
    # An example is res.partner#display_name defined in 7.0 by
    # account_report_company, but now the field belongs to the base
    # module
    # Given that we arrive here in order of inheritance, we simply check
    # if the field's xmlid belongs to a module already loaded, and if not,
    # update the record with the correct module name.
    cr.execute(
        "SELECT f.*, d.module, d.id as xmlid_id "
        "FROM ir_model_fields f LEFT JOIN ir_model_data d "
        "ON f.id=d.res_id and d.model='ir.model.fields' WHERE f.model=%s",
        (self._name,))
    # OpenUpgrade edit end
    cols = {}  # existing ir_model_fields rows, keyed by field name
    for rec in cr.dictfetchall():
        # OpenUpgrade start:
        if 'module' in context and\
                rec['module'] and\
                rec['name'] in self._columns.keys() and\
                rec['module'] != context.get('module') and\
                rec['module'] not in self.pool._init_modules:
            _logger.info(
                'Moving XMLID for ir.model.fields record of %s#%s '
                'from %s to %s',
                self._name, rec['name'], rec['module'], context['module'])
            cr.execute(
                "UPDATE ir_model_data SET module=%(module)s "
                "WHERE id=%(xmlid_id)s",
                dict(rec, module=context['module']))
        # OpenUpgrade end
        cols[rec['name']] = rec

    ir_model_fields_obj = self.pool.get('ir.model.fields')

    # sparse field should be created at the end, as it depends on its serialized field already existing
    model_fields = sorted(self._fields.items(), key=lambda x: 1 if x[1].type == 'sparse' else 0)
    for (k, f) in model_fields:
        # reflection values describing field ``k`` for ir_model_fields
        vals = {
            'model_id': model_id,
            'model': self._name,
            'name': k,
            'field_description': f.string,
            'ttype': f.type,
            'relation': f.comodel_name or '',
            'select_level': tools.ustr(int(f.index)),
            'readonly': (f.readonly and 1) or 0,
            'required': (f.required and 1) or 0,
            'selectable': (f.search or f.store and 1) or 0,
            'translate': (f.translate if hasattr(f,'translate') else False and 1) or 0,
            'relation_field': f.inverse_name if hasattr(f, 'inverse_name') else '',
            'serialization_field_id': None,
        }
        if getattr(f, 'serialization_field', None):
            # resolve link to serialization_field if specified by name
            serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
            if not serialization_field_id:
                raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
            vals['serialization_field_id'] = serialization_field_id[0]

        # When its a custom field,it does not contain f.select
        if context.get('field_state', 'base') == 'manual':
            if context.get('field_name', '') == k:
                vals['select_level'] = context.get('select', '0')
            #setting value to let the problem NOT occur next time
            elif k in cols:
                vals['select_level'] = cols[k]['select_level']

        if k not in cols:
            # field not reflected yet: insert a fresh ir_model_fields row
            cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
            id = cr.fetchone()[0]
            vals['id'] = id
            cr.execute("""INSERT INTO ir_model_fields (
                id, model_id, model, name, field_description, ttype,
                relation,state,select_level,relation_field, translate, serialization_field_id
            ) VALUES (
                %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
            )""", (
                id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                vals['relation'], 'base',
                vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
            ))
            if 'module' in context:
                name1 = 'field_' + self._table + '_' + k
                cr.execute("select name from ir_model_data where name=%s", (name1,))
                if cr.fetchone():
                    # xmlid already taken: disambiguate with the new row id
                    name1 = name1 + "_" + str(id)
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name1, context['module'], 'ir.model.fields', id)
                )
        else:
            # field already reflected: if ANY attribute differs, refresh the
            # whole row in one UPDATE and stop comparing (hence the break)
            for key, val in vals.items():
                if cols[k][key] != vals[key]:
                    cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                    cr.execute("""UPDATE ir_model_fields SET
                        model_id=%s, field_description=%s, ttype=%s, relation=%s,
                        select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                    WHERE
                        model=%s AND name=%s""", (
                        vals['model_id'], vals['field_description'], vals['ttype'],
                        vals['relation'],
                        vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                    ))
                    break
    self.invalidate_cache(cr, SUPERUSER_ID)
@classmethod
def _add_field(cls, name, field):
    """ Add the given ``field`` under the given ``name`` in the class.

    The field becomes both a class attribute and an entry in
    ``cls._fields``; a warning is logged if it clobbers a non-field
    attribute already present on the class.
    """
    # add field as an attribute and in cls._fields (for reflection)
    if not isinstance(getattr(cls, name, field), Field):
        _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
    setattr(cls, name, field)
    cls._fields[name] = field

    # basic setup of field
    field.set_class_name(cls, name)

    # cls._columns will be updated once fields are set up
@classmethod
def _pop_field(cls, name):
    """Detach and return the field registered under ``name``.

    Intended for manual (user-defined) fields only: the entry is dropped
    from ``cls._fields`` and ``cls._columns``, and the matching class
    attribute is deleted when present.
    """
    removed = cls._fields.pop(name)
    cls._columns.pop(name, None)
    if hasattr(cls, name):
        delattr(cls, name)
    return removed
@classmethod
def _add_magic_fields(cls):
    """ Introduce magic fields on the current class

    * id is a "normal" field (with a specific getter)
    * create_uid, create_date, write_uid and write_date have become
      "normal" fields
    * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
      method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
      to get the same structure as the previous
      ``(now() at time zone 'UTC')::timestamp``::

          # select (now() at time zone 'UTC')::timestamp;
                    timezone
          ----------------------------
           2013-06-18 08:30:37.292809

          >>> str(datetime.datetime.utcnow())
          '2013-06-18 08:31:32.821177'
    """
    def add(name, field):
        """ add ``field`` with the given ``name`` if it does not exist yet """
        if name not in cls._fields:
            cls._add_field(name, field)

    # cyclic import
    from . import fields

    # this field 'id' must override any other column or field
    cls._add_field('id', fields.Id(automatic=True))

    add('display_name', fields.Char(string='Display Name', automatic=True,
        compute='_compute_display_name'))

    if cls._log_access:
        # audit-trail fields, only for models that track writes
        add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
        add('create_date', fields.Datetime(string='Created on', automatic=True))
        add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
        add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
        last_modified_name = 'compute_concurrency_field_with_access'
    else:
        last_modified_name = 'compute_concurrency_field'

    # this field must override any other column or field
    cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
        string='Last Modified on', compute=last_modified_name, automatic=True))
@api.one
def compute_concurrency_field(self):
    """Concurrency stamp for models without log access: always 'now' (UTC)."""
    self[self.CONCURRENCY_CHECK_FIELD] = \
        datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
@api.one
@api.depends('create_date', 'write_date')
def compute_concurrency_field_with_access(self):
    """Concurrency stamp for models with log access: last write, else
    creation date, else 'now' (UTC) as a fallback."""
    self[self.CONCURRENCY_CHECK_FIELD] = \
        self.write_date or self.create_date or \
        datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
#
# Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
@classmethod
def _build_model(cls, pool, cr):
    """ Instantiate a given model.

    This class method instantiates the class of some model (i.e. a class
    deriving from osv or osv_memory). The class might be the class passed
    in argument or, if it inherits from another class, a class constructed
    by combining the two classes.
    """

    # The model's class inherits from cls and the classes of the inherited
    # models. All those classes are combined in a flat hierarchy:
    #
    #         Model                 the base class of all models
    #        /  |  \
    #      cls  c2  c1              the classes defined in modules
    #        \  |  /
    #       ModelClass              the final class of the model
    #        /  |  \
    #     model   recordset ...     the class' instances
    #
    # The registry contains the instance ``model``. Its class, ``ModelClass``,
    # carries inferred metadata that is shared between all the model's
    # instances for this registry only. When we '_inherit' from another
    # model, we do not inherit its ``ModelClass``, but this class' parents.
    # This is a limitation of the inheritance mechanism.

    # Keep links to non-inherited constraints in cls; this is useful for
    # instance when exporting translations
    cls._local_constraints = cls.__dict__.get('_constraints', [])
    cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

    # determine inherited models (_inherit may be a string or a list)
    parents = getattr(cls, '_inherit', [])
    parents = [parents] if isinstance(parents, basestring) else (parents or [])

    # determine the model's name
    name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

    # determine the module that introduced the model
    original_module = pool[name]._original_module if name in parents else cls._module

    # determine all the classes the model should inherit from
    bases = [cls]
    hierarchy = cls
    for parent in parents:
        if parent not in pool:
            raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                'You may need to add a dependency on the parent class\' module.' % (name, parent))
        parent_class = type(pool[parent])
        bases += parent_class.__bases__
        # build a throwaway class whose mro() gives the base ordering below
        hierarchy = type(name, (hierarchy, parent_class), {'_register': False})

    # order bases following the mro of class hierarchy
    bases = [base for base in hierarchy.mro() if base in bases]

    # determine the attributes of the model's class, merging each base's
    # declarations from least to most specific (hence reversed())
    inherits = {}
    depends = {}
    constraints = {}
    sql_constraints = []

    for base in reversed(bases):
        inherits.update(base._inherits)

        for mname, fnames in base._depends.iteritems():
            depends[mname] = depends.get(mname, []) + fnames

        for cons in base._constraints:
            # cons may override a constraint with the same function name
            constraints[getattr(cons[0], '__name__', id(cons[0]))] = cons

        sql_constraints += base._sql_constraints

    # build the actual class of the model
    ModelClass = type(name, tuple(bases), {
        '_name': name,
        '_register': False,
        '_columns': None,           # recomputed in _setup_fields()
        '_defaults': None,          # recomputed in _setup_base()
        '_fields': frozendict(),    # idem
        '_inherits': inherits,
        '_depends': depends,
        '_constraints': constraints.values(),
        '_sql_constraints': sql_constraints,
        '_original_module': original_module,
    })

    # instantiate the model, and initialize it
    model = object.__new__(ModelClass)
    model.__init__(pool, cr)
    return model
@classmethod
def _init_function_fields(cls, pool, cr):
    """Register this model's old-API function fields on the registry.

    Non-stored function fields are recorded in
    ``pool._pure_function_fields`` for cache invalidation; stored ones
    contribute trigger entries to ``pool._store_function``.
    """
    # initialize the list of non-stored function fields for this model
    pool._pure_function_fields[cls._name] = []

    # process store of low-level function fields
    for fname, column in cls._columns.iteritems():
        # filter out existing store about this field
        pool._store_function[cls._name] = [
            stored
            for stored in pool._store_function.get(cls._name, [])
            if (stored[0], stored[1]) != (cls._name, fname)
        ]
        if not isinstance(column, fields.function):
            continue
        if not column.store:
            # register it on the pool for invalidation
            pool._pure_function_fields[cls._name].append(fname)
            continue
        # process store parameter
        store = column.store
        if store is True:
            # store=True: recompute on writes to this model itself
            get_ids = lambda self, cr, uid, ids, c={}: ids
            store = {cls._name: (get_ids, None, column.priority, None)}
        for model, spec in store.iteritems():
            if len(spec) == 4:
                (fnct, fields2, order, length) = spec
            elif len(spec) == 3:
                (fnct, fields2, order) = spec
                length = None
            else:
                raise except_orm('Error',
                    ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
            pool._store_function.setdefault(model, [])
            t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
            if t not in pool._store_function[model]:
                pool._store_function[model].append(t)
                # keep triggers sorted by priority (element 4 of the tuple)
                pool._store_function[model].sort(key=lambda x: x[4])
@classmethod
def _init_manual_fields(cls, cr, partial):
    """Add user-defined (manual) fields from ir_model_fields to the class.

    ``partial`` is truthy while the registry is incompletely loaded: in
    that case relational fields whose comodel (or inverse field) is not
    available yet are silently skipped instead of failing.
    """
    manual_fields = cls.pool.get_manual_fields(cr, cls._name)
    for name, field in manual_fields.iteritems():
        if name in cls._fields:
            # a regular (code-defined) field wins over the manual one
            continue
        attrs = {
            'manual': True,
            'string': field['field_description'],
            'required': bool(field['required']),
            'readonly': bool(field['readonly']),
        }
        # FIXME: ignore field['serialization_field_id']
        # NOTE(review): the eval() calls below execute strings stored in
        # ir_model_fields; this assumes only trusted administrators can
        # write them — do not feed untrusted data into these columns.
        if field['ttype'] in ('char', 'text', 'html'):
            attrs['translate'] = bool(field['translate'])
            attrs['size'] = field['size'] or None
        elif field['ttype'] in ('selection', 'reference'):
            attrs['selection'] = eval(field['selection'])
        elif field['ttype'] == 'many2one':
            if partial and field['relation'] not in cls.pool:
                continue
            attrs['comodel_name'] = field['relation']
            attrs['ondelete'] = field['on_delete']
            attrs['domain'] = eval(field['domain']) if field['domain'] else None
        elif field['ttype'] == 'one2many':
            # need both the comodel and its inverse field to exist (or be
            # about to exist as a manual field) before adding the o2m
            if partial and not (
                field['relation'] in cls.pool and (
                    field['relation_field'] in cls.pool[field['relation']]._fields or
                    field['relation_field'] in cls.pool.get_manual_fields(cr, field['relation'])
            )):
                continue
            attrs['comodel_name'] = field['relation']
            attrs['inverse_name'] = field['relation_field']
            attrs['domain'] = eval(field['domain']) if field['domain'] else None
        elif field['ttype'] == 'many2many':
            if partial and field['relation'] not in cls.pool:
                continue
            attrs['comodel_name'] = field['relation']
            # synthesize a dedicated relation table name for the manual m2m
            _rel1 = field['relation'].replace('.', '_')
            _rel2 = field['model'].replace('.', '_')
            attrs['relation'] = 'x_%s_%s_%s_rel' % (_rel1, _rel2, name)
            attrs['column1'] = 'id1'
            attrs['column2'] = 'id2'
            attrs['domain'] = eval(field['domain']) if field['domain'] else None
        cls._add_field(name, Field.by_type[field['ttype']](**attrs))
@classmethod
def _init_constraints_onchanges(cls):
    """Register the model's SQL-constraint error messages on the registry.

    Each message is stored in ``pool._sql_error`` under the PostgreSQL
    constraint name, i.e. ``<table>_<key>``.
    """
    # store sql constraint error messages
    prefix = cls._table + '_'
    for key, _definition, msg in cls._sql_constraints:
        cls.pool._sql_error[prefix + key] = msg
@property
def _constraint_methods(self):
    """ Return a list of methods implementing Python constraints. """
    def is_constraint(func):
        # methods decorated with @api.constrains carry a _constrains attr
        return callable(func) and hasattr(func, '_constrains')

    cls = type(self)
    methods = []
    for attr, func in getmembers(cls, is_constraint):
        if not all(name in cls._fields for name in func._constrains):
            _logger.warning("@constrains%r parameters must be field names", func._constrains)
        methods.append(func)

    # optimization: memoize result on cls, it will not be recomputed
    cls._constraint_methods = methods
    return methods
@property
def _onchange_methods(self):
    """ Return a dictionary mapping field names to onchange methods. """
    def is_onchange(func):
        # methods decorated with @api.onchange carry an _onchange attr
        return callable(func) and hasattr(func, '_onchange')

    cls = type(self)
    methods = defaultdict(list)
    for attr, func in getmembers(cls, is_onchange):
        for name in func._onchange:
            if name not in cls._fields:
                _logger.warning("@onchange%r parameters must be field names", func._onchange)
            methods[name].append(func)

    # optimization: memoize result on cls, it will not be recomputed
    cls._onchange_methods = methods
    return methods
def __new__(cls):
    # In the past, this method was registering the model class in the server.
    # This job is now done entirely by the metaclass MetaModel.
    #
    # Do not create an instance here. Model instances are created by method
    # _build_model().
    #
    # Returning None deliberately makes direct instantiation a no-op.
    return None
def __init__(self, pool, cr):
    """ Initialize a model and make it part of the given registry.

    - copy the stored fields' functions in the registry,
    - retrieve custom fields and add them in the model,
    - ensure there is a many2one for each _inherits'd parent,
    - update the children's _columns,
    - give a chance to each field to initialize itself.
    """
    cls = type(self)

    # link the class to the registry, and update the registry
    cls.pool = pool
    cls._model = self              # backward compatibility
    pool.add(cls._name, self)

    # determine description, table, sequence and log_access
    if not cls._description:
        cls._description = cls._name
    if not cls._table:
        cls._table = cls._name.replace('.', '_')
    if not cls._sequence:
        cls._sequence = cls._table + '_id_seq'
    if not hasattr(cls, '_log_access'):
        # If _log_access is not specified, it is the same value as _auto.
        cls._log_access = cls._auto

    # Transience
    if cls.is_transient():
        cls._transient_check_count = 0
        cls._transient_max_count = config.get('osv_memory_count_limit')
        cls._transient_max_hours = config.get('osv_memory_age_limit')
        assert cls._log_access, \
            "TransientModels must have log_access turned on, " \
            "in order to implement their access rights policy"
@api.model
@ormcache()
def _is_an_ordinary_table(self):
    """Return True when the model is backed by a plain table.

    Checks pg_class for relkind 'r' (ordinary table); SQL views and other
    relation kinds yield False.  Result is memoized by @ormcache.
    """
    self.env.cr.execute("""\
        SELECT  1
        FROM    pg_class
        WHERE   relname = %s
        AND     relkind = %s""", [self._table, 'r'])
    return bool(self.env.cr.fetchone())
def __export_xml_id(self):
    """ Return a valid xml_id for the record ``self``.

    Reuses an existing ir.model.data entry when one exists; otherwise
    creates one in the ``__export__`` namespace, suffixing the name with
    a counter until it is unique.
    """
    if not self._is_an_ordinary_table():
        raise Exception(
            "You can not export the column ID of model %s, because the "
            "table %s is not an ordinary table."
            % (self._name, self._table))
    ir_model_data = self.sudo().env['ir.model.data']
    data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
    if data:
        if data[0].module:
            return '%s.%s' % (data[0].module, data[0].name)
        else:
            return data[0].name
    else:
        postfix = 0
        name = '%s_%s' % (self._table, self.id)
        # bump the postfix until the xmlid is free in __export__
        while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
            postfix += 1
            name = '%s_%s_%s' % (self._table, self.id, postfix)
        ir_model_data.create({
            'model': self._name,
            'res_id': self.id,
            'module': '__export__',
            'name': name,
        })
        return '__export__.' + name
@api.multi
def __export_rows(self, fields):
    """ Export fields of the records in ``self``.

    :param fields: list of lists of fields to traverse
    :return: list of lists of corresponding values
    """
    lines = []
    for record in self:
        # main line of record, initially empty
        current = [''] * len(fields)
        lines.append(current)

        # list of primary fields followed by secondary field(s)
        primary_done = []

        # process column by column
        for i, path in enumerate(fields):
            if not path:
                continue

            name = path[0]
            if name in primary_done:
                # already filled by the recursive export of this field
                continue

            if name == '.id':
                current[i] = str(record.id)
            elif name == 'id':
                current[i] = record.__export_xml_id()
            else:
                field = record._fields[name]
                value = record[name]

                # this part could be simpler, but it has to be done this way
                # in order to reproduce the former behavior
                if not isinstance(value, BaseModel):
                    current[i] = field.convert_to_export(value, self.env)
                else:
                    primary_done.append(name)

                    # This is a special case, its strange behavior is intended!
                    if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                        xml_ids = [r.__export_xml_id() for r in value]
                        current[i] = ','.join(xml_ids) or False
                        continue

                    # recursively export the fields that follow name
                    fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                    lines2 = value.__export_rows(fields2)
                    if lines2:
                        # merge first line with record's main line
                        for j, val in enumerate(lines2[0]):
                            if val or isinstance(val, bool):
                                current[j] = val
                        # check value of current field
                        if not current[i] and not isinstance(current[i], bool):
                            # assign xml_ids, and forget about remaining lines
                            xml_ids = [item[1] for item in value.name_get()]
                            current[i] = ','.join(xml_ids)
                        else:
                            # append the other lines at the end
                            lines += lines2[1:]
                    else:
                        current[i] = False

    return lines
@api.multi
def export_data(self, fields_to_export, raw_data=False):
    """ Export fields for selected objects

    :param fields_to_export: list of fields
    :param raw_data: True to return value in native Python type
    :rtype: dictionary with a *datas* matrix

    This method is used when exporting data via client menu
    """
    paths = [fix_import_export_id_paths(f) for f in fields_to_export]
    records = self.with_context(export_raw_data=True) if raw_data else self
    return {'datas': records.__export_rows(paths)}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
    """
    .. deprecated:: 7.0
        Use :meth:`~load` instead

    Import given data in given module

    This method is used when importing data via client menu.

    Example of fields to import for a sale.order::

        .id,                         (=database_id)
        partner_id,                  (=name_search)
        order_line/.id,              (=database_id)
        order_line/name,
        order_line/product_id/id,    (=xml id)
        order_line/price_unit,
        order_line/product_uom_qty,
        order_line/product_uom/id    (=xml_id)

    This method returns a 4-tuple with the following structure::

        (return_code, errored_resource, error_message, unused)

    * The first item is a return code, it is ``-1`` in case of
      import error, or the last imported row number in case of success
    * The second item contains the record data dict that failed to import
      in case of error, otherwise it's 0
    * The third item contains an error message string in case of error,
      otherwise it's 0
    * The last item is currently unused, with no specific semantics

    :param fields: list of fields to import
    :param datas: data to import
    :param mode: 'init' or 'update' for record creation
    :param current_module: module name
    :param noupdate: flag for record creation
    :param filename: optional file to store partial import state for recovery
    :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
    :rtype: (int, dict or 0, str or 0, str or 0)
    """
    context = dict(context) if context is not None else {}
    context['_import_current_module'] = current_module

    fields = map(fix_import_export_id_paths, fields)
    ir_model_data_obj = self.pool.get('ir.model.data')

    def log(m):
        # abort the whole import on the first error message
        if m['type'] == 'error':
            raise Exception(m['message'])

    if config.get('import_partial') and filename:
        with open(config.get('import_partial'), 'rb') as partial_import_file:
            data = pickle.load(partial_import_file)
            position = data.get(filename, 0)

    # NOTE(review): this unconditionally resets ``position`` to 0 right
    # after the partial-import state was loaded above, which appears to
    # defeat resuming via 'import_partial' — confirm against upstream
    # before changing, as this method is deprecated.
    position = 0
    try:
        # use savepoints for openupgrade instead of transactions
        cr.execute('SAVEPOINT convert_records')
        for res_id, xml_id, res, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, datas,
                                      context=context, log=log),
                context=context, log=log):
            ir_model_data_obj._update(cr, uid, self._name,
                 current_module, res, mode=mode, xml_id=xml_id,
                 noupdate=noupdate, res_id=res_id, context=context)
            position = info.get('rows', {}).get('to', 0) + 1
            if config.get('import_partial') and filename and (not (position%100)):
                # checkpoint the current position every 100 rows
                with open(config.get('import_partial'), 'rb') as partial_import:
                    data = pickle.load(partial_import)
                data[filename] = position
                with open(config.get('import_partial'), 'wb') as partial_import:
                    pickle.dump(data, partial_import)
                if context.get('defer_parent_store_computation'):
                    self._parent_store_compute(cr)
                cr.commit()
        cr.execute('RELEASE SAVEPOINT convert_records')
    except Exception, e:
        cr.execute('ROLLBACK TO SAVEPOINT convert_records')
        return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

    if context.get('defer_parent_store_computation'):
        self._parent_store_compute(cr)
    return position, 0, 0, 0
def load(self, cr, uid, fields, data, context=None):
"""
Attempts to load the data matrix, and returns a list of ids (or
``False`` if there was an error and no id could be generated) and a
list of messages.
The ids are those of the records created and saved (in database), in
the same order they were extracted from the file. They can be passed
directly to :meth:`~read`
:param fields: list of fields to import, at the same index as the corresponding data
:type fields: list(str)
:param data: row-major matrix of data to import
:type data: list(list(str))
:param dict context:
:returns: {ids: list(int)|False, messages: [Message]}
"""
cr.execute('SAVEPOINT model_load')
messages = []
fields = map(fix_import_export_id_paths, fields)
ModelData = self.pool['ir.model.data'].clear_caches()
fg = self.fields_get(cr, uid, context=context)
mode = 'init'
current_module = ''
noupdate = False
ids = []
for id, xid, record, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, data,
context=context, log=messages.append),
context=context, log=messages.append):
try:
cr.execute('SAVEPOINT model_load_save')
except psycopg2.InternalError, e:
# broken transaction, exit and hope the source error was
# already logged
if not any(message['type'] == 'error' for message in messages):
messages.append(dict(info, type='error',message=
u"Unknown database error: '%s'" % e))
break
try:
ids.append(ModelData._update(cr, uid, self._name,
current_module, record, mode=mode, xml_id=xid,
noupdate=noupdate, res_id=id, context=context))
cr.execute('RELEASE SAVEPOINT model_load_save')
except psycopg2.Warning, e:
messages.append(dict(info, type='warning', message=str(e)))
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except psycopg2.Error, e:
messages.append(dict(
info, type='error',
**PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
# Failed to write, log to messages, rollback savepoint (to
# avoid broken transaction) and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except Exception, e:
message = (_('Unknown error during import:') +
' %s: %s' % (type(e), unicode(e)))
moreinfo = _('Resolve other errors first')
messages.append(dict(info, type='error',
message=message,
moreinfo=moreinfo))
# Failed for some reason, perhaps due to invalid data supplied,
# rollback savepoint and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
return {'ids': ids, 'messages': messages}
def _add_fake_fields(self, cr, uid, fields, context=None):
    """Inject the pseudo-fields used by the import extractor.

    Adds entries for the name_get column (key ``None``), the external id
    (``'id'``) and the database id (``'.id'``) into ``fields`` and returns
    the same mapping.
    """
    from openerp.fields import Char, Integer
    fields.update({
        None: Char('rec_name'),
        'id': Char('External ID'),
        '.id': Integer('Database ID'),
    })
    return fields
def _extract_records(self, cr, uid, fields_, data,
                     context=None, log=lambda a: None):
    """ Generates record dicts from the data sequence.

    The result is a generator of dicts mapping field names to raw
    (unconverted, unvalidated) values.

    For relational fields, if sub-fields were provided the value will be
    a list of sub-records

    The following sub-fields may be set on the record (by key):
    * None is the name_get for the record (to use with name_create/name_search)
    * "id" is the External ID for the record
    * ".id" is the Database ID for the record
    """
    fields = dict(self._fields)
    # Fake fields to avoid special cases in extractor
    fields = self._add_fake_fields(cr, uid, fields, context=context)
    # m2o fields can't be on multiple lines so exclude them from the
    # is_relational field rows filter, but special-case it later on to
    # be handled with relational fields (as it can have subfields)
    is_relational = lambda field: fields[field].relational
    get_o2m_values = itemgetter_tuple(
        [index for index, field in enumerate(fields_)
              if fields[field[0]].type == 'one2many'])
    get_nono2m_values = itemgetter_tuple(
        [index for index, field in enumerate(fields_)
              if fields[field[0]].type != 'one2many'])
    # Checks if the provided row has any non-empty non-relational field
    def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
        return any(g(row)) and not any(f(row))

    index = 0
    while True:
        if index >= len(data): return

        row = data[index]
        # copy non-relational fields to record dict
        record = dict((field[0], value)
                      for field, value in itertools.izip(fields_, row)
                      if not is_relational(field[0]))
        # Get all following rows which have relational values attached to
        # the current record (no non-relational values)
        record_span = itertools.takewhile(
            only_o2m_values, itertools.islice(data, index + 1, None))
        # stitch record row back on for relational fields
        record_span = list(itertools.chain([row], record_span))
        for relfield in set(
                field[0] for field in fields_
                         if is_relational(field[0])):
            # FIXME: how to not use _obj without relying on fields_get?
            Model = self.pool[fields[relfield].comodel_name]

            # get only cells for this sub-field, should be strictly
            # non-empty, field path [None] is for name_get field
            indices, subfields = zip(*((index, field[1:] or [None])
                                       for index, field in enumerate(fields_)
                                       if field[0] == relfield))

            # return all rows which have at least one value for the
            # subfields of relfield
            relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
            record[relfield] = [subrecord
                for subrecord, _subinfo in Model._extract_records(
                    cr, uid, subfields, relfield_data,
                    context=context, log=log)]

        # report the 0-based span of source rows this record was built from
        yield record, {'rows': {
            'from': index,
            'to': index + len(record_span) - 1
        }}
        index += len(record_span)
def _convert_records(self, cr, uid, records,
                     context=None, log=lambda a: None):
    """ Converts records from the source iterable (recursive dicts of
    strings) into forms which can be written to the database (via
    self.create or (ir.model.data)._update)

    :returns: a list of triplets of (id, xid, record)
    :rtype: list((int|None, str|None, dict))
    """
    if context is None: context = {}
    Converter = self.pool['ir.fields.converter']
    Translation = self.pool['ir.translation']
    fields = dict(self._fields)
    # human-readable labels (translated when possible) used in messages
    field_names = dict(
        (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                     context.get('lang'))
             or field.string))
        for f, field in fields.iteritems())

    convert = Converter.for_model(cr, uid, self, context=context)

    def _log(base, field, exception):
        type = 'warning' if isinstance(exception, Warning) else 'error'
        # logs the logical (not human-readable) field name for automated
        # processing of response, but injects human readable in message
        record = dict(base, type=type, field=field,
                      message=unicode(exception.args[0]) % base)
        if len(exception.args) > 1 and exception.args[1]:
            record.update(exception.args[1])
        log(record)

    stream = CountingStream(records)
    for record, extras in stream:
        dbid = False
        xid = False
        # name_get/name_create
        if None in record: pass
        # xid
        if 'id' in record:
            xid = record['id']
        # dbid
        if '.id' in record:
            try:
                dbid = int(record['.id'])
            except ValueError:
                # in case of overridden id column
                dbid = record['.id']
            if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                log(dict(extras,
                    type='error',
                    record=stream.index,
                    field='.id',
                    message=_(u"Unknown database identifier '%s'") % dbid))
                dbid = False

        converted = convert(record, lambda field, err:\
            _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

        yield dbid, xid, converted, dict(extras, record=stream.index)
@api.multi
def _validate_fields(self, field_names):
    """Run the old- and new-style Python constraints touching any of
    ``field_names`` on the records in ``self``.

    :raises ValidationError: when any constraint fails.
    """
    field_names = set(field_names)

    # old-style constraint methods
    trans = self.env['ir.translation']
    cr, uid, context = self.env.args
    ids = self.ids
    errors = []
    for fun, msg, names in self._constraints:
        try:
            # validation must be context-independent; call ``fun`` without context
            # skip the check entirely when none of its fields changed
            valid = names and not (set(names) & field_names)
            valid = valid or fun(self._model, cr, uid, ids)
            extra_error = None
        except Exception, e:
            _logger.debug('Exception while validating constraint', exc_info=True)
            valid = False
            extra_error = tools.ustr(e)
        if not valid:
            if callable(msg):
                res_msg = msg(self._model, cr, uid, ids, context=context)
                if isinstance(res_msg, tuple):
                    template, params = res_msg
                    res_msg = template % params
            else:
                res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
            if extra_error:
                res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
            errors.append(
                _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
            )
    if errors:
        raise ValidationError('\n'.join(errors))

    # new-style constraint methods
    for check in self._constraint_methods:
        if set(check._constrains) & field_names:
            try:
                check(self)
            except ValidationError, e:
                raise
            except Exception, e:
                # wrap unexpected failures so callers always see a ValidationError
                raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
    @api.model
    def default_get(self, fields_list):
        """ default_get(fields) -> default_values

        Return default values for the fields in ``fields_list``. Default
        values are determined by the context, user defaults, and the model
        itself.

        Lookup order per field: context key ``default_<name>``, then
        ``ir.values`` user defaults, then property (company-dependent)
        fields, then the field's own default, and finally the defaults of
        the ``_inherits`` parent model for inherited fields.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value, if it has one.
        """
        # trigger view init hook
        self.view_init(fields_list)

        defaults = {}
        parent_fields = defaultdict(list)  # model name -> inherited field names

        for name in fields_list:
            # 1. look up context
            key = 'default_' + name
            if key in self._context:
                defaults[name] = self._context[key]
                continue

            # 2. look up ir_values
            #    Note: performance is good, because get_defaults_dict is cached!
            ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
            if name in ir_values_dict:
                defaults[name] = ir_values_dict[name]
                continue

            field = self._fields.get(name)

            # 3. look up property fields
            #    TODO: get rid of this one
            if field and field.company_dependent:
                defaults[name] = self.env['ir.property'].get(name, self._name)
                continue

            # 4. look up field.default
            if field and field.default:
                defaults[name] = field.default(self)
                continue

            # 5. delegate to parent model
            if field and field.inherited:
                field = field.related_field
                parent_fields[field.model_name].append(field.name)

        # convert default values to the right format
        defaults = self._convert_to_cache(defaults, validate=False)
        defaults = self._convert_to_write(defaults)

        # add default values for inherited fields
        for model, names in parent_fields.iteritems():
            defaults.update(self.env[model].default_get(names))

        return defaults
def fields_get_keys(self, cr, user, context=None):
res = self._columns.keys()
# TODO I believe this loop can be replace by
# res.extend(self._inherit_fields.key())
for parent in self._inherits:
res.extend(self.pool[parent].fields_get_keys(cr, user, context))
return res
def _rec_name_fallback(self, cr, uid, context=None):
rec_name = self._rec_name
if rec_name not in self._columns:
rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
return rec_name
#
# Overload this method if you need a window title which depends on the context
#
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
return False
def user_has_groups(self, cr, uid, groups, context=None):
"""Return true if the user is at least member of one of the groups
in groups_str. Typically used to resolve ``groups`` attribute
in view and model definitions.
:param str groups: comma-separated list of fully-qualified group
external IDs, e.g.: ``base.group_user,base.group_system``
:return: True if the current user is a member of one of the
given groups
"""
return any(self.pool['res.users'].has_group(cr, uid, group_ext_id.strip())
for group_ext_id in groups.split(','))
def _get_default_form_view(self, cr, user, context=None):
""" Generates a default single-line form view using all fields
of the current model except the m2m and o2m ones.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a form view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('form', string=self._description)
group = etree.SubElement(view, 'group', col="4")
for fname, field in self._fields.iteritems():
if field.automatic or field.type in ('one2many', 'many2many'):
continue
etree.SubElement(group, 'field', name=fname)
if field.type == 'text':
etree.SubElement(group, 'newline')
return view
def _get_default_search_view(self, cr, user, context=None):
""" Generates a single-field search view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('search', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_tree_view(self, cr, user, context=None):
""" Generates a single-field tree view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('tree', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        :raises except_orm: if no suitable start-date or stop/delay field
            can be found on the model
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of ``seq`` also found in ``in_`` to
            the ``to`` attribute of the view being closed over.

            Returns whether it's found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            # _date_name is not an actual column: probe common date column
            # names before giving up (NOTE: this mutates self._date_name)
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        # pick a color field, then either an end-date field or a duration field
        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))

        return view
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view like fields, model, view architecture

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
                * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
                * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    # resolve the "module.name" external id to a view id
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context from postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            # strip heavy report payloads from the action dicts before
            # returning them to the client
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" set it will display only in More of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate
            }
        return result
def get_formview_id(self, cr, uid, id, context=None):
""" Return an view id to open the document with. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
return False
def get_formview_action(self, cr, uid, id, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
view_id = self.get_formview_id(cr, uid, id, context=context)
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(view_id, 'form')],
'target': 'current',
'res_id': id,
'context': context,
}
def get_access_action(self, cr, uid, id, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific access to the document.
By default it opens the formview of the document.
:param int id: id of the document to open
"""
return self.get_formview_action(cr, uid, id, context=context)
def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
return self.pool['ir.ui.view'].postprocess_and_fields(
cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
""" search_count(args) -> int
Returns the number of records in the current model matching :ref:`the
provided domain <reference/orm/domains>`.
"""
res = self.search(cr, user, args, context=context, count=True)
if isinstance(res, list):
return len(res)
return res
    # ``upgrade``/``downgrade`` translate the return value between the old
    # API (list of ids) and the new API (recordset) — except when ``count``
    # is set, in which case the integer result passes through untouched.
    @api.returns('self',
        upgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else self.browse(value),
        downgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else value.ids)
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None][, count=False])

        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.

        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :param bool count: if True, only counts and returns the number of matching records (default: False)
        :returns: at most ``limit`` records matching the search criteria
        :raise AccessError: * if user tries to bypass access rules for read on the requested object.
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
#
# display_name, name_get, name_create, name_search
#
@api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
def _compute_display_name(self):
names = dict(self.name_get())
for record in self:
record.display_name = names.get(record.id, False)
@api.multi
def name_get(self):
""" name_get() -> [(id, name), ...]
Returns a textual representation for the records in ``self``.
By default this is the value of the ``display_name`` field.
:return: list of pairs ``(id, text_repr)`` for each records
:rtype: list(tuple)
"""
result = []
name = self._rec_name
if name in self._fields:
convert = self._fields[name].convert_to_display_name
for record in self:
result.append((record.id, convert(record[name], record)))
else:
for record in self:
result.append((record.id, "%s,%s" % (record._name, record.id)))
return result
@api.model
def name_create(self, name):
""" name_create(name) -> record
Create a new record by calling :meth:`~.create` with only one value
provided: the display name of the new record.
The new record will be initialized with any default values
applicable to this model, or provided through the context. The usual
behavior of :meth:`~.create` applies.
:param name: display name of the record to create
:rtype: tuple
:return: the :meth:`~.name_get` pair value of the created record
"""
if self._rec_name:
record = self.create({self._rec_name: name})
return record.name_get()[0]
else:
_logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
return False
    @api.model
    def name_search(self, name='', args=None, operator='ilike', limit=100):
        """ name_search(name='', args=None, operator='ilike', limit=100) -> records

        Search for records that have a display name matching the given
        ``name`` pattern when compared with the given ``operator``, while also
        matching the optional search domain (``args``).

        This is used for example to provide suggestions based on a partial
        value for a relational field. It may sometimes be seen as the inverse
        function of :meth:`~.name_get`, but it is not guaranteed to be.

        This method is equivalent to calling :meth:`~.search` with a search
        domain based on ``display_name`` and then :meth:`~.name_get` on the
        result of the search.

        :param str name: the name pattern to match
        :param list args: optional search domain (see :meth:`~.search` for
                          syntax), specifying further restrictions
        :param str operator: domain operator for matching ``name``, such as
                             ``'like'`` or ``'='``.
        :param int limit: optional max number of records to return
        :rtype: list
        :return: list of pairs ``(id, text_repr)`` for all matching records.
        """
        # actual implementation in _name_search(), which also supports a
        # dedicated uid for the name_get part
        return self._name_search(name, args, operator, limit=limit)
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
# private implementation of name_search, allows passing a dedicated user
# for the name_get part to solve some access rights issues
args = list(args or [])
# optimize out the default criterion of ``ilike ''`` that matches everything
if not self._rec_name:
_logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
elif not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
res = self.name_get(cr, access_rights_uid, ids, context)
return res
    def read_string(self, cr, uid, id, langs, fields=None, context=None):
        """ Return the translated labels (``string``) of the given fields
        for each language in ``langs``.

        :param langs: list of language codes
        :param fields: list of field names; defaults to all fields
        :return: dict mapping each language code to a dict of
            ``field name -> translated label`` (plus a ``'code'`` key)
        """
        res = {}
        res2 = {}
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
        if not fields:
            fields = self._columns.keys() + self._inherit_fields.keys()
        #FIXME: collect all calls to _get_source into one SQL call.
        for lang in langs:
            res[lang] = {'code': lang}
            for f in fields:
                if f in self._columns:
                    res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                    if res_trans:
                        res[lang][f] = res_trans
                    else:
                        # no translation found: fall back on the raw label
                        res[lang][f] = self._columns[f].string
        # merge in the labels of fields inherited through _inherits
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), fields)
            res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
        for lang in res2:
            if lang in res:
                res[lang]['code'] = lang
            for f in res2[lang]:
                res[lang][f] = res2[lang][f]
        return res
    def write_string(self, cr, uid, id, langs, vals, context=None):
        """ Write the translated labels (``string``) of the given fields
        for each language in ``langs``.

        :param langs: list of language codes
        :param vals: dict mapping field names to their translated label
        :return: True
        """
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
        #FIXME: try to only call the translation in one SQL
        for lang in langs:
            for field in vals:
                if field in self._columns:
                    src = self._columns[field].string
                    self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
        # propagate to parent models for fields inherited through _inherits
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), vals)
            if cols:
                self.pool[table].write_string(cr, uid, id, langs, vals, context)
        return True
    def _add_missing_default_values(self, cr, uid, values, context=None):
        """ Complete ``values`` with default values for the fields it does
        not set, and return the completed dict.

        Provided values always win over computed defaults. Defaults of
        fields inherited through ``_inherits`` are skipped when the
        corresponding parent-link field is set explicitly in ``values``.
        """
        # avoid overriding inherited values when parent is set
        avoid_tables = []
        for tables, parent_field in self._inherits.items():
            if parent_field in values:
                avoid_tables.append(tables)

        # compute missing fields
        missing_defaults = set()
        for field in self._columns.keys():
            if not field in values:
                missing_defaults.add(field)
        for field in self._inherit_fields.keys():
            if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
                missing_defaults.add(field)
        # discard magic fields
        missing_defaults -= set(MAGIC_COLUMNS)

        if missing_defaults:
            # override defaults with the provided values, never allow the other way around
            defaults = self.default_get(cr, uid, list(missing_defaults), context)
            for dv in defaults:
                # normalize m2m defaults given as a plain list of ids into
                # the (6, 0, ids) command form
                if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
                        or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
                        and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
                    defaults[dv] = [(6, 0, defaults[dv])]
                # normalize o2m defaults given as a list of value dicts into
                # the (0, 0, values) command form
                if (dv in self._columns and self._columns[dv]._type == 'one2many' \
                        or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
                        and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
                    defaults[dv] = [(0, 0, x) for x in defaults[dv]]
            defaults.update(values)
            values = defaults
        return values
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi``.
"""
try:
self.pool.cache.clear_prefix((self._name,))
self.pool._any_cache_cleared = True
except AttributeError:
pass
    def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
                                 aggregated_fields, count_field,
                                 read_group_result, read_group_result, read_group_order=None, context=None):
        """Helper method for filling in empty groups for all possible values of
           the field being grouped by"""
        # self._group_by_full should map groupable fields to a method that returns
        # a list of all aggregated values that we want to display for this field,
        # in the form of a m2o-like pair (key,label).
        # This is useful to implement kanban views for instance, where all columns
        # should be displayed even if they don't contain any record.

        # Grab the list of all groups that should be displayed, including all present groups
        present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
        all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
                                                         read_group_order=read_group_order,
                                                         access_rights_uid=openerp.SUPERUSER_ID,
                                                         context=context)

        # template for the synthetic lines of groups with no record
        result_template = dict.fromkeys(aggregated_fields, False)
        result_template[groupby + '_count'] = 0
        if remaining_groupbys:
            result_template['__context'] = {'group_by': remaining_groupbys}

        # Merge the left_side (current results as dicts) with the right_side (all
        # possible values as m2o pairs). Both lists are supposed to be using the
        # same ordering, and can be merged in one pass.
        result = []
        known_values = {}
        def append_left(left_side):
            # take an actual read_group line, deduplicated on group value
            grouped_value = left_side[groupby] and left_side[groupby][0]
            if not grouped_value in known_values:
                result.append(left_side)
                known_values[grouped_value] = left_side
            else:
                known_values[grouped_value].update({count_field: left_side[count_field]})
        def append_right(right_side):
            # synthesize an empty line for a group with no record
            grouped_value = right_side[0]
            if not grouped_value in known_values:
                line = dict(result_template)
                line[groupby] = right_side
                line['__domain'] = [(groupby,'=',grouped_value)] + domain
                result.append(line)
                known_values[grouped_value] = line
        while read_group_result or all_groups:
            left_side = read_group_result[0] if read_group_result else None
            right_side = all_groups[0] if all_groups else None
            assert left_side is None or left_side[groupby] is False \
                 or isinstance(left_side[groupby], (tuple,list)), \
                'M2O-like pair expected, got %r' % left_side[groupby]
            assert right_side is None or isinstance(right_side, (tuple,list)), \
                'M2O-like pair expected, got %r' % right_side
            if left_side is None:
                append_right(all_groups.pop(0))
            elif right_side is None:
                append_left(read_group_result.pop(0))
            elif left_side[groupby] == right_side:
                append_left(read_group_result.pop(0))
                all_groups.pop(0) # discard right_side
            elif not left_side[groupby] or not left_side[groupby][0]:
                # left side == "Undefined" entry, not present on right_side
                append_left(read_group_result.pop(0))
            else:
                append_right(all_groups.pop(0))

        if folded:
            for r in result:
                r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
        return result
    def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
        """
        Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
        to the query if order should be computed against m2o field.

        :param orderby: the orderby definition in the form "%(field)s %(order)s"
        :param aggregated_fields: list of aggregated fields in the query
        :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
                These dictionaries contains the qualified name of each groupby
                (fully qualified SQL name for the corresponding field),
                and the (non raw) field name.
        :param osv.Query query: the query under construction
        :return: (groupby_terms, orderby_terms)
        """
        orderby_terms = []
        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
        groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
        if not orderby:
            return groupby_terms, orderby_terms

        # reject malformed order specs before building SQL from them
        self._check_qorder(orderby)
        for order_part in orderby.split(','):
            order_split = order_part.split()
            order_field = order_split[0]
            if order_field in groupby_fields:
                # strip any ':granularity' suffix to find the underlying field
                if self._fields[order_field.split(':')[0]].type == 'many2one':
                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                    if order_clause:
                        orderby_terms.append(order_clause)
                        # every ordered column must also be grouped on
                        groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                else:
                    order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
                    orderby_terms.append(order)
            elif order_field in aggregated_fields:
                orderby_terms.append(order_part)
            else:
                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
                _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
                             self._name, order_part)

        return groupby_terms, orderby_terms
    def _read_group_process_groupby(self, gb, query, context):
        """
        Helper method to collect important information about groupbys: raw
        field name, type, time information, qualified name, ...

        :param gb: a groupby description, either 'field' or 'field:granularity'
        :return: dict with keys ``field``, ``groupby``, ``type``,
            ``display_format``, ``interval``, ``tz_convert`` and
            ``qualified_field`` (SQL expression used for grouping)
        """
        split = gb.split(':')
        field_type = self._fields[split[0]].type
        gb_function = split[1] if len(split) == 2 else None
        temporal = field_type in ('date', 'datetime')
        # only convert timezones when the context carries a known timezone
        tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
        qualified_field = self._inherits_join_calc(self._table, split[0], query)
        if temporal:
            display_formats = {
                # Careful with week/year formats:
                #  - yyyy (lower) must always be used, *except* for week+year formats
                #  - YYYY (upper) must always be used for week+year format
                #         e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
                #                         and W1 2006 for others
                #
                # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
                # such as 2006-01-01 being formatted as "January 2005" in some locales.
                # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
                'day': 'dd MMM yyyy', # yyyy = normal year
                'week': "'W'w YYYY",  # w YYYY = ISO week-year
                'month': 'MMMM yyyy',
                'quarter': 'QQQ yyyy',
                'year': 'yyyy',
            }
            time_intervals = {
                'day': dateutil.relativedelta.relativedelta(days=1),
                'week': datetime.timedelta(days=7),
                'month': dateutil.relativedelta.relativedelta(months=1),
                'quarter': dateutil.relativedelta.relativedelta(months=3),
                'year': dateutil.relativedelta.relativedelta(years=1)
            }
            if tz_convert:
                # group on local-time boundaries: shift UTC values to the
                # user's timezone before truncating
                qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
            qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
        if field_type == 'boolean':
            qualified_field = "coalesce(%s,false)" % qualified_field
        return {
            'field': split[0],
            'groupby': gb,
            'type': field_type,
            'display_format': display_formats[gb_function or 'month'] if temporal else None,
            'interval': time_intervals[gb_function or 'month'] if temporal else None,
            'tz_convert': tz_convert,
            'qualified_field': qualified_field
        }
def _read_group_prepare_data(self, key, value, groupby_dict, context):
"""
Helper method to sanitize the data received by read_group. The None
values are converted to False, and the date/datetime are formatted,
and corrected according to the timezones.
"""
value = False if value is None else value
gb = groupby_dict.get(key)
if gb and gb['type'] in ('date', 'datetime') and value:
if isinstance(value, basestring):
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
value = datetime.datetime.strptime(value, dt_format)
if gb['tz_convert']:
value = pytz.timezone(context['tz']).localize(value)
return value
def _read_group_get_domain(self, groupby, value):
"""
Helper method to construct the domain corresponding to a groupby and
a given value. This is mostly relevant for date/datetime.
"""
if groupby['type'] in ('date', 'datetime') and value:
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
domain_dt_begin = value
domain_dt_end = value + groupby['interval']
if groupby['tz_convert']:
domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
domain_dt_end = domain_dt_end.astimezone(pytz.utc)
return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
(groupby['field'], '<', domain_dt_end.strftime(dt_format))]
if groupby['type'] == 'many2one' and value:
value = value[0]
return [(groupby['field'], '=', value)]
    def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
        """
        Helper method to format the data contained in the dictionary data by
        adding the domain corresponding to its values, the groupbys in the
        context and by properly formatting the date/datetime values.

        Note: ``data`` is modified in place (its ``id`` key is removed) and
        returned.
        """
        domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
        for k,v in data.iteritems():
            gb = groupby_dict.get(k)
            if gb and gb['type'] in ('date', 'datetime') and v:
                # render temporal group values with the locale-aware format
                # chosen in _read_group_process_groupby
                data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))

        data['__domain'] = domain_group + domain
        if len(groupby) - len(annotated_groupbys) >= 1:
            # expose the remaining groupbys for the client's drill-down
            data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
        del data['id']
        return data
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
:param cr: database cursor
:param uid: current user id
:param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
:param list fields: list of fields present in the list view specified on the object
:param list groupby: list of groupby descriptions by which the records will be grouped.
A groupby description is either a field (then it will be grouped by that field)
or a string 'field:groupby_function'. Right now, the only functions supported
are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
date/datetime fields.
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
:param dict context: context arguments, like lang, time zone.
:param list orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
(supported only for many2one fields currently)
:param bool lazy: if true, the results are only grouped by the first groupby and the
remaining groupbys are put in the __context key. If false, all the groupbys are
done in one call.
:return: list of dictionaries(one dictionary for each record) containing:
* the values of fields grouped by the fields in ``groupby`` argument
* __domain: list of tuples specifying the search criteria
* __context: dictionary with argument like ``groupby``
:rtype: [{'field_name_1': value, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
if context is None:
context = {}
self.check_access_rights(cr, uid, 'read')
query = self._where_calc(cr, uid, domain, context=context)
fields = fields or self._columns.keys()
groupby = [groupby] if isinstance(groupby, basestring) else groupby
groupby_list = groupby[:1] if lazy else groupby
annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
for gb in groupby_list]
groupby_fields = [g['field'] for g in annotated_groupbys]
order = orderby or ','.join([g for g in groupby_list])
groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
self._apply_ir_rules(cr, uid, query, 'read', context=context)
for gb in groupby_fields:
assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
if not (gb in self._fields):
# Don't allow arbitrary values, as this would be a SQL injection vector!
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
aggregated_fields = [
f for f in fields
if f not in ('id', 'sequence')
if f not in groupby_fields
if f in self._fields
if self._fields[f].type in ('integer', 'float')
if getattr(self._fields[f].base_field.column, '_classic_write', False)
]
field_formatter = lambda f: (self._fields[f].group_operator or 'sum', self._inherits_join_calc(self._table, f, query), f)
select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
for gb in annotated_groupbys:
select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
from_clause, where_clause, where_clause_params = query.get_sql()
if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
else:
count_field = '_'
count_field += '_count'
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
%(orderby)s
%(limit)s
%(offset)s
""" % {
'table': self._table,
'count_field': count_field,
'extra_fields': prefix_terms(',', select_terms),
'from': from_clause,
'where': prefix_term('WHERE', where_clause),
'groupby': prefix_terms('GROUP BY', groupby_terms),
'orderby': prefix_terms('ORDER BY', orderby_terms),
'limit': prefix_term('LIMIT', int(limit) if limit else None),
'offset': prefix_term('OFFSET', int(offset) if limit else None),
}
cr.execute(query, where_clause_params)
fetched_data = cr.dictfetchall()
if not groupby_fields:
return fetched_data
many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
if many2onefields:
data_ids = [r['id'] for r in fetched_data]
many2onefields = list(set(many2onefields))
data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
for d in fetched_data:
d.update(data_dict[d['id']])
data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
if lazy and groupby_fields[0] in self._group_by_full:
# Right now, read_group only fill results in lazy mode (by default).
# If you need to have the empty groups in 'eager' mode, then the
# method _read_group_fill_results need to be completely reimplemented
# in a sane way
result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
aggregated_fields, count_field, result, read_group_order=order,
context=context)
return result
    def _inherits_join_add(self, current_model, parent_model_name, query):
        """
        Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)

        :param current_model: current model object
        :param parent_model_name: name of the parent model for which the clauses should be added
        :param query: query object on which the JOIN should be added
        :return: the SQL alias of the parent table within ``query``
        """
        # the _inherits link field holds the id of the parent record
        inherits_field = current_model._inherits[parent_model_name]
        parent_model = self.pool[parent_model_name]
        # implicit=True lets add_join reuse an existing join for the same
        # link, which is what guarantees "no duplicates"; the generated join
        # statement itself is not needed here, only the alias
        parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
        return parent_alias
    def _inherits_join_calc(self, alias, field, query, implicit=True, outer=False):
        """
        Adds missing table select and join clause(s) to ``query`` for reaching
        the field coming from an '_inherits' parent table (no duplicates).

        :param alias: name of the initial SQL alias
        :param field: name of inherited field to reach
        :param query: query object on which the JOIN should be added
        :param implicit: passed through to ``query.add_join`` (reuse existing joins)
        :param outer: passed through to ``query.add_join`` (LEFT JOIN semantics)
        :return: qualified name of field, to be used in SELECT clause
        """
        # INVARIANT: alias is the SQL alias of model._table in query
        model = self
        # walk up the _inherits chain until the model that actually stores
        # the column is reached, adding one join per hop
        while field in model._inherit_fields and field not in model._columns:
            # retrieve the parent model where field is inherited from
            parent_model_name = model._inherit_fields[field][0]
            parent_model = self.pool[parent_model_name]
            parent_field = model._inherits[parent_model_name]
            # JOIN parent_model._table AS parent_alias ON alias.parent_field = parent_alias.id
            parent_alias, _ = query.add_join(
                (alias, parent_model._table, parent_field, 'id', parent_field),
                implicit=implicit, outer=outer,
            )
            model, alias = parent_model, parent_alias
        return '"%s"."%s"' % (alias, field)
    def _parent_store_compute(self, cr):
        """Recompute the ``parent_left``/``parent_right`` nested-set values
        of the whole table by a depth-first traversal starting from the root
        records (rows whose ``self._parent_name`` column is NULL).

        The SQL here is built by string concatenation; ``_parent_name``,
        ``_parent_order`` and ``_table`` come from the model definition, not
        from user input.
        """
        if not self._parent_store:
            return
        _logger.info('Computing parent left and right for table %s...', self._table)
        def browse_rec(root, pos=0):
            # TODO: set order
            where = self._parent_name+'='+str(root)
            if not root:
                where = self._parent_name+' IS NULL'
            if self._parent_order:
                where += ' order by '+self._parent_order
            cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
            # pos is this node's left value; children occupy (pos, pos2)
            pos2 = pos + 1
            for id in cr.fetchall():
                pos2 = browse_rec(id[0], pos2)
            cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
            return pos2 + 1
        query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
        if self._parent_order:
            query += ' order by ' + self._parent_order
        pos = 0
        cr.execute(query)
        for (root,) in cr.fetchall():
            pos = browse_rec(root, pos)
        # the values were written with raw SQL, so the ORM cache is stale
        self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
        return True
    def _update_store(self, cr, f, k):
        """Recompute and store the values of function field ``f`` (column
        ``k``) for every existing record, processing ids in chunks of
        AUTOINIT_RECALCULATE_STORED_FIELDS to bound memory usage.
        """
        _logger.info("storing computed values of fields.function '%s'", k)
        # _symbol_set = (sql_placeholder, python->sql converter)
        ss = self._columns[k]._symbol_set
        update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
        cr.execute('select id from '+self._table)
        ids_lst = map(lambda x: x[0], cr.fetchall())
        while ids_lst:
            iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
            ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
            res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
            for key, val in res.items():
                if f._multi:
                    # multi-function fields return {id: {fname: value}}
                    val = val[k]
                # if val is a many2one, just write the ID
                if type(val) == tuple:
                    val = val[0]
                # False means "no value" for non-boolean types, so NULL is
                # kept instead of issuing an UPDATE
                if f._type == 'boolean' or val is not False:
                    cr.execute(update_query, (ss[1](val), key))
    @api.model
    def _check_selection_field_value(self, field, value):
        """ Check whether value is among the valid values for the given
        selection/reference field, and raise an exception if not.

        :param str field: name of the selection/reference field
        :param value: candidate value to validate
        """
        field = self._fields[field]
        # convert_to_cache performs the validation and raises when the value
        # does not belong to the field's selection
        field.convert_to_cache(value, self)
def _check_removed_columns(self, cr, log=False):
# iterate on the database columns to drop the NOT NULL constraints
# of fields which were required but have been removed (or will be added by another module)
columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
columns += MAGIC_COLUMNS
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
" WHERE c.relname=%s"
" AND c.oid=a.attrelid"
" AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
" AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
for column in cr.dictfetchall():
if log:
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
    def _save_constraint(self, cr, constraint_name, type):
        """
        Record the creation of a constraint for this model, to make it possible
        to delete it later when the module is uninstalled. Type can be either
        'f' or 'u' depending on the constraint being a foreign key or not.

        :param cr: database cursor
        :param str constraint_name: SQL name of the constraint to track
        :param str type: 'f' (foreign key) or 'u' (other/unique)
        """
        if not self._module:
            # no need to save constraints for custom models as they're not part
            # of any module
            return
        assert type in ('f', 'u')
        # check whether this constraint is already tracked for the module
        cr.execute("""
            SELECT 1 FROM ir_model_constraint, ir_module_module
            WHERE ir_model_constraint.module=ir_module_module.id
                AND ir_model_constraint.name=%s
                AND ir_module_module.name=%s
            """, (constraint_name, self._module))
        if not cr.rowcount:
            # not tracked yet: insert the bookkeeping row
            cr.execute("""
                INSERT INTO ir_model_constraint
                    (name, date_init, date_update, module, model, type)
                VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                    (SELECT id FROM ir_module_module WHERE name=%s),
                    (SELECT id FROM ir_model WHERE model=%s), %s)""",
                (constraint_name, self._module, self._name, type))
    def _save_relation_table(self, cr, relation_table):
        """
        Record the creation of a many2many for this model, to make it possible
        to delete it later when the module is uninstalled.

        :param cr: database cursor
        :param str relation_table: name of the m2m relation table to track
        """
        # check whether this relation table is already tracked for the module
        cr.execute("""
            SELECT 1 FROM ir_model_relation, ir_module_module
            WHERE ir_model_relation.module=ir_module_module.id
                AND ir_model_relation.name=%s
                AND ir_module_module.name=%s
            """, (relation_table, self._module))
        if not cr.rowcount:
            cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
                VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                    (SELECT id FROM ir_module_module WHERE name=%s),
                    (SELECT id FROM ir_model WHERE model=%s))""",
                (relation_table, self._module, self._name))
            # the bookkeeping row was written with raw SQL: flush ORM caches
            self.invalidate_cache(cr, SUPERUSER_ID)
    # checked version: for direct m2o starting from ``self``
    def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
        """Schedule the creation of a foreign key from
        ``self._table``.``source_field`` to ``dest_model`` (the actual DDL is
        executed by _auto_end), after validating the transient/regular model
        combination.
        """
        assert self.is_transient() or not dest_model.is_transient(), \
            'Many2One relationships from non-transient Model to TransientModel are forbidden'
        if self.is_transient() and not dest_model.is_transient():
            # TransientModel relationships to regular Models are annoying
            # usually because they could block deletion due to the FKs.
            # So unless stated otherwise we default them to ondelete=cascade.
            ondelete = ondelete or 'cascade'
        fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
        self._foreign_keys.add(fk_def)
        _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
def _drop_constraint(self, cr, source_table, constraint_name):
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
    def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
        """Ensure ``source_table``.``source_field`` carries exactly one
        up-to-date FK constraint towards ``dest_model`` with the requested
        ON DELETE rule: stale or duplicated constraints are dropped, then the
        correct one is (re-)scheduled via _m2o_add_foreign_key_checked.
        """
        # Find FK constraint(s) currently established for the m2o field,
        # and see whether they are stale or not
        cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
                             cl2.relname as foreign_table
                      FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                           pg_attribute as att1, pg_attribute as att2
                      WHERE con.conrelid = cl1.oid
                        AND cl1.relname = %s
                        AND con.confrelid = cl2.oid
                        AND array_lower(con.conkey, 1) = 1
                        AND con.conkey[1] = att1.attnum
                        AND att1.attrelid = cl1.oid
                        AND att1.attname = %s
                        AND array_lower(con.confkey, 1) = 1
                        AND con.confkey[1] = att2.attnum
                        AND att2.attrelid = cl2.oid
                        AND att2.attname = %s
                        AND con.contype = 'f'""", (source_table, source_field, 'id'))
        constraints = cr.dictfetchall()
        if constraints:
            if len(constraints) == 1:
                # Is it the right constraint?
                cons, = constraints
                # compare the catalog's single-letter confdeltype code with
                # the code expected for the requested ondelete rule
                if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                        or cons['foreign_table'] != dest_model._table:
                    # Wrong FK: drop it and recreate
                    _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
                                  source_table, cons['constraint_name'])
                    self._drop_constraint(cr, source_table, cons['constraint_name'])
                else:
                    # it's all good, nothing to do!
                    return
            else:
                # Multiple FKs found for the same field, drop them all, and re-create
                for cons in constraints:
                    _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
                                  source_table, cons['constraint_name'])
                    self._drop_constraint(cr, source_table, cons['constraint_name'])
        # (re-)create the FK
        self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
    def _set_default_value_on_column(self, cr, column_name, context=None):
        """Fill the NULL cells of a (newly added or newly required) column
        with the model's default value for that field, using a single raw
        SQL UPDATE.
        """
        # ideally, we should use default_get(), but it fails due to ir.values
        # not being ready
        # get default value
        default = self._defaults.get(column_name)
        if callable(default):
            default = default(self, cr, SUPERUSER_ID, context)
        column = self._columns[column_name]
        ss = column._symbol_set
        # ss = (sql_placeholder, python->sql converter)
        db_default = ss[1](default)
        # Write default if non-NULL, except for booleans for which False means
        # the same as NULL - this saves us an expensive query on large tables.
        write_default = (db_default is not None if column._type != 'boolean'
                         else db_default)
        if write_default:
            _logger.debug("Table '%s': setting default value of new column %s to %r",
                          self._table, column_name, default)
            query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
                self._table, column_name, ss[0], column_name)
            cr.execute(query, (db_default,))
            # this is a disgrace
            cr.commit()
    def _auto_init(self, cr, context=None):
        """
        Call _field_create and, unless _auto is False:

        - create the corresponding table in database for the model,
        - possibly add the parent columns in database,
        - possibly add the columns 'create_uid', 'create_date', 'write_uid',
          'write_date' in database if _log_access is True (the default),
        - report on database columns no more existing in _columns,
        - remove no more existing not null constraints,
        - alter existing database columns to match _columns,
        - create database tables to match _columns,
        - add database indices to match _columns,
        - save in self._foreign_keys a list a foreign keys to create (see
          _auto_end).

        :return: list of ``(order, callable, args)`` deferred actions to run
                 once every model has been initialized (recomputation of
                 stored function/computed fields).
        """
        self._foreign_keys = set()
        raise_on_invalid_object_name(self._name)

        # This prevents anything called by this method (in particular default
        # values) from prefetching a field for which the corresponding column
        # has not been added in database yet!
        context = dict(context or {}, prefetch_fields=False)

        # Make sure an environment is available for get_pg_type(). This is
        # because we access column.digits, which retrieves a cursor from
        # existing environments.
        env = api.Environment(cr, SUPERUSER_ID, context)

        store_compute = False
        stored_fields = []              # new-style stored fields with compute
        todo_end = []
        update_custom_fields = context.get('update_custom_fields', False)
        self._field_create(cr, context=context)
        create = not self._table_exist(cr)
        if self._auto:

            if create:
                self._create_table(cr)
                has_rows = False
            else:
                # has_rows decides whether defaults must be backfilled below
                cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
                has_rows = cr.rowcount

            cr.commit()
            if self._parent_store:
                if not self._parent_columns_exist(cr):
                    self._create_parent_columns(cr)
                    store_compute = True

            self._check_removed_columns(cr, log=False)

            # iterate on the "object columns"
            column_data = self._select_column_data(cr)

            for k, f in self._columns.iteritems():
                if k == 'id': # FIXME: maybe id should be a regular column?
                    continue
                # Don't update custom (also called manual) fields
                if f.manual and not update_custom_fields:
                    continue

                if isinstance(f, fields.one2many):
                    self._o2m_raise_on_missing_reference(cr, f)

                elif isinstance(f, fields.many2many):
                    res = self._m2m_raise_or_create_relation(cr, f)
                    if res and self._fields[k].depends:
                        stored_fields.append(self._fields[k])

                else:
                    res = column_data.get(k)

                    # The field is not found as-is in database, try if it
                    # exists with an old name.
                    if not res and hasattr(f, 'oldname'):
                        res = column_data.get(f.oldname)
                        if res:
                            cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                            res['attname'] = k
                            column_data[k] = res
                            _schema.debug("Table '%s': renamed column '%s' to '%s'",
                                          self._table, f.oldname, k)

                    # The field already exists in database. Possibly
                    # change its type, rename it, drop it or change its
                    # constraints.
                    if res:
                        f_pg_type = res['typname']
                        f_pg_size = res['size']
                        f_pg_notnull = res['attnotnull']
                        if isinstance(f, fields.function) and not f.store and\
                                not getattr(f, 'nodrop', False):
                            # the column became a non-stored function field:
                            # its database storage is no longer needed
                            _logger.info('column %s (%s) converted to a function, removed from table %s',
                                         k, f.string, self._table)
                            cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                            cr.commit()
                            _schema.debug("Table '%s': dropped column '%s' with cascade",
                                          self._table, k)
                            f_obj_type = None
                        else:
                            f_obj_type = get_pg_type(f) and get_pg_type(f)[0]

                        if f_obj_type:
                            ok = False
                            # supported in-place migrations:
                            # (current pg type, new field type, new pg type, SQL cast)
                            casts = [
                                ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
                                ('varchar', 'text', 'TEXT', ''),
                                ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                                ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                                ('timestamp', 'date', 'date', '::date'),
                                ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                                ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                            ]
                            if f_pg_type == 'varchar' and f._type in ('char', 'selection') and f_pg_size and (f.size is None or f_pg_size < f.size):
                                try:
                                    with cr.savepoint():
                                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)), log_exceptions=False)
                                except psycopg2.NotSupportedError:
                                    # In place alter table cannot be done because a view is depending of this field.
                                    # Do a manual copy. This will drop the view (that will be recreated later)
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
                                    cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
                                    cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                                cr.commit()
                                _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
                                              self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                            for c in casts:
                                if (f_pg_type==c[0]) and (f._type==c[1]):
                                    if f_pg_type != f_obj_type:
                                        # migrate through a temporary column to apply the cast
                                        ok = True
                                        cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
                                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                                        cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                                        cr.execute('ALTER TABLE "%s" DROP COLUMN  __temp_type_cast CASCADE' % (self._table,))
                                        cr.commit()
                                        _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                                      self._table, k, c[0], c[1])
                                    break

                            if f_pg_type != f_obj_type:
                                if not ok:
                                    # unsupported type change: park the old data in a
                                    # fresh "<column>_movedN" column and recreate the column
                                    i = 0
                                    while True:
                                        newname = k + '_moved' + str(i)
                                        cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
                                            "WHERE c.relname=%s " \
                                            "AND a.attname=%s " \
                                            "AND c.oid=a.attrelid ", (self._table, newname))
                                        if not cr.fetchone()[0]:
                                            break
                                        i += 1
                                    if f_pg_notnull:
                                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                                    cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                                    _schema.warning("Table `%s`: column `%s` has changed type (DB=%s, def=%s), data moved to column `%s`",
                                                    self._table, k, f_pg_type, f._type, newname)

                            # if the field is required and hasn't got a NOT NULL constraint
                            if f.required and f_pg_notnull == 0:
                                if has_rows:
                                    self._set_default_value_on_column(cr, k, context=context)
                                # add the NOT NULL constraint
                                try:
                                    # use savepoints for openupgrade instead of transactions
                                    cr.execute('SAVEPOINT add_constraint');
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                    cr.execute('RELEASE SAVEPOINT add_constraint');
                                    _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                                                  self._table, k)
                                except Exception:
                                    cr.execute('ROLLBACK TO SAVEPOINT add_constraint');
                                    msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                        "If you want to have it, you should update the records and execute manually:\n"\
                                        "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                    _schema.warning(msg, self._table, k, self._table, k)
                                cr.commit()
                            elif not f.required and f_pg_notnull == 1:
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                                cr.commit()
                                _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                                              self._table, k)
                            # Verify index
                            indexname = '%s_%s_index' % (self._table, k)
                            cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
                            res2 = cr.dictfetchall()
                            if not res2 and f.select:
                                cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                                cr.commit()
                                if f._type == 'text':
                                    # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
                                    msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
                                        "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                                        " because there is a length limit for indexable btree values!\n"\
                                        "Use a search view instead if you simply want to make the field searchable."
                                    _schema.warning(msg, self._table, f._type, k)
                            if res2 and not f.select:
                                cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                                cr.commit()
                                msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
                                _schema.debug(msg, self._table, k, f._type)

                            if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                                dest_model = self.pool[f._obj]
                                if dest_model._auto and dest_model._table != 'ir_actions':
                                    self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)

                    # The field doesn't exist in database. Create it if necessary.
                    else:
                        if not isinstance(f, fields.function) or f.store:
                            # add the missing field
                            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                            cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                            _schema.debug("Table '%s': added column '%s' with definition=%s",
                                          self._table, k, get_pg_type(f)[1])

                            # initialize it
                            if has_rows:
                                self._set_default_value_on_column(cr, k, context=context)

                            # remember the functions to call for the stored fields
                            if isinstance(f, fields.function):
                                order = 10
                                if f.store is not True: # i.e. if f.store is a dict
                                    order = f.store[f.store.keys()[0]][2]
                                todo_end.append((order, self._update_store, (f, k)))

                            # remember new-style stored fields with compute method
                            if k in self._fields and self._fields[k].depends:
                                stored_fields.append(self._fields[k])

                            # and add constraints if needed
                            if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                                if f._obj not in self.pool:
                                    raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
                                dest_model = self.pool[f._obj]
                                ref = dest_model._table
                                # ir_actions is inherited so foreign key doesn't work on it
                                if dest_model._auto and ref != 'ir_actions':
                                    self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                            if f.select:
                                cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                            if f.required:
                                try:
                                    # use savepoints for openupgrade instead of transactions
                                    cr.execute('SAVEPOINT add_constraint');
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
                                    _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                                  self._table, k)
                                    cr.execute('RELEASE SAVEPOINT add_constraint');
                                except Exception:
                                    cr.execute('ROLLBACK TO SAVEPOINT add_constraint');
                                    msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                        "Try to re-run: openerp-server --update=module\n"\
                                        "If it doesn't work, update records and execute manually:\n"\
                                        "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                    _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
                                cr.commit()

        else:
            # _auto is False: the table is managed elsewhere, just detect
            # whether the SQL still has to be executed below
            cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
            create = not bool(cr.fetchone())

        cr.commit()     # start a new transaction

        if self._auto:
            self._add_sql_constraints(cr)

        if create:
            self._execute_sql(cr)

        if store_compute:
            self._parent_store_compute(cr)
            cr.commit()

        if stored_fields:
            # trigger computation of new-style stored fields with a compute
            def func(cr):
                _logger.info("Storing computed values of %s fields %s",
                             self._name, ', '.join(sorted(f.name for f in stored_fields)))
                recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
                recs = recs.search([])
                if recs:
                    map(recs._recompute_todo, stored_fields)
                    recs.recompute()

            todo_end.append((1000, func, ()))

        return todo_end
    def _auto_end(self, cr, context=None):
        """ Create the foreign keys recorded by _auto_init.

        Each entry of ``self._foreign_keys`` is a
        (table, column, referenced_table, ondelete) tuple; the set attribute
        is deleted afterwards, so this must run exactly once per _auto_init.
        """
        for t, k, r, d in self._foreign_keys:
            cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
            # track the constraint so module uninstallation can drop it
            self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
        cr.commit()
        del self._foreign_keys
def _table_exist(self, cr):
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
return cr.rowcount
    def _create_table(self, cr):
        """Create the model's table with only its ``id`` primary key column;
        the other columns are added afterwards by _auto_init."""
        cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
        # store the model description as the table comment
        cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
        _schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
cr.execute("""SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, 'parent_left'))
return cr.rowcount
    def _create_parent_columns(self, cr):
        """Add the ``parent_left``/``parent_right`` nested-set columns used
        by ``_parent_store`` models, logging errors when the model
        definition lacks the matching indexed integer fields or a safe
        ondelete rule on the parent field.
        """
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
        if 'parent_left' not in self._columns:
            _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
                          self._table)
            _schema.debug("Table '%s': added column '%s' with definition=%s",
                          self._table, 'parent_left', 'INTEGER')
        elif not self._columns['parent_left'].select:
            _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
                          self._table)
        if 'parent_right' not in self._columns:
            _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
                          self._table)
            _schema.debug("Table '%s': added column '%s' with definition=%s",
                          self._table, 'parent_right', 'INTEGER')
        elif not self._columns['parent_right'].select:
            _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
                          self._table)
        # nested-set maintenance rewrites subtrees; orphaned children would
        # corrupt the structure, hence the ondelete requirement
        if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
            _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
                          self._parent_name, self._name)
        cr.commit()
def _select_column_data(self, cr):
# attlen is the number of bytes necessary to represent the type when
# the type has a fixed size. If the type has a varying size attlen is
# -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
"WHERE c.relname=%s " \
"AND c.oid=a.attrelid " \
"AND a.atttypid=t.oid", (self._table,))
return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
def _o2m_raise_on_missing_reference(self, cr, f):
# TODO this check should be a method on fields.one2many.
if f._obj in self.pool:
other = self.pool[f._obj]
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
    def _m2m_raise_or_create_relation(self, cr, f):
        """ Create the table for the relation if necessary.
        Return ``True`` if the relation had to be created
        (and ``None`` when the table already existed).
        """
        m2m_tbl, col1, col2 = f._sql_names(self)
        # do not create relations for custom fields as they do not belong to a module
        # they will be automatically removed when dropping the corresponding ir.model.field
        # table name for custom relation all starts with x_, see __init__
        if not m2m_tbl.startswith('x_'):
            self._save_relation_table(cr, m2m_tbl)
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
        if not cr.dictfetchall():
            if f._obj not in self.pool:
                raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
            dest_model = self.pool[f._obj]
            ref = dest_model._table
            # the UNIQUE pair forbids duplicate links between the same records
            cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
            # create foreign key references with ondelete=cascade, unless the targets are SQL views
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')

            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
            cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
            cr.commit()
            _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
            return True
def _add_sql_constraints(self, cr):
"""
Modify this model's database table constraints so they match the one in
_sql_constraints.
"""
def unify_cons_text(txt):
return txt.lower().replace(', ',',').replace(' (','(')
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
self._save_constraint(cr, conname, 'u')
cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
existing_constraints = cr.dictfetchall()
sql_actions = {
'drop': {
'execute': False,
'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
self._table, conname, con),
'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
'order': 1,
},
'add': {
'execute': False,
'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
self._table, con),
'order': 2,
},
}
if not existing_constraints:
# constraint does not exists:
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
# constraint exists but its definition has changed:
sql_actions['drop']['execute'] = True
sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
# we need to add the constraint:
sql_actions = [item for item in sql_actions.values()]
sql_actions.sort(key=lambda x: x['order'])
for sql_action in [action for action in sql_actions if action['execute']]:
try:
# use savepoints for openupgrade instead of transactions
cr.execute('SAVEPOINT add_constraint2');
cr.execute(sql_action['query'])
cr.execute('RELEASE SAVEPOINT add_constraint2');
_schema.debug(sql_action['msg_ok'])
except:
_schema.warning(sql_action['msg_err'])
cr.execute('ROLLBACK TO SAVEPOINT add_constraint2');
def _execute_sql(self, cr):
""" Execute the SQL code from the _sql attribute (if any)."""
if hasattr(self, "_sql"):
for line in self._sql.split(';'):
line2 = line.replace('\n', '').strip()
if line2:
cr.execute(line2)
cr.commit()
#
# Update objects that uses this one to update their _inherits fields
#
@classmethod
def _init_inherited_fields(cls):
""" Determine inherited fields. """
# determine candidate inherited fields
fields = {}
for parent_model, parent_field in cls._inherits.iteritems():
parent = cls.pool[parent_model]
for name, field in parent._fields.iteritems():
# inherited fields are implemented as related fields, with the
# following specific properties:
# - reading inherited fields should not bypass access rights
# - copy inherited fields iff their original field is copied
fields[name] = field.new(
inherited=True,
related=(parent_field, name),
related_sudo=False,
copy=field.copy,
)
# add inherited fields that are not redefined locally
for name, field in fields.iteritems():
if name not in cls._fields:
cls._add_field(name, field)
@classmethod
def _inherits_reload(cls):
""" Recompute the _inherit_fields mapping. """
cls._inherit_fields = struct = {}
for parent_model, parent_field in cls._inherits.iteritems():
parent = cls.pool[parent_model]
parent._inherits_reload()
for name, column in parent._columns.iteritems():
struct[name] = (parent_model, parent_field, column, parent_model)
for name, source in parent._inherit_fields.iteritems():
struct[name] = (parent_model, parent_field, source[2], source[3])
@property
def _all_columns(self):
""" Returns a dict mapping all fields names (self fields and inherited
field via _inherits) to a ``column_info`` object giving detailed column
information. This property is deprecated, use ``_fields`` instead.
"""
result = {}
# do not inverse for loops, since local fields may hide inherited ones!
for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
result[k] = fields.column_info(k, col, parent, m2o, original_parent)
for k, col in self._columns.iteritems():
result[k] = fields.column_info(k, col)
return result
    @classmethod
    def _inherits_check(cls):
        """ Validate and normalize the ``_inherits`` configuration.

        Ensure every ``_inherits`` entry is backed by a required many2one
        field with ondelete cascade/restrict (creating or fixing the field
        when needed), and register fields declared with ``delegate=True``
        into ``cls._inherits``.
        """
        for table, field_name in cls._inherits.items():
            field = cls._fields.get(field_name)
            if not field:
                # no field declared: create the linking many2one automatically
                _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
                from .fields import Many2one
                field = Many2one(table, string="Automatically created field to link to parent %s" % table, required=True, ondelete="cascade")
                cls._add_field(field_name, field)
            elif not field.required or field.ondelete.lower() not in ("cascade", "restrict"):
                # declared but mis-configured: force the safe attributes
                _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
                field.required = True
                field.ondelete = "cascade"
        # reflect fields with delegate=True in dictionary cls._inherits
        for field in cls._fields.itervalues():
            if field.type == 'many2one' and not field.related and field.delegate:
                if not field.required:
                    _logger.warning("Field %s with delegate=True must be required.", field)
                    field.required = True
                if field.ondelete.lower() not in ('cascade', 'restrict'):
                    field.ondelete = 'cascade'
                cls._inherits[field.comodel_name] = field.name
@api.model
def _prepare_setup(self):
""" Prepare the setup of the model. """
type(self)._setup_done = False
    @api.model
    def _setup_base(self, partial):
        """ Determine the inherited and custom fields of the model.

        Idempotent: returns immediately once ``cls._setup_done`` is set.
        ``partial`` is forwarded to the manual-field initialization and to
        each parent's own ``_setup_base``.
        """
        cls = type(self)
        if cls._setup_done:
            return
        # 1. determine the proper fields of the model; duplicate them on cls to
        # avoid clashes with inheritance between different models
        for name in getattr(cls, '_fields', {}):
            delattr(cls, name)
        # retrieve fields from parent classes: every class attribute that is a
        # Field instance is duplicated onto this class
        cls._fields = {}
        cls._defaults = {}
        for attr, field in getmembers(cls, Field.__instancecheck__):
            cls._add_field(attr, field.new())
        # add magic and custom fields
        cls._add_magic_fields()
        cls._init_manual_fields(self._cr, partial)
        # 2. make sure that parent models determine their own fields, then add
        # inherited fields to cls
        cls._inherits_check()
        for parent in cls._inherits:
            self.env[parent]._setup_base(partial)
        cls._init_inherited_fields()
        cls._setup_done = True
    @api.model
    def _setup_fields(self):
        """ Setup the fields, except for recomputation triggers.

        Builds ``cls._columns`` from the fields that map to an old-API column,
        and links together the fields that share the same ``compute`` method
        through their ``computed_fields`` attribute.
        """
        cls = type(self)
        # set up fields, and determine their corresponding column
        cls._columns = {}
        for name, field in cls._fields.iteritems():
            field.setup(self.env)
            column = field.to_column()
            if column:
                cls._columns[name] = column
        # determine field.computed_fields
        computed_fields = defaultdict(list)
        for field in cls._fields.itervalues():
            if field.compute:
                computed_fields[field.compute].append(field)
        # NOTE: 'fields' here shadows the module-level 'fields' import within
        # this loop only
        for fields in computed_fields.itervalues():
            for field in fields:
                field.computed_fields = fields
@api.model
def _setup_complete(self):
""" Setup recomputation triggers, and complete the model setup. """
cls = type(self)
# set up field triggers
for field in cls._fields.itervalues():
field.setup_triggers(self.env)
# add invalidation triggers on model dependencies
if cls._depends:
triggers = [(field, None) for field in cls._fields.itervalues()]
for model_name, field_names in cls._depends.iteritems():
model = self.env[model_name]
for field_name in field_names:
field = model._fields[field_name]
for trigger in triggers:
field.add_trigger(trigger)
# determine old-api structures about inherited fields
cls._inherits_reload()
# register stuff about low-level function fields
cls._init_function_fields(cls.pool, self._cr)
# register constraints and onchange methods
cls._init_constraints_onchanges()
# check defaults
for name in cls._defaults:
assert name in cls._fields, \
"Model %s has a default for nonexiting field %s" % (cls._name, name)
# validate rec_name
if cls._rec_name:
assert cls._rec_name in cls._fields, \
"Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
elif 'name' in cls._fields:
cls._rec_name = 'name'
elif 'x_name' in cls._fields:
cls._rec_name = 'x_name'
def fields_get(self, cr, user, allfields=None, context=None, write_access=True, attributes=None):
""" fields_get([fields][, attributes])
Return the definition of each field.
The returned value is a dictionary (indiced by field name) of
dictionaries. The _inherits'd fields are included. The string, help,
and selection (if present) attributes are translated.
:param allfields: list of fields to document, all if empty or not provided
:param attributes: list of description attributes to return for each field, all if empty or not provided
"""
recs = self.browse(cr, user, [], context)
has_access = functools.partial(recs.check_access_rights, raise_exception=False)
readonly = not (has_access('write') or has_access('create'))
res = {}
for fname, field in self._fields.iteritems():
if allfields and fname not in allfields:
continue
if not field.setup_done:
continue
if field.groups and not recs.user_has_groups(field.groups):
continue
description = field.get_description(recs.env)
if readonly:
description['readonly'] = True
description['states'] = {}
if attributes:
description = {k: v for k, v in description.iteritems()
if k in attributes}
res[fname] = description
return res
def get_empty_list_help(self, cr, user, help, context=None):
""" Generic method giving the help message displayed when having
no result to display in a list or kanban view. By default it returns
the help given in parameter that is generally the help message
defined in the action.
"""
return help
    def check_field_access_rights(self, cr, user, operation, fields, context=None):
        """
        Check the user access rights on the given fields. This raises Access
        Denied if the user does not have the rights. Otherwise it returns the
        fields (as is if the fields is not falsy, or the readable/writable
        fields if fields is falsy).

        :raise AccessError: if ``fields`` names a field whose ``groups`` the
            user does not belong to
        """
        # the superuser bypasses field-level group restrictions entirely
        if user == SUPERUSER_ID:
            return fields or list(self._fields)
        def valid(fname):
            """ determine whether user has access to field ``fname`` """
            field = self._fields.get(fname)
            if field and field.groups:
                return self.user_has_groups(cr, user, groups=field.groups, context=context)
            else:
                # unknown or group-less fields are accessible to everybody
                return True
        if not fields:
            # no explicit list: return every field the user may access
            fields = filter(valid, self._fields)
        else:
            invalid_fields = set(filter(lambda name: not valid(name), fields))
            if invalid_fields:
                _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
                    operation, user, self._name, ', '.join(invalid_fields))
                raise AccessError(
                    _('The requested operation cannot be completed due to security restrictions. '
                      'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                    (self._description, operation))
        return fields
# add explicit old-style implementation to read()
@api.v7
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
records = self.browse(cr, user, ids, context)
result = BaseModel.read(records, fields, load=load)
return result if isinstance(ids, list) else (bool(result) and result[0])
# new-style implementation of read()
    @api.v8
    def read(self, fields=None, load='_classic_read'):
        """ read([fields])
        Reads the requested fields for the records in ``self``, low-level/RPC
        method. In Python code, prefer :meth:`~.browse`.

        :param fields: list of field names to return (default is all fields)
        :param load: ``'_classic_read'`` converts relational values through
            ``name_get``; any other value skips that conversion
        :return: a list of dictionaries mapping field names to their values,
                 with one dictionary per record
        :raise AccessError: if user has no read rights on some of the given
                records
        """
        # check access rights
        self.check_access_rights('read')
        fields = self.check_field_access_rights('read', fields)
        # split fields into stored and computed fields
        stored, inherited, computed = [], [], []
        for name in fields:
            if name in self._columns:
                stored.append(name)
            elif name in self._fields:
                computed.append(name)
                # inherited fields backed by a parent column can still be
                # fetched from the database (best effort)
                field = self._fields[name]
                if field.inherited and field.base_field.column:
                    inherited.append(name)
            else:
                _logger.warning("%s.read() with unknown field '%s'", self._name, name)
        # fetch stored fields from the database to the cache
        self._read_from_database(stored, inherited)
        # retrieve results from records; this takes values from the cache and
        # computes remaining fields
        result = []
        name_fields = [(name, self._fields[name]) for name in (stored + computed)]
        use_name_get = (load == '_classic_read')
        for record in self:
            try:
                values = {'id': record.id}
                for name, field in name_fields:
                    values[name] = field.convert_to_read(record[name], use_name_get)
                result.append(values)
            except MissingError:
                # records deleted meanwhile are silently skipped
                pass
        return result
    @api.multi
    def _prefetch_field(self, field):
        """ Read from the database in order to fetch ``field`` (:class:`Field`
        instance) for ``self`` in cache.

        Besides ``field`` itself, this opportunistically fetches other
        prefetchable fields (or the fields of the same ``multi`` group) for
        the records currently missing them in cache, capped at
        ``PREFETCH_MAX`` records.
        """
        # fetch the records of this model without field_name in their cache
        records = self._in_cache_without(field)
        if len(records) > PREFETCH_MAX:
            # cap the batch, but make sure self itself is part of it
            records = records[:PREFETCH_MAX] | self
        # determine which fields can be prefetched
        if not self.env.in_draft and \
                self._context.get('prefetch_fields', True) and \
                self._columns[field.name]._prefetch:
            # prefetch all classic and many2one fields that the user can access
            fnames = {fname
                      for fname, fcolumn in self._columns.iteritems()
                      if fcolumn._prefetch
                      if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
                      }
        elif self._columns[field.name]._multi:
            # prefetch all function fields with the same value for 'multi'
            multi = self._columns[field.name]._multi
            fnames = {fname
                      for fname, fcolumn in self._columns.iteritems()
                      if fcolumn._multi == multi
                      if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
                      }
        else:
            fnames = {field.name}
        # important: never prefetch fields to recompute!
        get_recs_todo = self.env.field_todo
        for fname in list(fnames):
            if get_recs_todo(self._fields[fname]):
                if fname == field.name:
                    # exclude the records pending recomputation for this field
                    records -= get_recs_todo(field)
                else:
                    fnames.discard(fname)
        # fetch records with read()
        assert self in records and field.name in fnames
        result = []
        try:
            result = records.read(list(fnames), load='_classic_write')
        except AccessError:
            # not all records may be accessible, try with only current record
            result = self.read(list(fnames), load='_classic_write')
        # check the cache, and update it if necessary
        if field not in self._cache:
            for values in result:
                record = self.browse(values.pop('id'))
                record._cache.update(record._convert_to_cache(values, validate=False))
            if not self._cache.contains(field):
                # still nothing: record the failure so later reads raise it
                e = AccessError("No value found for %s.%s" % (self, field.name))
                self._cache[field] = FailedValue(e)
    @api.multi
    def _read_from_database(self, field_names, inherited_field_names=[]):
        """ Read the given fields of the records in ``self`` from the database,
        and store them in cache. Access errors are also stored in cache.

        :param field_names: list of column names of model ``self``; all those
            fields are guaranteed to be read
        :param inherited_field_names: list of column names from parent
            models; some of those fields may not be read

        .. note:: the mutable default ``[]`` is never mutated here, so
            sharing it between calls is safe.
        """
        env = self.env
        cr, user, context = env.args
        # make a query object for selecting ids, and apply security rules to it
        query = Query(['"%s"' % self._table], ['"%s".id IN %%s' % self._table], [])
        self._apply_ir_rules(query, 'read')
        order_str = self._generate_order_by(None, query)
        # determine the fields that are stored as columns in tables;
        # for the sake of simplicity, discard inherited translated fields
        fields = map(self._fields.get, field_names + inherited_field_names)
        fields_pre = [
            field
            for field in fields
            if field.base_field.column._classic_write
            if not (field.inherited and field.base_field.column.translate)
        ]
        # the query may involve several tables: we need fully-qualified names
        def qualify(field):
            col = field.name
            if field.inherited:
                # inherited fields require a JOIN with the parent table
                res = self._inherits_join_calc(self._table, field.name, query)
            else:
                res = '"%s"."%s"' % (self._table, col)
            if field.type == 'binary' and (context.get('bin_size') or context.get('bin_size_' + col)):
                # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
                res = 'pg_size_pretty(length(%s)::bigint) as "%s"' % (res, col)
            return res
        # 'id' is always selected; set() removes duplicates
        qual_names = map(qualify, set(fields_pre + [self._fields['id']]))
        # determine the actual query to execute
        from_clause, where_clause, where_params = query.get_sql()
        query_str = """ SELECT %(qual_names)s FROM %(from_clause)s
                        WHERE %(where_clause)s %(order_str)s
                    """ % {
                        'qual_names': ",".join(qual_names),
                        'from_clause': from_clause,
                        'where_clause': where_clause,
                        'order_str': order_str,
                    }
        result = []
        for sub_ids in cr.split_for_in_conditions(self.ids):
            cr.execute(query_str, [tuple(sub_ids)] + where_params)
            result.extend(cr.dictfetchall())
        ids = [vals['id'] for vals in result]
        if ids:
            # translate the fields if necessary
            if context.get('lang'):
                ir_translation = env['ir.translation']
                for field in fields_pre:
                    if not field.inherited and field.column.translate:
                        f = field.name
                        #TODO: optimize out of this loop
                        res_trans = ir_translation._get_ids(
                            '%s,%s' % (self._name, f), 'model', context['lang'], ids)
                        for vals in result:
                            vals[f] = res_trans.get(vals['id'], False) or vals[f]
            # apply the symbol_get functions of the fields we just read
            for field in fields_pre:
                symbol_get = field.base_field.column._symbol_get
                if symbol_get:
                    f = field.name
                    for vals in result:
                        vals[f] = symbol_get(vals[f])
            # store result in cache for POST fields
            for vals in result:
                record = self.browse(vals['id'])
                record._cache.update(record._convert_to_cache(vals, validate=False))
            # determine the fields that must be processed now;
            # for the sake of simplicity, we ignore inherited fields
            fields_post = [f for f in field_names if not self._columns[f]._classic_write]
            # Compute POST fields, grouped by multi
            by_multi = defaultdict(list)
            for f in fields_post:
                by_multi[self._columns[f]._multi].append(f)
            for multi, fs in by_multi.iteritems():
                if multi:
                    # one get() call yields the values of all fields in the group
                    res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
                    assert res2 is not None, \
                        'The function field "%s" on the "%s" model returned None\n' \
                        '(a dictionary was expected).' % (fs[0], self._name)
                    for vals in result:
                        # TOCHECK : why got string instend of dict in python2.6
                        # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
                        multi_fields = res2.get(vals['id'], {})
                        if multi_fields:
                            for f in fs:
                                vals[f] = multi_fields.get(f, [])
                else:
                    for f in fs:
                        res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
                        for vals in result:
                            if res2:
                                vals[f] = res2[vals['id']]
                            else:
                                vals[f] = []
        # Warn about deprecated fields now that fields_pre and fields_post are computed
        for f in field_names:
            column = self._columns[f]
            if column.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
        # store result in cache
        for vals in result:
            record = self.browse(vals.pop('id'))
            record._cache.update(record._convert_to_cache(vals, validate=False))
        # store failed values in cache for the records that could not be read
        fetched = self.browse(ids)
        missing = self - fetched
        if missing:
            extras = fetched - self
            if extras:
                raise AccessError(
                    _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
                        ', '.join(map(repr, missing._ids)),
                        ', '.join(map(repr, extras._ids)),
                    ))
            # store an access error exception in existing records
            exc = AccessError(
                _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                (self._name, 'read')
            )
            forbidden = missing.exists()
            forbidden._cache.update(FailedValue(exc))
            # store a missing error exception in non-existing records
            exc = MissingError(
                _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
            )
            (missing - forbidden)._cache.update(FailedValue(exc))
    @api.multi
    def get_metadata(self):
        """
        Returns some metadata about the given records.

        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:

                    * id: object id
                    * create_uid: user who created the record
                    * create_date: date when the record was created
                    * write_uid: last user who changed the record
                    * write_date: date of the last change to the record
                    * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
                    * noupdate: A boolean telling if the record will be updated or not
        """
        fields = ['id']
        if self._log_access:
            fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
        quoted_table = '"%s"' % self._table
        fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
        # LEFT JOIN with ir_model_data to recover the external id, if any
        query = '''SELECT %s, __imd.noupdate, __imd.module, __imd.name
                   FROM %s LEFT JOIN ir_model_data __imd
                       ON (__imd.model = %%s and __imd.res_id = %s.id)
                   WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
        self._cr.execute(query, (self._name, tuple(self.ids)))
        res = self._cr.dictfetchall()
        # resolve all involved user ids to display names in one name_get() call
        uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
        names = dict(self.env['res.users'].browse(uids).name_get())
        for r in res:
            for key in r:
                # normalize NULLs to False, and uids to (id, name) pairs
                value = r[key] = r[key] or False
                if key in ('write_uid', 'create_uid') and value in names:
                    r[key] = (value, names[value])
            r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
            del r['name'], r['module']
        return res
    def _check_concurrency(self, cr, ids, context):
        """ Optimistic-concurrency check.

        Raise when any of ``ids`` was written to after the timestamp the
        client sent in ``context[CONCURRENCY_CHECK_FIELD]`` (a dict keyed by
        '<model>,<id>'). Does nothing when the check was not requested or the
        model has no log-access columns.
        """
        if not context:
            return
        if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
            return
        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
        for sub_ids in cr.split_for_in_conditions(ids):
            ids_to_check = []
            for id in sub_ids:
                id_ref = "%s,%s" % (self._name, id)
                # pop(): each client-provided timestamp is consumed once
                update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
                if update_date:
                    ids_to_check.extend([id, update_date])
            if not ids_to_check:
                continue
            # one OR'ed check_clause per (id, timestamp) pair
            cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
            res = cr.fetchone()
            if res:
                # mention the first one only to keep the error message readable
                raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
    def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
        """Verify the returned rows after applying record rules matches
        the length of ``ids``, and raise an appropriate exception if it does not.

        :raise except_orm: 'Access Denied' when ids were filtered out by
            record rules
        :raise MissingError: when ids do not exist in the database at all
            (except for 'read'/'unlink', which stay silent)
        """
        if context is None:
            context = {}
        ids, result_ids = set(ids), set(result_ids)
        missing_ids = ids - result_ids
        if missing_ids:
            # Attempt to distinguish record rule restriction vs deleted records,
            # to provide a more specific error message - check if the missing
            # ids are still present in the database
            cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
            forbidden_ids = [x[0] for x in cr.fetchall()]
            if forbidden_ids:
                # the missing ids are (at least partially) hidden by access rules
                if uid == SUPERUSER_ID:
                    return
                _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
                raise except_orm(_('Access Denied'),
                                 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                                    (self._description, operation))
            else:
                # If we get here, the missing_ids are not in the database
                if operation in ('read','unlink'):
                    # No need to warn about deleting an already deleted record.
                    # And no error when reading a record that was deleted, to prevent spurious
                    # errors for non-transactional search/read sequences coming from clients
                    return
                _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
                raise MissingError(
                    _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
according to the access rights."""
return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """Verifies that the operation given by ``operation`` is allowed for the user
        according to ir.rules.

        :param operation: one of ``write``, ``unlink``
        :raise except_orm: * if current ir.rules do not permit this operation.
        :return: None if the operation is allowed
        """
        # the superuser bypasses all record rules
        if uid == SUPERUSER_ID:
            return
        if self.is_transient():
            # Only one single implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled so that the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            cr.execute("""SELECT distinct create_uid
                          FROM %s
                          WHERE id IN %%s""" % self._table, (tuple(ids),))
            uids = [x[0] for x in cr.fetchall()]
            if len(uids) != 1 or uids[0] != uid:
                raise except_orm(_('Access Denied'),
                                 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
        else:
            where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
            if where_clause:
                where_clause = ' and ' + ' and '.join(where_clause)
                for sub_ids in cr.split_for_in_conditions(ids):
                    cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                               ' WHERE ' + self._table + '.id IN %s' + where_clause,
                               [sub_ids] + where_params)
                    returned_ids = [x['id'] for x in cr.dictfetchall()]
                    # raise when rules filtered out some ids, or they are gone
                    self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def create_workflow(self, cr, uid, ids, context=None):
"""Create a workflow instance for each given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_create(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def delete_workflow(self, cr, uid, ids, context=None):
"""Delete the workflow instances bound to the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_delete(uid, self._name, res_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
def step_workflow(self, cr, uid, ids, context=None):
"""Reevaluate the workflow instances of the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_write(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def signal_workflow(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
from openerp import workflow
result = {}
for res_id in ids:
result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return result
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
""" Rebind the workflow instance bound to the given 'old' record IDs to
the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
"""
from openerp import workflow
for old_id, new_id in old_new_ids:
workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
    def unlink(self, cr, uid, ids, context=None):
        """ unlink()
        Deletes the records of the current set

        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records
        """
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]
        # capture pending stored-field recomputations to replay after deletion
        result_store = self._store_get_values(cr, uid, ids, self._fields.keys(), context)
        # for recomputing new-style fields
        recs = self.browse(cr, uid, ids, context)
        recs.modified(self._fields)
        self._check_concurrency(cr, ids, context)
        self.check_access_rights(cr, uid, 'unlink')
        ir_property = self.pool.get('ir.property')
        # Check if the records are used as default properties.
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
                 ]
        if ir_property.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
        # Delete the records' properties.
        property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
        ir_property.unlink(cr, uid, property_ids, context=context)
        self.delete_workflow(cr, uid, ids, context=context)
        self.check_access_rule(cr, uid, ids, 'unlink', context=context)
        pool_model_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        ir_attachment_obj = self.pool.get('ir.attachment')
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('delete from ' + self._table + ' ' \
                       'where id IN %s', (sub_ids,))
            # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
            # as these are not connected with real database foreign keys, and would be dangling references.
            # Note: following steps performed as admin to avoid access rights restrictions, and with no context
            # to avoid possible side-effects during admin calls.
            # Step 1. Calling unlink of ir_model_data only for the affected IDS
            reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Marching towards the real deletion of referenced records
            if reference_ids:
                pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
            # For the same reason, removing the record relevant to ir_values
            ir_value_ids = ir_values_obj.search(cr, uid,
                ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
                context=context)
            if ir_value_ids:
                ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
            # For the same reason, removing the record relevant to ir_attachment
            # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
            cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
            ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
            if ir_attachment_ids:
                ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
        # invalidate the *whole* cache, since the orm does not handle all
        # changes made in the database, like cascading delete!
        recs.invalidate_cache()
        # replay stored-field recomputation on the records that survived
        for order, obj_name, store_ids, fields in result_store:
            if obj_name == self._name:
                effective_store_ids = set(store_ids) - set(ids)
            else:
                effective_store_ids = store_ids
            if effective_store_ids:
                obj = self.pool[obj_name]
                cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
                rids = map(lambda x: x[0], cr.fetchall())
                if rids:
                    obj._store_set_values(cr, uid, rids, fields, context)
        # recompute new-style fields
        recs.recompute()
        return True
#
# TODO: Validate
#
    @api.multi
    def write(self, vals):
        """ write(vals)

        Updates all records in the current set with the provided values.

        :param dict vals: fields to update and the value to set on them e.g::

                {'foo': 1, 'bar': "Qux"}

            will set the field ``foo`` to ``1`` and the field ``bar`` to
            ``"Qux"`` if those are valid (otherwise it will trigger an error).

        :raise AccessError: * if user has no write rights on the requested object
                            * if user tries to bypass access rules for write on the requested object
        :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

        * For numeric fields (:class:`~openerp.fields.Integer`,
          :class:`~openerp.fields.Float`) the value should be of the
          corresponding type
        * For :class:`~openerp.fields.Boolean`, the value should be a
          :class:`python:bool`
        * For :class:`~openerp.fields.Selection`, the value should match the
          selection values (generally :class:`python:str`, sometimes
          :class:`python:int`)
        * For :class:`~openerp.fields.Many2one`, the value should be the
          database identifier of the record to set
        * Other non-relational fields use a string for value

          .. danger::

              for historical and compatibility reasons,
              :class:`~openerp.fields.Date` and
              :class:`~openerp.fields.Datetime` fields use strings as values
              (written and read) rather than :class:`~python:datetime.date` or
              :class:`~python:datetime.datetime`. These date strings are
              UTC-only and formatted according to
              :const:`openerp.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
              :const:`openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
        * .. _openerp/models/relationals/format:

          :class:`~openerp.fields.One2many` and
          :class:`~openerp.fields.Many2many` use a special "commands" format to
          manipulate the set of records stored in/associated with the field.

          This format is a list of triplets executed sequentially, where each
          triplet is a command to execute on the set of records. Not all
          commands apply in all situations. Possible commands are:

          ``(0, _, values)``
              adds a new record created from the provided ``value`` dict.
          ``(1, id, values)``
              updates an existing record of id ``id`` with the values in
              ``values``. Can not be used in :meth:`~.create`.
          ``(2, id, _)``
              removes the record of id ``id`` from the set, then deletes it
              (from the database). Can not be used in :meth:`~.create`.
          ``(3, id, _)``
              removes the record of id ``id`` from the set, but does not
              delete it. Can not be used on
              :class:`~openerp.fields.One2many`. Can not be used in
              :meth:`~.create`.
          ``(4, id, _)``
              adds an existing record of id ``id`` to the set. Can not be
              used on :class:`~openerp.fields.One2many`.
          ``(5, _, _)``
              removes all records from the set, equivalent to using the
              command ``3`` on every record explicitly. Can not be used on
              :class:`~openerp.fields.One2many`. Can not be used in
              :meth:`~.create`.
          ``(6, _, ids)``
              replaces all existing records in the set by the ``ids`` list,
              equivalent to using the command ``5`` followed by a command
              ``4`` for each ``id`` in ``ids``. Can not be used on
              :class:`~openerp.fields.One2many`.

          .. note:: Values marked as ``_`` in the list above are ignored and
                    can be anything, generally ``0`` or ``False``.
        """
        # nothing to do on an empty recordset
        if not self:
            return True

        self._check_concurrency(self._ids)
        self.check_access_rights('write')

        # No user-driven update of these columns
        for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
            vals.pop(field, None)

        # split up fields into old-style and pure new-style ones; a stored
        # computed field with an inverse may legitimately land in both dicts
        old_vals, new_vals, unknown = {}, {}, []
        for key, val in vals.iteritems():
            field = self._fields.get(key)
            if field:
                if field.column or field.inherited:
                    old_vals[key] = val
                if field.inverse and not field.inherited:
                    new_vals[key] = val
            else:
                unknown.append(key)
        if unknown:
            _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))

        # write old-style fields with (low-level) method _write
        if old_vals:
            self._write(old_vals)

        # put the values of pure new-style fields into cache, and inverse them
        if new_vals:
            for record in self:
                record._cache.update(record._convert_to_cache(new_vals, update=True))
            for key in new_vals:
                self._fields[key].determine_inverse(self)

        return True
    def _write(self, cr, user, ids, vals, context=None):
        """Low-level implementation of :meth:`write` for old-API (column-backed)
        fields: performs the direct SQL UPDATE, handles translations,
        delegates inherited fields, maintains the parent_left/parent_right
        nested-set columns and fires function-field store triggers.
        """
        if not context:
            context = {}

        readonly = None
        self.check_field_access_rights(cr, user, 'write', vals.keys())
        # records removed via a (2, id, _) o2m/m2m command: excluded later
        # from store-trigger recomputation since they are being deleted
        deleted_related = defaultdict(list)
        for field in vals.keys():
            fobj = None
            if field in self._columns:
                fobj = self._columns[field]
            elif field in self._inherit_fields:
                fobj = self._inherit_fields[field][2]
            if not fobj:
                continue
            if fobj._type in ['one2many', 'many2many'] and vals[field]:
                for wtuple in vals[field]:
                    if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
                        deleted_related[fobj._obj].append(wtuple[1])
            # field-level group restriction: silently drop the value unless
            # the user belongs to at least one of the allowed groups
            groups = fobj.write
            if groups:
                edit = False
                for group in groups:
                    module = group.split(".")[0]
                    grp = group.split(".")[1]
                    cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                               (grp, module, 'res.groups', user))
                    readonly = cr.fetchall()
                    if readonly[0][0] >= 1:
                        edit = True
                        break

                if not edit:
                    vals.pop(field)

        result = self._store_get_values(cr, user, ids, vals.keys(), context) or []

        # for recomputing new-style fields
        recs = self.browse(cr, user, ids, context)
        modified_fields = list(vals)
        if self._log_access:
            modified_fields += ['write_date', 'write_uid']
        recs.modified(modified_fields)

        parents_changed = []
        parent_order = self._parent_order or self._order
        if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
            # The parent_left/right computation may take up to
            # 5 seconds. No need to recompute the values if the
            # parent is the same.
            # Note: to respect parent_order, nodes must be processed in
            # order, so ``parents_changed`` must be ordered properly.
            parent_val = vals[self._parent_name]
            if parent_val:
                query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
                                (self._table, self._parent_name, self._parent_name, parent_order)
                cr.execute(query, (tuple(ids), parent_val))
            else:
                query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
                                (self._table, self._parent_name, parent_order)
                cr.execute(query, (tuple(ids),))
            parents_changed = map(operator.itemgetter(0), cr.fetchall())

        updates = []            # list of (column, expr) or (column, pattern, value)
        upd_todo = []           # fields requiring their column's set() method
        updend = []             # fields belonging to _inherits parent models
        direct = []             # classic columns written directly via SQL
        totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
        for field in vals:
            ffield = self._fields.get(field)
            if ffield and ffield.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, field, ffield.deprecated)
            if field in self._columns:
                column = self._columns[field]
                if hasattr(column, 'selection') and vals[field]:
                    self._check_selection_field_value(cr, user, field, vals[field], context=context)
                if column._classic_write and not hasattr(column, '_fnct_inv'):
                    # translatable columns in a non-en_US context go through
                    # ir.translation instead of a direct SQL write
                    if (not totranslate) or not column.translate:
                        updates.append((field, column._symbol_set[0], column._symbol_set[1](vals[field])))
                        direct.append(field)
                    else:
                        upd_todo.append(field)
                else:
                    upd_todo.append(field)
            else:
                updend.append(field)

        if self._log_access:
            updates.append(('write_uid', '%s', user))
            updates.append(('write_date', "(now() at time zone 'UTC')"))
            direct.append('write_uid')
            direct.append('write_date')

        if updates:
            self.check_access_rule(cr, user, ids, 'write', context=context)
            query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
                self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
            )
            params = tuple(u[2] for u in updates if len(u) > 2)
            for sub_ids in cr.split_for_in_conditions(set(ids)):
                cr.execute(query, params + (sub_ids,))
                # a row-count mismatch means some id no longer exists
                if cr.rowcount != len(sub_ids):
                    raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)

            if totranslate:
                # TODO: optimize
                for f in direct:
                    if self._columns[f].translate:
                        src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
                        if not src_trans:
                            src_trans = vals[f]
                            # Inserting value to DB
                            context_wo_lang = dict(context, lang=None)
                            self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
                        self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)

        # invalidate and mark new-style fields to recompute; do this before
        # setting other fields, because it can require the value of computed
        # fields, e.g., a one2many checking constraints on records
        recs.modified(direct)

        # call the 'set' method of fields which are not classic_write
        upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

        # default element in context must be removed when call a one2many or many2many
        rel_context = context.copy()
        for c in context.items():
            if c[0].startswith('default_'):
                del rel_context[c[0]]

        for field in upd_todo:
            for id in ids:
                result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []

        # for recomputing new-style fields
        recs.modified(upd_todo)

        # delegate values of inherited fields to the proper parent models
        unknown_fields = set(updend)
        for table, inherit_field in self._inherits.iteritems():
            col = self._inherits[table]
            nids = []
            for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
                           'where id IN %s', (sub_ids,))
                nids.extend([x[0] for x in cr.fetchall()])

            v = {}
            for fname in updend:
                field = self._fields[fname]
                if field.inherited and field.related[0] == inherit_field:
                    v[fname] = vals[fname]
                    unknown_fields.discard(fname)
            if v:
                self.pool[table].write(cr, user, nids, v, context)

        if unknown_fields:
            _logger.warning(
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))

        # check Python constraints
        recs._validate_fields(vals)

        # TODO: use _order to set dest at the right position and not first node of parent
        # We can't defer parent_store computation because the stored function
        # fields that are computer may refer (directly or indirectly) to
        # parent_left/right (via a child_of domain)
        if parents_changed:
            if self.pool._init:
                self.pool._init_parent[self._name] = True
            else:
                order = self._parent_order or self._order
                parent_val = vals[self._parent_name]
                if parent_val:
                    clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
                else:
                    clause, params = '%s IS NULL' % (self._parent_name,), ()

                for id in parents_changed:
                    cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
                    pleft, pright = cr.fetchone()
                    distance = pright - pleft + 1

                    # Positions of current siblings, to locate proper insertion point;
                    # this can _not_ be fetched outside the loop, as it needs to be refreshed
                    # after each update, in case several nodes are sequentially inserted one
                    # next to the other (i.e computed incrementally)
                    cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
                    parents = cr.fetchall()

                    # Find Position of the element
                    position = None
                    for (parent_pright, parent_id) in parents:
                        if parent_id == id:
                            break
                        position = parent_pright and parent_pright + 1 or 1

                    # It's the first node of the parent
                    if not position:
                        if not parent_val:
                            position = 1
                        else:
                            cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
                            position = cr.fetchone()[0] + 1

                    if pleft < position <= pright:
                        raise except_orm(_('UserError'), _('Recursivity Detected.'))

                    # shift the interval, then move the subtree into the gap
                    if pleft < position:
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
                    else:
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))

                recs.invalidate_cache(['parent_left', 'parent_right'])

        result += self._store_get_values(cr, user, ids, vals.keys(), context)

        done = {}
        recs.env.recompute_old.extend(result)
        while recs.env.recompute_old:
            sorted_recompute_old = sorted(recs.env.recompute_old)
            recs.env.clear_recompute_old()
            for __, model_name, ids_to_update, fields_to_recompute in \
                    sorted_recompute_old:
                key = (model_name, tuple(fields_to_recompute))
                done.setdefault(key, {})
                # avoid to do several times the same computation
                todo = []
                for id in ids_to_update:
                    if id not in done[key]:
                        done[key][id] = True
                        if id not in deleted_related[model_name]:
                            todo.append(id)
                self.pool[model_name]._store_set_values(
                    cr, user, todo, fields_to_recompute, context)

        # recompute new-style fields
        if recs.env.recompute and context.get('recompute', True):
            recs.recompute()

        self.step_workflow(cr, user, ids, context=context)
        return True
#
# TODO: Should set perm to user.xxx
#
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
""" create(vals) -> record
Creates a new record for the model.
The new record is initialized using the values from ``vals`` and
if necessary those from :meth:`~.default_get`.
:param dict vals:
values for the model's fields, as a dictionary::
{'field_name': field_value, ...}
see :meth:`~.write` for details
:return: new record created
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
"""
self.check_access_rights('create')
# add missing defaults, and drop fields that may not be set by user
vals = self._add_missing_default_values(vals)
for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
vals.pop(field, None)
# split up fields into old-style and pure new-style ones
old_vals, new_vals, unknown = {}, {}, []
for key, val in vals.iteritems():
field = self._fields.get(key)
if field:
if field.column or field.inherited:
old_vals[key] = val
if field.inverse and not field.inherited:
new_vals[key] = val
else:
unknown.append(key)
if unknown:
_logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
# create record with old-style fields
record = self.browse(self._create(old_vals))
# put the values of pure new-style fields into cache, and inverse them
record._cache.update(record._convert_to_cache(new_vals))
for key in new_vals:
self._fields[key].determine_inverse(record)
return record
def _create(self, cr, user, vals, context=None):
# low-level implementation of create()
if not context:
context = {}
if self.is_transient():
self._transient_vacuum(cr, user)
tocreate = {}
for v in self._inherits:
if self._inherits[v] not in vals:
tocreate[v] = {}
else:
tocreate[v] = {'id': vals[self._inherits[v]]}
updates = [
# list of column assignments defined as tuples like:
# (column_name, format_string, column_value)
# (column_name, sql_formula)
# Those tuples will be used by the string formatting for the INSERT
# statement below.
('id', "nextval('%s')" % self._sequence),
]
upd_todo = []
unknown_fields = []
for v in vals.keys():
if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
unknown_fields.append(v)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
if record_id is None or not record_id:
record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
else:
self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
updates.append((self._inherits[table], '%s', record_id))
#Start : Set bool fields to be False if they are not touched(to make search more powerful)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
for bool_field in bool_fields:
if bool_field not in vals:
vals[bool_field] = False
#End
for field in vals.keys():
fobj = None
if field in self._columns:
fobj = self._columns[field]
else:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
vals.pop(field)
for field in vals:
current_field = self._columns[field]
if current_field._classic_write:
updates.append((field, current_field._symbol_set[0], current_field._symbol_set[1](vals[field])))
#for the function fields that receive a value, we set them directly in the database
#(they may be required), but we also need to trigger the _fct_inv()
if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
#TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
#one week of the release candidate. It seems the only good way to handle correctly this is to add an
#attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
#if, for example, the related has a default value (for usability) then the fct_inv is called and it
#may raise some access rights error. Changing this is a too big change for now, and is thus postponed
#after the release but, definitively, the behavior shouldn't be different for related and function
#fields.
upd_todo.append(field)
else:
#TODO: this `if´ statement should be removed because there is no good reason to special case the fields
#related. See the above TODO comment for further explanations.
if not isinstance(current_field, fields.related):
upd_todo.append(field)
if field in self._columns \
and hasattr(current_field, 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
updates.append(('create_uid', '%s', user))
updates.append(('write_uid', '%s', user))
updates.append(('create_date', "(now() at time zone 'UTC')"))
updates.append(('write_date', "(now() at time zone 'UTC')"))
# the list of tuples used in this formatting corresponds to
# tuple(field_name, format, value)
# In some case, for example (id, create_date, write_date) we does not
# need to read the third value of the tuple, because the real value is
# encoded in the second value (the format).
cr.execute(
"""INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
self._table,
', '.join('"%s"' % u[0] for u in updates),
', '.join(u[1] for u in updates)
),
tuple([u[2] for u in updates if len(u) > 2])
)
id_new, = cr.fetchone()
recs = self.browse(cr, user, id_new, context)
if self._parent_store and not context.get('defer_parent_store_computation'):
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
parent = vals.get(self._parent_name, False)
if parent:
cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
pleft_old = None
result_p = cr.fetchall()
for (pleft,) in result_p:
if not pleft:
break
pleft_old = pleft
if not pleft_old:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
pleft_old = cr.fetchone()[0]
pleft = pleft_old
else:
cr.execute('select max(parent_right) from '+self._table)
pleft = cr.fetchone()[0] or 0
cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
recs.invalidate_cache(['parent_left', 'parent_right'])
# invalidate and mark new-style fields to recompute; do this before
# setting other fields, because it can require the value of computed
# fields, e.g., a one2many checking constraints on records
recs.modified(self._fields)
# call the 'set' method of fields which are not classic_write
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
# default element in context must be remove when call a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
result = []
for field in upd_todo:
result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
# for recomputing new-style fields
recs.modified(upd_todo)
# check Python constraints
recs._validate_fields(vals)
result += self._store_get_values(cr, user, [id_new],
list(set(vals.keys() + self._inherits.values())),
context)
recs.env.recompute_old.extend(result)
if recs.env.recompute and context.get('recompute', True):
done = []
while recs.env.recompute_old:
sorted_recompute_old = sorted(recs.env.recompute_old)
recs.env.clear_recompute_old()
for __, model_name, ids, fields2 in sorted_recompute_old:
if not (model_name, ids, fields2) in done:
self.pool[model_name]._store_set_values(
cr, user, ids, fields2, context)
done.append((model_name, ids, fields2))
# recompute new-style fields
recs.recompute()
if self._log_create and recs.env.recompute and context.get('recompute', True):
message = self._description + \
" '" + \
self.name_get(cr, user, [id_new], context=context)[0][1] + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
self.check_access_rule(cr, user, [id_new], 'create', context=context)
self.create_workflow(cr, user, [id_new], context=context)
return id_new
    def _store_get_values(self, cr, uid, ids, fields, context):
        """Returns an ordered list of fields.function to call due to
           an update operation on ``fields`` of records with ``ids``,
           obtained by calling the 'store' triggers of these fields,
           as setup by their 'store' attribute.

           :return: [(priority, model_name, [record_ids,], [function_fields,])]
        """
        if fields is None: fields = []
        stored_functions = self.pool._store_function.get(self._name, [])

        # use indexed names for the details of the stored_functions:
        model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)

        # only keep store triggers that should be triggered for the ``fields``
        # being written to.
        triggers_to_compute = (
            f for f in stored_functions
            if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
        )

        to_compute_map = {}
        # memoize target-id computations keyed by id() of the target function,
        # so a function shared by several triggers is only called once
        target_id_results = {}
        for store_trigger in triggers_to_compute:
            target_func_id_ = id(store_trigger[target_ids_func_])
            if target_func_id_ not in target_id_results:
                # use admin user for accessing objects having rules defined on store fields
                target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
            target_ids = target_id_results[target_func_id_]

            # the compound key must consider the priority and model name
            key = (store_trigger[priority_], store_trigger[model_name_])
            for target_id in target_ids:
                to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))

        # Here to_compute_map looks like:
        # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
        #   (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
        #   (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
        # }

        # Now we need to generate the batch function calls list
        # call_map =
        #   { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
        call_map = {}
        for ((priority,model), id_map) in to_compute_map.iteritems():
            # group target ids sharing the exact same trigger set, so each
            # group becomes one batched call
            trigger_ids_maps = {}
            # function_ids_maps =
            #   { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
            for target_id, triggers in id_map.iteritems():
                trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
            for triggers, target_ids in trigger_ids_maps.iteritems():
                call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
                                                                [t[func_field_to_compute_] for t in triggers]))
        result = []
        if call_map:
            # flatten in (priority, model) order
            result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
        return result
    def _store_set_values(self, cr, uid, ids, fields, context):
        """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
           respecting ``multi`` attributes), and stores the resulting values in the database directly."""
        if not ids:
            return True
        field_flag = False
        # field_dict maps record id -> list of fields considered fresh enough
        # (per the trigger's hour-based delay, slot i[5]) to skip recomputation
        field_dict = {}
        if self._log_access:
            cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
            res = cr.fetchall()
            for r in res:
                if r[1]:
                    field_dict.setdefault(r[0], [])
                    res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
                    write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
                    for i in self.pool._store_function.get(self._name, []):
                        if i[5]:
                            up_write_date = write_date + datetime.timedelta(hours=i[5])
                            if datetime.datetime.now() < up_write_date:
                                if i[1] in fields:
                                    field_dict[r[0]].append(i[1])
                                    if not field_flag:
                                        field_flag = True
        # group the fields by their '_multi' attribute: fields sharing a multi
        # key are computed together in a single get() call
        todo = {}
        keys = []
        for f in fields:
            if self._columns[f]._multi not in keys:
                keys.append(self._columns[f]._multi)
            todo.setdefault(self._columns[f]._multi, [])
            todo[self._columns[f]._multi].append(f)
        for key in keys:
            val = todo[key]
            if key:
                # multi-field case: one get() call returns a dict of values
                # per record for the whole field group
                # use admin user for accessing objects having rules defined on store fields
                result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
                for id, value in result.items():
                    if field_flag:
                        for f in value.keys():
                            if f in field_dict[id]:
                                value.pop(f)
                    updates = []        # list of (column, pattern, value)
                    for v in value:
                        if v not in val:
                            continue
                        column = self._columns[v]
                        # many2one results may come as (id, name) pairs
                        if column._type == 'many2one':
                            try:
                                value[v] = value[v][0]
                            except:
                                pass
                        updates.append((v, column._symbol_set[0], column._symbol_set[1](value[v])))
                    if updates:
                        query = 'UPDATE "%s" SET %s WHERE id = %%s' % (
                            self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
                        )
                        params = tuple(u[2] for u in updates)
                        cr.execute(query, params + (id,))
            else:
                # single-field case: compute and write each field separately
                for f in val:
                    column = self._columns[f]
                    # use admin user for accessing objects having rules defined on store fields
                    result = column.get(cr, self, ids, f, SUPERUSER_ID, context=context)
                    for r in result.keys():
                        if field_flag:
                            if r in field_dict.keys():
                                if f in field_dict[r]:
                                    result.pop(r)
                    for id, value in result.items():
                        # many2one results may come as (id, name) pairs
                        if column._type == 'many2one':
                            try:
                                value = value[0]
                            except:
                                pass
                        query = 'UPDATE "%s" SET "%s"=%s WHERE id = %%s' % (
                            self._table, f, column._symbol_set[0],
                        )
                        cr.execute(query, (column._symbol_set[1](value), id))

        # invalidate and mark new-style fields to recompute
        self.browse(cr, uid, ids, context).modified(fields)
        return True
    # TODO: improve handling of NULL values
def _where_calc(self, cr, user, domain, active_test=True, context=None):
"""Computes the WHERE clause needed to implement an OpenERP domain.
:param domain: the domain to compute
:type domain: list
:param active_test: whether the default filtering of records with ``active``
field set to ``False`` should be applied.
:return: the query expressing the given domain as provided in domain
:rtype: osv.query.Query
"""
if not context:
context = {}
domain = domain[:]
# if the object has a field named 'active', filter out all inactive
# records unless they were explicitely asked for
if 'active' in self._fields and active_test and context.get('active_test', True):
if domain:
# the item[0] trick below works for domain items and '&'/'|'/'!'
# operators too
if not any(item[0] == 'active' for item in domain):
domain.insert(0, ('active', '=', 1))
else:
domain = [('active', '=', 1)]
if domain:
e = expression.expression(cr, user, domain, self, context)
tables = e.get_tables()
where_clause, where_params = e.to_sql()
where_clause = where_clause and [where_clause] or []
else:
where_clause, where_params, tables = [], [], ['"%s"' % self._table]
return Query(tables, where_clause, where_params)
def _check_qorder(self, word):
if not regex_order.match(word):
raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
return True
    def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
        """Add what's missing in ``query`` to implement all appropriate ir.rules
          (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)

           :param query: the current query object
        """
        # the superuser bypasses record rules entirely
        if uid == SUPERUSER_ID:
            return

        def apply_rule(added_clause, added_params, added_tables, parent_model=None):
            """ :param parent_model: name of the parent model, if the added
                    clause comes from a parent model
            """
            if added_clause:
                if parent_model:
                    # as inherited rules are being applied, we need to add the missing JOIN
                    # to reach the parent table (if it was not JOINed yet in the query)
                    parent_alias = self._inherits_join_add(self, parent_model, query)
                    # inherited rules are applied on the external table -> need to get the alias and replace
                    parent_table = self.pool[parent_model]._table
                    added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
                    # change references to parent_table to parent_alias, because we now use the alias to refer to the table
                    new_tables = []
                    for table in added_tables:
                        # table is just a table name -> switch to the full alias
                        if table == '"%s"' % parent_table:
                            new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
                        # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
                        else:
                            new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
                    added_tables = new_tables
                query.where_clause += added_clause
                query.where_clause_params += added_params
                for table in added_tables:
                    if table not in query.tables:
                        query.tables.append(table)
                return True
            return False

        # apply main rules on the object
        rule_obj = self.pool.get('ir.rule')
        rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
        apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)

        # apply ir.rules from the parents (through _inherits)
        for inherited_model in self._inherits:
            rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
            apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
                       parent_model=inherited_model)
    def _generate_m2o_order_by(self, alias, order_field, query, reverse_direction, seen):
        """
        Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
        either native m2o fields or function/related fields that are stored, including
        intermediate JOINs for inheritance if required.

        :param alias: SQL alias (in ``query``) of the table holding ``order_field``
        :param order_field: name of the many2one field to sort on
        :param order_field: name of the many2one field to sort on
        :param query: Query object to which missing JOINs are added as a side effect
        :param reverse_direction: whether the requested sort direction is reversed
        :param seen: set of (model, comodel, field) triples already processed,
            used by the caller to break cycles between mutually-referencing m2o orderings
        :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
            (as a list of clause strings; empty list if the field cannot be sorted on)
        """
        if order_field not in self._columns and order_field in self._inherit_fields:
            # field comes from a parent model (_inherits): add the JOIN(s)
            # needed to reach the parent table, then sort on its column
            order_field_column = self._inherit_fields[order_field][2]
            qualified_field = self._inherits_join_calc(alias, order_field, query)
            alias, order_field = qualified_field.replace('"', '').split('.', 1)
        else:
            order_field_column = self._columns[order_field]

        assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
        if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
            # non-stored function/related fields have no SQL column to sort on
            _logger.debug("Many2one function/related fields must be stored "
                          "to be used as ordering fields! Ignoring sorting for %s.%s",
                          self._name, order_field)
            return []

        # figure out the applicable order_by for the m2o
        dest_model = self.pool[order_field_column._obj]
        m2o_order = dest_model._order
        if not regex_order.match(m2o_order):
            # _order is complex, can't use it here, so we default to _rec_name
            m2o_order = dest_model._rec_name

        # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
        # as we don't want to exclude results that have NULL values for the m2o
        join = (alias, dest_model._table, order_field, 'id', order_field)
        dst_alias, dst_alias_statement = query.add_join(join, implicit=False, outer=True)
        return dest_model._generate_order_by_inner(dst_alias, m2o_order, query,
                                                   reverse_direction=reverse_direction, seen=seen)
    def _generate_order_by_inner(self, alias, order_spec, query, reverse_direction=False, seen=None):
        """Translate ``order_spec`` into a list of SQL ORDER BY elements for the
        table aliased as ``alias``, adding any JOINs required for many2one
        sorting to ``query``.

        :param alias: SQL alias of the table the sort fields belong to
        :param order_spec: comma-separated ``field [ASC|DESC]`` list
        :param query: Query object receiving the JOINs needed by m2o sorting
        :param reverse_direction: flip the direction of every element (used when
            recursing into a comodel whose own ordering must be reversed)
        :param seen: set of (model, comodel, field) triples already visited,
            used to avoid infinite recursion on mutually-referencing m2o orders
        :return: list of strings, one per ORDER BY element
        """
        if seen is None:
            seen = set()
        order_by_elements = []
        # validates the spec format, raises on malformed input
        self._check_qorder(order_spec)
        for order_part in order_spec.split(','):
            order_split = order_part.strip().split(' ')
            order_field = order_split[0].strip()
            order_direction = order_split[1].strip().upper() if len(order_split) == 2 else ''
            if reverse_direction:
                # note: an unspecified direction (implicitly ASC) reverses to DESC
                order_direction = 'ASC' if order_direction == 'DESC' else 'DESC'
            do_reverse = order_direction == 'DESC'
            order_column = None
            inner_clauses = []
            add_dir = False
            if order_field == 'id':
                order_by_elements.append('"%s"."%s" %s' % (alias, order_field, order_direction))
            elif order_field in self._columns:
                order_column = self._columns[order_field]
                if order_column._classic_read:
                    # plain stored column: sort directly on it
                    inner_clauses = ['"%s"."%s"' % (alias, order_field)]
                    add_dir = True
                elif order_column._type == 'many2one':
                    # sort according to the comodel's own _order; `seen` guards cycles
                    key = (self._name, order_column._obj, order_field)
                    if key not in seen:
                        seen.add(key)
                        inner_clauses = self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
                else:
                    continue  # ignore non-readable or "non-joinable" fields
            elif order_field in self._inherit_fields:
                # inherited (_inherits) field: sort via the parent model's column
                parent_obj = self.pool[self._inherit_fields[order_field][3]]
                order_column = parent_obj._columns[order_field]
                if order_column._classic_read:
                    inner_clauses = [self._inherits_join_calc(alias, order_field, query, implicit=False, outer=True)]
                    add_dir = True
                elif order_column._type == 'many2one':
                    key = (parent_obj._name, order_column._obj, order_field)
                    if key not in seen:
                        seen.add(key)
                        inner_clauses = self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
                else:
                    continue  # ignore non-readable or "non-joinable" fields
            else:
                raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
            if order_column and order_column._type == 'boolean':
                # make NULL booleans sort together with false
                inner_clauses = ["COALESCE(%s, false)" % inner_clauses[0]]
            for clause in inner_clauses:
                if add_dir:
                    order_by_elements.append("%s %s" % (clause, order_direction))
                else:
                    order_by_elements.append(clause)
        return order_by_elements
def _generate_order_by(self, order_spec, query):
"""
Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
:raise" except_orm in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = self._generate_order_by_inner(self._table, order_spec, query)
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
    def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """
        Private implementation of search() method, allowing specifying the uid to use for the access right check.
        This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
        by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
        This is ok at the security level because this method is private and not callable through XML-RPC.

        :param cr: database cursor
        :param user: current user id (used for ir.rules, and for ir.model.access
            unless ``access_rights_uid`` is given)
        :param args: search domain
        :param offset: number of records to skip
        :param limit: maximum number of records to return (None = no limit)
        :param order: ORDER BY spec; defaults to the model's ``_order``
        :param count: if True, return only the number of matching records
        :param access_rights_uid: optional user ID to use when checking access rights
                                  (not for ir.rules, this is only for ir.model.access)
        :return: list of matching record ids (duplicates removed, order kept),
            or an int when ``count`` is True
        """
        if context is None:
            context = {}
        self.check_access_rights(cr, access_rights_uid or user, 'read')

        # For transient models, restrict access to the current user, except for the super-user
        if self.is_transient() and self._log_access and user != SUPERUSER_ID:
            args = expression.AND(([('create_uid', '=', user)], args or []))

        query = self._where_calc(cr, user, args, context=context)
        self._apply_ir_rules(cr, user, query, 'read', context=context)
        order_by = self._generate_order_by(order, query)
        from_clause, where_clause, where_clause_params = query.get_sql()

        where_str = where_clause and (" WHERE %s" % where_clause) or ''

        if count:
            # Ignore order, limit and offset when just counting, they don't make sense and could
            # hurt performance
            query_str = 'SELECT count(1) FROM ' + from_clause + where_str
            cr.execute(query_str, where_clause_params)
            res = cr.fetchone()
            return res[0]

        limit_str = limit and ' limit %d' % limit or ''
        offset_str = offset and ' offset %d' % offset or ''
        query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
        cr.execute(query_str, where_clause_params)
        res = cr.fetchall()

        # TDE note: with auto_join, we could have several lines about the same result
        # i.e. a lead with several unread messages; we uniquify the result using
        # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
        def _uniquify_list(seq):
            seen = set()
            return [x for x in seq if x not in seen and not seen.add(x)]
        return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
if not args:
args = []
if field in self._inherit_fields:
return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
else:
return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
    def copy_data(self, cr, uid, id, default=None, context=None):
        """
        Copy given record's data with all its fields values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: field values to override in the original values of the copied record
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: dictionary containing all the field values to pass to create(),
            or None when the record was already copied in this traversal
        :raise IndexError: if the record to copy does not exist
        """
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        if '__copy_data_seen' not in context:
            context = dict(context, __copy_data_seen=defaultdict(list))
        seen_map = context['__copy_data_seen']
        if id in seen_map[self._name]:
            # already copied during this traversal: signal callers to skip it
            return
        seen_map[self._name].append(id)

        if default is None:
            default = {}
        if 'state' not in default:
            # reset workflow state to the model's default instead of copying it
            if 'state' in self._defaults:
                if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                else:
                    default['state'] = self._defaults['state']

        # build a black list of fields that should not be copied
        blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
        # fields owned by this model (not brought in via _inherits)
        whitelist = set(name for name, field in self._fields.iteritems() if not field.inherited)

        def blacklist_given_fields(obj):
            # blacklist the fields that are given by inheritance
            for other, field_to_other in obj._inherits.items():
                blacklist.add(field_to_other)
                if field_to_other in default:
                    # all the fields of 'other' are given by the record: default[field_to_other],
                    # except the ones redefined in self
                    blacklist.update(set(self.pool[other]._fields) - whitelist)
                else:
                    blacklist_given_fields(self.pool[other])
            # blacklist deprecated fields
            for name, field in obj._fields.iteritems():
                if field.deprecated:
                    blacklist.add(name)

        blacklist_given_fields(self)

        # fields to copy: copyable, not overridden by `default`, not blacklisted
        fields_to_copy = dict((f,fi) for f, fi in self._fields.iteritems()
                                     if fi.copy
                                     if f not in default
                                     if f not in blacklist)

        data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
        if data:
            data = data[0]
        else:
            raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))

        res = dict(default)
        for f, field in fields_to_copy.iteritems():
            if field.type == 'many2one':
                # read() returns (id, display_name) pairs for m2o: keep the id only
                res[f] = data[f] and data[f][0]
            elif field.type == 'one2many':
                other = self.pool[field.comodel_name]
                # duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translation()!
                lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
                # the lines are duplicated using the wrong (old) parent, but then
                # are reassigned to the correct one thanks to the (0, 0, ...)
                res[f] = [(0, 0, line) for line in lines if line]
            elif field.type == 'many2many':
                # link the copy to the same set of records as the original
                res[f] = [(6, 0, data[f])]
            else:
                res[f] = data[f]

        return res
    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        """Duplicate the ir.translation entries of record ``old_id`` onto
        record ``new_id``, recursing through one2many children, which are
        paired by sorted id exactly as :meth:`copy_data` ordered them.

        :param old_id: id of the source record
        :param new_id: id of the freshly created copy
        """
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        if '__copy_translations_seen' not in context:
            context = dict(context, __copy_translations_seen=defaultdict(list))
        seen_map = context['__copy_translations_seen']
        if old_id in seen_map[self._name]:
            return
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')

        for field_name, field in self._fields.iteritems():
            if not field.copy:
                continue
            # removing the lang to compare untranslated values
            context_wo_lang = dict(context, lang=None)
            old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
            # we must recursively copy the translations for o2o and o2m
            if field.type == 'one2many':
                target_obj = self.pool[field.comodel_name]
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(r.id for r in old_record[field_name])
                new_children = sorted(r.id for r in new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif getattr(field, 'translate', False):
                if field_name in self._columns:
                    # local field: translations are attached to this record
                    trans_name = self._name + "," + field_name
                    target_id = new_id
                    source_id = old_id
                elif field_name in self._inherit_fields:
                    # inherited field: translations live on the parent record
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                    # get the id of the parent record to set the translation
                    inherit_field_name = self._inherit_fields[field_name][1]
                    target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id
                else:
                    continue

                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', source_id)
                ])
                user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    del record['id']
                    # remove source to avoid triggering _set_src
                    del record['source']
                    # duplicated record is not linked to any module
                    del record['module']
                    record.update({'res_id': target_id})
                    if user_lang and user_lang == record['lang']:
                        # 'source' to force the call to _set_src
                        # 'value' needed if value is changed in copy(), want to see the new_value
                        record['source'] = old_record[field_name]
                        record['value'] = new_record[field_name]
                    trans_obj.create(cr, uid, record, context=context)
@api.returns('self', lambda value: value.id)
def copy(self, cr, uid, id, default=None, context=None):
""" copy(default=None)
Duplicate record with given id updating it with default values
:param dict default: dictionary of field values to override in the
original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
:returns: new record
"""
if context is None:
context = {}
context = context.copy()
data = self.copy_data(cr, uid, id, default, context)
new_id = self.create(cr, uid, data, context)
self.copy_translations(cr, uid, id, new_id, context)
return new_id
@api.multi
@api.returns('self')
def exists(self):
""" exists() -> records
Returns the subset of records in ``self`` that exist, and marks deleted
records as such in cache. It can be used as a test on records::
if record.exists():
...
By convention, new records are returned as existing.
"""
ids, new_ids = [], []
for i in self._ids:
(ids if isinstance(i, (int, long)) else new_ids).append(i)
if not ids:
return self
query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
self._cr.execute(query, [tuple(ids)])
ids = [r[0] for r in self._cr.fetchall()]
existing = self.browse(ids + new_ids)
if len(existing) < len(self):
# mark missing records in cache with a failed value
exc = MissingError(_("Record does not exist or has been deleted."))
(self - existing)._cache.update(FailedValue(exc))
return existing
def check_recursion(self, cr, uid, ids, context=None, parent=None):
_logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
self._name)
assert parent is None or parent in self._columns or parent in self._inherit_fields,\
"The 'parent' parameter passed to check_recursion() must be None or a valid field name"
return self._check_recursion(cr, uid, ids, context, parent)
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param parent: optional parent field name (default: ``self._parent_name = parent_id``)
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
parent = self._parent_name
# must ignore 'active' flag, ir.rules, etc. => direct SQL query
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in ids:
current_id = id
while current_id is not None:
cr.execute(query, (current_id,))
result = cr.fetchone()
current_id = result[0] if result else None
if current_id == id:
return False
return True
    def _check_m2m_recursion(self, cr, uid, ids, field_name):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param field_name: field to check; must be a stored many2many
            pointing back to this very model
        :raise ValueError: if ``field_name`` is not such a field
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        field = self._fields.get(field_name)
        if not (field and field.type == 'many2many' and
                field.comodel_name == self._name and field.store):
            # field must be a many2many on itself
            raise ValueError('invalid field_name: %r' % (field_name,))

        # walk the relation table level by level, breadth-first
        query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % \
                    (field.column2, field.relation, field.column1)
        ids_parent = ids[:]
        while ids_parent:
            ids_parent2 = []
            # chunk the IN clause to respect the cursor's maximum size
            for i in range(0, len(ids_parent), cr.IN_MAX):
                j = i + cr.IN_MAX
                sub_ids_parent = ids_parent[i:j]
                cr.execute(query, (tuple(sub_ids_parent),))
                ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
            ids_parent = ids_parent2
            # NOTE(review): a cycle among ancestors that never reaches back into
            # ``ids`` would keep ``ids_parent`` non-empty forever and hang this
            # loop — confirm callers only use this on data where that cannot occur
            for i in ids_parent:
                if i in ids:
                    return False
        return True
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID(s) of any database record.
**Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
:return: map of ids to the list of their fully qualified External IDs
in the form ``module.key``, or an empty list when there's no External
ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
'id2': [] }
"""
ir_model_data = self.pool.get('ir.model.data')
data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
result = {}
for id in ids:
# can't use dict.fromkeys() as the list would be shared!
result[id] = []
for record in data_results:
result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
def get_external_id(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
model object easily, referencing it as ``Model.get_external_id``.
When multiple External IDs exist for a record, only one
of them is returned (randomly).
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
(to be usable as a function field),
e.g.::
{ 'id': 'module.ext_id',
'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
for k, v in results.iteritems():
if results[k]:
results[k] = v[0]
else:
results[k] = ''
return results
# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
def print_report(self, cr, uid, ids, name, data, context=None):
"""
Render the report ``name`` for the given IDs. The report must be defined
for this model, not another.
"""
report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
assert self._name == report.table
return report.create(cr, uid, ids, data, context)
# Transience
    @classmethod
    def is_transient(cls):
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        # `_transient` is a class attribute set by the model's definition
        return cls._transient
def _transient_clean_rows_older_than(self, cr, seconds):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
# Never delete rows used in last 5 minutes
seconds = max(seconds, 300)
query = ("SELECT id FROM " + self._table + " WHERE"
" COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
" < ((now() at time zone 'UTC') - interval %s)")
cr.execute(query, ("%s seconds" % seconds,))
ids = [x[0] for x in cr.fetchall()]
self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, max_count):
# Check how many rows we have in the table
cr.execute("SELECT count(*) AS row_count FROM " + self._table)
res = cr.fetchall()
if res[0][0] <= max_count:
return # max not reached, nothing to do
self._transient_clean_rows_older_than(cr, 300)
    def _transient_vacuum(self, cr, uid, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_max_age" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
        a new record is created).

        :param force: when True, vacuum immediately instead of waiting for the
            call-count threshold to be reached

        Example with both max_hours and max_count active:

        Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.

        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
          would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        _transient_check_time = 20          # arbitrary limit on vacuum executions
        # throttle: only every `_transient_check_time`-th call actually vacuums
        self._transient_check_count += 1
        if not force and (self._transient_check_count < _transient_check_time):
            return True  # no vacuum cleaning this time
        self._transient_check_count = 0

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)

        return True
    def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
        """ Serializes one2many and many2many commands into record dictionaries
            (as if all the records came from the database via a read()). This
            method is aimed at onchange methods on one2many and many2many fields.

            Because commands might be creation commands, not all record dicts
            will contain an ``id`` field.  Commands matching an existing record
            will have an ``id``.

            :param field_name: name of the one2many or many2many field matching the commands
            :type field_name: str
            :param commands: one2many or many2many commands to execute on ``field_name``
            :type commands: list((int|False, int|False, dict|False))
            :param fields: list of fields to read from the database, when applicable
            :type fields: list(str)
            :returns: records in a shape similar to that returned by ``read()``
                (except records may be missing the ``id`` field if they don't exist in db)
            :rtype: list(dict)
        """
        result = []             # result (list of dict)
        record_ids = []         # ids of records to read
        updates = {}            # {id: dict} of updates on particular records

        for command in commands or []:
            if not isinstance(command, (list, tuple)):
                # bare id: treated like a LINK (4) command
                record_ids.append(command)
            elif command[0] == 0:
                # CREATE: values are complete, no database read needed
                result.append(command[2])
            elif command[0] == 1:
                # UPDATE: read the record, then overlay the given values
                record_ids.append(command[1])
                updates.setdefault(command[1], {}).update(command[2])
            elif command[0] in (2, 3):
                # DELETE / UNLINK: drop the record from the pending reads
                record_ids = [id for id in record_ids if id != command[1]]
            elif command[0] == 4:
                # LINK: read the record as-is
                record_ids.append(command[1])
            elif command[0] == 5:
                # CLEAR: forget everything collected so far
                result, record_ids = [], []
            elif command[0] == 6:
                # SET: replace the whole collection with the given ids
                result, record_ids = [], list(command[2])

        # read the records and apply the updates
        other_model = self.pool[self._fields[field_name].comodel_name]
        for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
            record.update(updates.get(record['id'], {}))
            result.append(record)

        return result

    # for backward compatibility
    resolve_o2m_commands_to_record_dicts = resolve_2many_commands
    def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
        """
        Performs a ``search()`` followed by a ``read()``.

        :param cr: database cursor
        :param uid: current user id
        :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
        :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
        :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
        :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
        :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
        :param context: context arguments.
        :return: List of dictionaries containing the asked fields, in the
            order produced by the search.
        :rtype: List of dictionaries.
        """
        record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
        if not record_ids:
            return []

        if fields and fields == ['id']:
            # shortcut read if we only want the ids
            return [{'id': id} for id in record_ids]

        # read() ignores active_test, but it would forward it to any downstream search call
        # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
        # was presumably only meant for the main search().
        # TODO: Move this to read() directly?
        read_ctx = dict(context or {})
        read_ctx.pop('active_test', None)

        result = self.read(cr, uid, record_ids, fields, context=read_ctx)
        if len(result) <= 1:
            return result

        # reorder read: read() may return rows in any order, so map them back
        # to the ordering produced by search() (dropping ids read() omitted)
        index = dict((r['id'], r) for r in result)
        return [index[x] for x in record_ids if x in index]
    def _register_hook(self, cr):
        """ stuff to do right after the registry is built """
        # intentionally empty: models override this to patch or extend
        # behavior once every model in the registry has been loaded
        pass
    @classmethod
    def _patch_method(cls, name, method):
        """ Monkey-patch a method for all instances of this model. This replaces
        the method called ``name`` by ``method`` in the given class.
        The original method is then accessible via ``method.origin``, and it
        can be restored with :meth:`~._revert_method`.

        Example::

            @api.multi
            def do_write(self, values):
                # do stuff, and call the original method
                return do_write.origin(self, values)

            # patch method write of model
            model._patch_method('write', do_write)

            # this will call do_write
            records = model.search([...])
            records.write(...)

            # restore the original method
            model._revert_method('write')
        """
        origin = getattr(cls, name)
        # expose the original on the raw method so the patch body can call it
        method.origin = origin
        # propagate decorators from origin to method, and apply api decorator
        wrapped = api.guess(api.propagate(origin, method))
        # also expose it on the wrapper so _revert_method() can restore it
        wrapped.origin = origin
        setattr(cls, name, wrapped)
@classmethod
def _revert_method(cls, name):
""" Revert the original method called ``name`` in the given class.
See :meth:`~._patch_method`.
"""
method = getattr(cls, name)
setattr(cls, name, method.origin)
#
# Instance creation
#
# An instance represents an ordered collection of records in a given
# execution environment. The instance object refers to the environment, and
# the records themselves are represented by their cache dictionary. The 'id'
# of each record is found in its corresponding cache dictionary.
#
# This design has the following advantages:
# - cache access is direct and thus fast;
# - one can consider records without an 'id' (see new records);
# - the global cache is only an index to "resolve" a record 'id'.
#
    @classmethod
    def _browse(cls, env, ids):
        """ Create an instance attached to ``env``; ``ids`` is a tuple of record
        ids.
        """
        # bypass __init__: a recordset is fully defined by (env, _ids)
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        # register the ids so later field reads can be batch-prefetched
        env.prefetch[cls._name].update(ids)
        return records
    @api.v7
    def browse(self, cr, uid, arg=None, context=None):
        """Old-API variant of :meth:`browse`: builds an Environment from
        ``(cr, uid, context)`` before creating the recordset."""
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(Environment(cr, uid, context or {}), ids)
    @api.v8
    def browse(self, arg=None):
        """ browse([ids]) -> records

        Returns a recordset for the ids provided as parameter in the current
        environment.

        Can take no ids, a single id or a sequence of ids.
        """
        # _normalize_ids turns None / scalar / iterable into a tuple of ids
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(self.env, ids)
#
# Internal properties, for manipulating the instance's implementation
#
    @property
    def ids(self):
        """ List of actual record ids in this recordset (ignores placeholder
        ids for records to create)
        """
        # presumably NewId placeholders evaluate falsy so filter(None, ...)
        # drops them — TODO confirm against NewId's truth value.
        # Note: Python 2 filter() returns a list here.
        return filter(None, list(self._ids))
# backward-compatibility with former browse records
_cr = property(lambda self: self.env.cr)
_uid = property(lambda self: self.env.uid)
_context = property(lambda self: self.env.context)
#
# Conversion methods
#
def ensure_one(self):
""" Verifies that the current recorset holds a single record. Raises
an exception otherwise.
"""
if len(self) == 1:
return self
raise except_orm("ValueError", "Expected singleton: %s" % self)
    def with_env(self, env):
        """ Returns a new version of this recordset attached to the provided
        environment

        .. warning::
            The new environment will not benefit from the current
            environment's data cache, so later data access may incur extra
            delays while re-fetching from the database.

        :type env: :class:`~openerp.api.Environment`
        :return: recordset with the same ids, bound to ``env``
        """
        return self._browse(env, self._ids)
    def sudo(self, user=SUPERUSER_ID):
        """ sudo([user=SUPERUSER])

        Returns a new version of this recordset attached to the provided
        user.

        :param user: database id of the user to run as (``SUPERUSER_ID``
            by default)

        By default this returns a `SUPERUSER` recordset, where access control
        and record rules are bypassed.

        .. note::
            Using `sudo` could cause data access to cross the boundaries of
            record rules, possibly mixing records that are meant to be
            isolated (e.g. records from different companies in multi-company
            environments).

            It may lead to un-intuitive results in methods which select one
            record among many - for example getting the default company, or
            selecting a Bill of Materials.

        .. note::
            Because the record rules and access control will have to be
            re-evaluated, the new recordset will not benefit from the current
            environment's data cache, so later data access may incur extra
            delays while re-fetching from the database.
        """
        # re-derive the environment with only the user changed
        return self.with_env(self.env(user=user))
def with_context(self, *args, **kwargs):
""" with_context([context][, **overrides]) -> records
Returns a new version of this recordset attached to an extended
context.
The extended context is either the provided ``context`` in which
``overrides`` are merged or the *current* context in which
``overrides`` are merged e.g.::
# current context is {'key1': True}
r2 = records.with_context({}, key2=True)
# -> r2._context is {'key2': True}
r2 = records.with_context(key2=True)
# -> r2._context is {'key1': True, 'key2': True}
"""
context = dict(args[0] if args else self._context, **kwargs)
return self.with_env(self.env(context=context))
    def _convert_to_cache(self, values, update=False, validate=True):
        """ Convert the ``values`` dictionary into cached values.

        :param update: whether the conversion is made for updating ``self``;
            this is necessary for interpreting the commands of *2many fields
        :param validate: whether values must be checked
        :return: dict mapping field names to cache-format values; names in
            ``values`` that are not fields of the model are silently dropped
        """
        fields = self._fields
        # conversion target: the record itself when updating, else a null record
        target = self if update else self.browse()
        return {
            name: fields[name].convert_to_cache(value, target, validate=validate)
            for name, value in values.iteritems()
            if name in fields
        }
    def _convert_to_write(self, values):
        """ Convert the ``values`` dictionary into the format of :meth:`write`.

        Unknown field names are dropped, and so are values converting to a
        ``NewId`` (pseudo-ids of not-yet-created records cannot be written).
        """
        fields = self._fields
        result = {}
        for name, value in values.iteritems():
            if name in fields:
                value = fields[name].convert_to_write(value)
                if not isinstance(value, NewId):
                    result[name] = value
        return result
#
# Record traversal and update
#
    def _mapped_func(self, func):
        """ Apply function ``func`` on all records in ``self``, and return the
        result as a list or a recordset (if ``func`` returns recordsets).

        On an empty recordset, ``func`` is still called once on ``self`` so
        field accessors can yield an empty recordset of the right comodel.
        """
        if self:
            vals = [func(rec) for rec in self]
            if isinstance(vals[0], BaseModel):
                # return the union of all recordsets in O(n)
                ids = set(itertools.chain(*[rec._ids for rec in vals]))
                return vals[0].browse(ids)
            return vals
        else:
            vals = func(self)
            # non-recordset results on an empty input collapse to []
            return vals if isinstance(vals, BaseModel) else []
def mapped(self, func):
""" Apply ``func`` on all records in ``self``, and return the result as a
list or a recordset (if ``func`` return recordsets). In the latter
case, the order of the returned recordset is arbitrary.
:param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
recs = self
for name in func.split('.'):
recs = recs._mapped_func(operator.itemgetter(name))
return recs
else:
return self._mapped_func(func)
    def _mapped_cache(self, name_seq):
        """ Same as `~.mapped`, but ``name_seq`` is a dot-separated sequence of
        field names, and only cached values are used.
        """
        recs = self
        for name in name_seq.split('.'):
            field = recs._fields[name]
            # records absent from the cache fall back to the field's null value
            null = field.null(self.env)
            recs = recs.mapped(lambda rec: rec._cache.get(field, null))
        return recs
def filtered(self, func):
""" Select the records in ``self`` such that ``func(rec)`` is true, and
return them as a recordset.
:param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
name = func
func = lambda rec: filter(None, rec.mapped(name))
return self.browse([rec.id for rec in self if func(rec)])
    def sorted(self, key=None, reverse=False):
        """ Return the recordset ``self`` ordered by ``key``.

            :param key: either a function of one argument that returns a
                comparison key for each record, or ``None``, in which case
                records are ordered according the default model's order
            :param reverse: if ``True``, return the result in reverse order
        """
        if key is None:
            # rely on search() to apply the model's default _order
            recs = self.search([('id', 'in', self.ids)])
            return self.browse(reversed(recs._ids)) if reverse else recs
        else:
            # itemgetter('id') goes through __getitem__, i.e. the id field
            return self.browse(map(itemgetter('id'), sorted(self, key=key, reverse=reverse)))
def update(self, values):
""" Update record `self[0]` with ``values``. """
for name, value in values.iteritems():
self[name] = value
#
# New records - represent records that do not exist in the database yet;
# they are used to perform onchanges.
#
@api.model
def new(self, values={}):
""" new([values]) -> record
Return a new record instance attached to the current environment and
initialized with the provided ``value``. The record is *not* created
in database, it only exists in memory.
"""
record = self.browse([NewId()])
record._cache.update(record._convert_to_cache(values, update=True))
if record.env.in_onchange:
# The cache update does not set inverse fields, so do it manually.
# This is useful for computing a function field on secondary
# records, if that field depends on the main record.
for name in values:
field = self._fields.get(name)
if field:
for invf in field.inverse_fields:
invf._update(record[name], record)
return record
#
# Dirty flags, to mark record fields modified (in draft mode)
#
def _is_dirty(self):
""" Return whether any record in ``self`` is dirty. """
dirty = self.env.dirty
return any(record in dirty for record in self)
def _get_dirty(self):
""" Return the list of field names for which ``self`` is dirty. """
dirty = self.env.dirty
return list(dirty.get(self, ()))
def _set_dirty(self, field_name):
""" Mark the records in ``self`` as dirty for the given ``field_name``. """
dirty = self.env.dirty
for record in self:
dirty[record].add(field_name)
#
# "Dunder" methods
#
    def __nonzero__(self):
        """ Test whether ``self`` is nonempty. """
        # instances without ``_ids`` (e.g. the model class itself) are truthy
        return bool(getattr(self, '_ids', True))
    def __len__(self):
        """ Return the size of ``self`` (number of records). """
        return len(self._ids)
def __iter__(self):
""" Return an iterator over ``self``. """
for id in self._ids:
yield self._browse(self.env, (id,))
    def __contains__(self, item):
        """ Test whether ``item`` (record or field name) is an element of ``self``.
            In the first case, the test is fully equivalent to::

                any(item == record for record in self)
        """
        if isinstance(item, BaseModel) and self._name == item._name:
            # a multi-record item is never considered an element of self
            return len(item) == 1 and item.id in self._ids
        elif isinstance(item, basestring):
            # a string tests field-name membership on the model
            return item in self._fields
        else:
            # note: a recordset of a *different* model also lands here
            raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
def __add__(self, other):
""" Return the concatenation of two recordsets. """
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
return self.browse(self._ids + other._ids)
def __sub__(self, other):
""" Return the recordset of all the records in ``self`` that are not in ``other``. """
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
other_ids = set(other._ids)
return self.browse([id for id in self._ids if id not in other_ids])
def __and__(self, other):
""" Return the intersection of two recordsets.
Note that recordset order is not preserved.
"""
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
return self.browse(set(self._ids) & set(other._ids))
def __or__(self, other):
""" Return the union of two recordsets.
Note that recordset order is not preserved.
"""
if not isinstance(other, BaseModel) or self._name != other._name:
raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
return self.browse(set(self._ids) | set(other._ids))
    def __eq__(self, other):
        """ Test whether two recordsets are equivalent (up to reordering). """
        if not isinstance(other, BaseModel):
            # comparing against a falsy non-recordset (e.g. False) is common
            # and stays silent; a truthy one is logged with its call site
            if other:
                filename, lineno = frame_codeinfo(currentframe(), 1)
                _logger.warning("Comparing apples and oranges: %r == %r (%s:%s)",
                                self, other, filename, lineno)
            return False
        return self._name == other._name and set(self._ids) == set(other._ids)
    def __ne__(self, other):
        """ Counterpart of :meth:`__eq__`: inequality up to reordering. """
        return not self == other
    def __lt__(self, other):
        """ Test whether ``self``'s ids form a proper subset of ``other``'s. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
        return set(self._ids) < set(other._ids)
    def __le__(self, other):
        """ Test whether ``self``'s ids form a subset of ``other``'s. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
        return set(self._ids) <= set(other._ids)
    def __gt__(self, other):
        """ Test whether ``self``'s ids form a proper superset of ``other``'s. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
        return set(self._ids) > set(other._ids)
    def __ge__(self, other):
        """ Test whether ``self``'s ids form a superset of ``other``'s. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
        return set(self._ids) >= set(other._ids)
    def __int__(self):
        """ Return the id of the first record (via the ``id`` pseudo-field). """
        return self.id
    def __str__(self):
        """ Return e.g. ``model.name(1, 2)``; model name alone if ``_ids`` is absent. """
        return "%s%s" % (self._name, getattr(self, '_ids', ""))
    def __unicode__(self):
        """ Python 2 unicode counterpart of :meth:`__str__`. """
        return unicode(str(self))
    # repr() shares the str() representation
    __repr__ = __str__
def __hash__(self):
if hasattr(self, '_ids'):
return hash((self._name, frozenset(self._ids)))
else:
return hash(self._name)
    def __getitem__(self, key):
        """ If ``key`` is an integer or a slice, return the corresponding record
            selection as an instance (attached to ``self.env``).
            Otherwise read the field ``key`` of the first record in ``self``.

            Examples::

                inst = model.search(dom)    # inst is a recordset
                r4 = inst[3]                # fourth record in inst
                rs = inst[10:20]            # subset of inst
                nm = rs['name']             # name of first record in inst
        """
        if isinstance(key, basestring):
            # important: one must call the field's getter
            return self._fields[key].__get__(self, type(self))
        elif isinstance(key, slice):
            return self._browse(self.env, self._ids[key])
        else:
            # integer index: wrap the single id in a one-record recordset
            return self._browse(self.env, (self._ids[key],))
    def __setitem__(self, key, value):
        """ Assign the field ``key`` to ``value`` in record ``self``. """
        # important: one must call the field's setter
        return self._fields[key].__set__(self, value)
#
# Cache and recomputation management
#
    @lazy_property
    def _cache(self):
        """ Return the cache of ``self``, mapping field names to values. """
        # lazy_property: the RecordCache proxy is built on first access
        return RecordCache(self)
    @api.model
    def _in_cache_without(self, field):
        """ Make sure ``self`` is present in cache (for prefetching), and return
            the records of model ``self`` in cache that have no value for ``field``
            (:class:`Field` instance).
        """
        env = self.env
        prefetch_ids = env.prefetch[self._name]
        prefetch_ids.update(self._ids)
        # filter(None, ...) keeps only truthy ids (drops falsy pseudo-ids)
        ids = filter(None, prefetch_ids - set(env.cache[field]))
        return self.browse(ids)
    @api.model
    def refresh(self):
        """ Clear the records cache.

            .. deprecated:: 8.0
                The record cache is automatically invalidated.
        """
        # kept for backward compatibility; simply delegates to the new API
        self.invalidate_cache()
    @api.model
    def invalidate_cache(self, fnames=None, ids=None):
        """ Invalidate the record caches after some records have been modified.
            If both ``fnames`` and ``ids`` are ``None``, the whole cache is cleared.

            :param fnames: the list of modified fields, or ``None`` for all fields
            :param ids: the list of modified record ids, or ``None`` for all
        """
        if fnames is None:
            if ids is None:
                return self.env.invalidate_all()
            fields = self._fields.values()
        else:
            fields = map(self._fields.__getitem__, fnames)
        # invalidate fields and inverse fields, too; inverse fields are
        # invalidated for all ids (None), since the reverse mapping is unknown
        spec = [(f, ids) for f in fields] + \
               [(invf, None) for f in fields for invf in f.inverse_fields]
        self.env.invalidate(spec)
    @api.multi
    def modified(self, fnames):
        """ Notify that fields have been modified on ``self``. This invalidates
            the cache, and prepares the recomputation of stored function fields
            (new-style fields only).

            :param fnames: iterable of field names that have been modified on
                records ``self``
        """
        # each field knows what to invalidate and recompute
        spec = []
        for fname in fnames:
            spec += self._fields[fname].modified(self)
        # collect every field currently cached in any environment
        cached_fields = {
            field
            for env in self.env.all
            for field in env.cache
        }
        # invalidate non-stored fields.function which are currently cached
        spec += [(f, None) for f in self.pool.pure_function_fields
                 if f in cached_fields]
        self.env.invalidate(spec)
    def _recompute_check(self, field):
        """ If ``field`` must be recomputed on some record in ``self``, return the
            corresponding records that must be recomputed.
        """
        # delegates to the environment's recomputation bookkeeping
        return self.env.check_todo(field, self)
    def _recompute_todo(self, field):
        """ Mark ``field`` to be recomputed on the records in ``self``. """
        self.env.add_todo(field, self)
    def _recompute_done(self, field):
        """ Mark ``field`` as recomputed on the records in ``self``. """
        self.env.remove_todo(field, self)
    @api.model
    def recompute(self):
        """ Recompute stored function fields. The fields and records to
            recompute have been determined by method :meth:`modified`.
        """
        # drain the environment's todo queue until empty
        while self.env.has_todo():
            field, recs = self.env.get_todo()
            # evaluate the fields to recompute, and save them to database
            names = [
                f.name
                for f in field.computed_fields
                if f.store and self.env.field_todo(f)
            ]
            for rec in recs:
                try:
                    values = rec._convert_to_write({
                        name: rec[name] for name in names
                    })
                    # avoid re-triggering recomputation while writing results
                    with rec.env.norecompute():
                        rec._write(values)
                except MissingError:
                    # the record was deleted meanwhile; skip it silently
                    pass
            # mark the computed fields as done (py2: map() runs eagerly)
            map(recs._recompute_done, field.computed_fields)
#
# Generic onchange method
#
    def _has_onchange(self, field, other_fields):
        """ Return whether ``field`` should trigger an onchange event in the
            presence of ``other_fields`` (a collection supporting membership
            tests on fields).
        """
        # test whether self has an onchange method for field, or field is a
        # dependency of any field in other_fields
        return field.name in self._onchange_methods or \
            any(dep in other_fields for dep in field.dependents)
    @api.model
    def _onchange_spec(self, view_info=None):
        """ Return the onchange spec from a view description; if not given, the
            result of ``self.fields_view_get()`` is used.
            Keys of the result are dotted field paths (e.g. ``"line_ids.name"``)
            and values are the raw ``on_change`` attribute strings (or None).
        """
        result = {}

        # for traversing the XML arch and populating result
        def process(node, info, prefix):
            if node.tag == 'field':
                name = node.attrib['name']
                names = "%s.%s" % (prefix, name) if prefix else name
                # first occurrence of a field path wins
                if not result.get(names):
                    result[names] = node.attrib.get('on_change')
                # traverse the subviews included in relational fields
                for subinfo in info['fields'][name].get('views', {}).itervalues():
                    process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)

        if view_info is None:
            view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result
    def _onchange_eval(self, field_name, onchange, result):
        """ Apply onchange method(s) for field ``field_name`` with spec ``onchange``
            on record ``self``. Value assignments are applied on ``self``, while
            domain and warning messages are put in dictionary ``result``.
        """
        onchange = onchange.strip()

        # onchange V8: spec is just a truthy marker; the registered methods are
        # looked up on the model itself
        if onchange in ("1", "true"):
            for method in self._onchange_methods.get(field_name, ()):
                method_res = method(self)
                if not method_res:
                    continue
                if 'domain' in method_res:
                    result.setdefault('domain', {}).update(method_res['domain'])
                if 'warning' in method_res:
                    if result.get('warning'):
                        if method_res['warning']:
                            # Concatenate multiple warnings
                            warning = result['warning']
                            warning['message'] = '\n\n'.join(filter(None, [
                                warning.get('title'),
                                warning.get('message'),
                                method_res['warning'].get('title'),
                                method_res['warning'].get('message')
                            ]))
                            warning['title'] = _('Warnings')
                    else:
                        result['warning'] = method_res['warning']
            return

        # onchange V7: spec is a method call string from the view arch
        match = onchange_v7.match(onchange)
        if match:
            method, params = match.groups()

            # evaluate params -> tuple
            global_vars = {'context': self._context, 'uid': self._uid}
            if self._context.get('field_parent'):
                # expose the parent record's field values as raw onchange values
                class RawRecord(object):
                    def __init__(self, record):
                        self._record = record
                    def __getattr__(self, name):
                        field = self._record._fields[name]
                        value = self._record[name]
                        return field.convert_to_onchange(value)
                record = self[self._context['field_parent']]
                global_vars['parent'] = RawRecord(record)
            field_vars = {
                key: self._fields[key].convert_to_onchange(val)
                for key, val in self._cache.iteritems()
            }
            # NOTE(review): ``params`` comes from the view's on_change attribute
            # and is eval'ed with record fields in scope; view archs are
            # server-managed data, but confirm no user-provided archs reach here
            params = eval("[%s]" % params, global_vars, field_vars)

            # call onchange method with context when possible
            args = (self._cr, self._uid, self._origin.ids) + tuple(params)
            try:
                method_res = getattr(self._model, method)(*args, context=self._context)
            except TypeError:
                # some legacy methods do not accept a ``context`` keyword
                method_res = getattr(self._model, method)(*args)

            if not isinstance(method_res, dict):
                return
            if 'value' in method_res:
                # 'id' must never be written back into the cache
                method_res['value'].pop('id', None)
                self.update(self._convert_to_cache(method_res['value'], validate=False))
            if 'domain' in method_res:
                result.setdefault('domain', {}).update(method_res['domain'])
            if 'warning' in method_res:
                if result.get('warning'):
                    if method_res['warning']:
                        # Concatenate multiple warnings
                        warning = result['warning']
                        warning['message'] = '\n\n'.join(filter(None, [
                            warning.get('title'),
                            warning.get('message'),
                            method_res['warning'].get('title'),
                            method_res['warning'].get('message')
                        ]))
                        warning['title'] = _('Warnings')
                else:
                    result['warning'] = method_res['warning']
    @api.multi
    def onchange(self, values, field_name, field_onchange):
        """ Perform an onchange on the given field.

            :param values: dictionary mapping field names to values, giving the
                current state of modification
            :param field_name: name of the modified field, or list of field
                names (in view order), or False
            :param field_name: name of the modified field, or list of field
                names (in view order), or False
            :param field_onchange: dictionary mapping field names to their
                on_change attribute
        """
        env = self.env

        if isinstance(field_name, list):
            names = field_name
        elif field_name:
            names = [field_name]
        else:
            names = []

        # unknown field names abort the whole onchange
        if not all(name in self._fields for name in names):
            return {}

        # determine subfields for field.convert_to_write() below
        secondary = []
        subfields = defaultdict(set)
        for dotname in field_onchange:
            if '.' in dotname:
                secondary.append(dotname)
                name, subname = dotname.split('.')
                subfields[name].add(subname)

        # create a new record with values, and attach ``self`` to it
        with env.do_in_onchange():
            record = self.new(values)
            values = dict(record._cache)
            # attach ``self`` with a different context (for cache consistency)
            record._origin = self.with_context(__onchange=True)

        # load fields on secondary records, to avoid false changes
        with env.do_in_onchange():
            for field_seq in secondary:
                record.mapped(field_seq)

        # determine which field(s) should be triggered an onchange
        todo = list(names) or list(values)
        done = set()

        # dummy assignment: trigger invalidations on the record
        for name in todo:
            if name == 'id':
                continue
            value = record[name]
            field = self._fields[name]
            if field.type == 'many2one' and field.delegate and not value:
                # do not nullify all fields of parent record for new records
                continue
            record[name] = value

        result = {'value': {}}

        # fixpoint loop: fields modified by an onchange are appended to todo,
        # so their own onchanges run too (each name processed at most once)
        while todo:
            name = todo.pop(0)
            if name in done:
                continue
            done.add(name)
            with env.do_in_onchange():
                # apply field-specific onchange methods
                if field_onchange.get(name):
                    record._onchange_eval(name, field_onchange[name], result)

                # force re-evaluation of function fields on secondary records
                for field_seq in secondary:
                    record.mapped(field_seq)

                # determine which fields have been modified
                # (NB: the loop variable ``name`` shadows the field just processed)
                for name, oldval in values.iteritems():
                    field = self._fields[name]
                    newval = record[name]
                    if field.type in ('one2many', 'many2many'):
                        if newval != oldval or newval._is_dirty():
                            # put new value in result
                            result['value'][name] = field.convert_to_write(
                                newval, record._origin, subfields.get(name),
                            )
                            todo.append(name)
                        else:
                            # keep result: newval may have been dirty before
                            pass
                    else:
                        if newval != oldval:
                            # put new value in result
                            result['value'][name] = field.convert_to_write(
                                newval, record._origin, subfields.get(name),
                            )
                            todo.append(name)
                        else:
                            # clean up result to not return another value
                            result['value'].pop(name, None)

        # At the moment, the client does not support updates on a *2many field
        # while this one is modified by the user.
        if field_name and not isinstance(field_name, list) and \
                self._fields[field_name].type in ('one2many', 'many2many'):
            result['value'].pop(field_name, None)

        return result
class RecordCache(MutableMapping):
    """ Implements a proxy dictionary to read/update the cache of a record.
        Upon iteration, it looks like a dictionary mapping field names to
        values. However, fields may be used as keys as well.

        Entries holding a ``SpecialValue`` are treated as absent by
        :meth:`__contains__` and iteration; :meth:`__getitem__` unwraps them
        through their ``get()`` method.
    """
    def __init__(self, records):
        # the recordset whose cache this proxy exposes
        self._recs = records

    def contains(self, field):
        """ Return whether `records[0]` has a value for ``field`` in cache. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        return self._recs.id in self._recs.env.cache[field]

    def __contains__(self, field):
        """ Return whether `records[0]` has a regular value for ``field`` in cache. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        dummy = SpecialValue(None)
        value = self._recs.env.cache[field].get(self._recs.id, dummy)
        return not isinstance(value, SpecialValue)

    def __getitem__(self, field):
        """ Return the cached value of ``field`` for `records[0]`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        value = self._recs.env.cache[field][self._recs.id]
        # a SpecialValue's get() either returns the value or raises
        return value.get() if isinstance(value, SpecialValue) else value

    def __setitem__(self, field, value):
        """ Assign the cached value of ``field`` for all records in ``records``. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        values = dict.fromkeys(self._recs._ids, value)
        self._recs.env.cache[field].update(values)

    def update(self, *args, **kwargs):
        """ Update the cache of all records in ``records``. If the argument is a
            ``SpecialValue``, update all fields (except "magic" columns).
        """
        if args and isinstance(args[0], SpecialValue):
            values = dict.fromkeys(self._recs._ids, args[0])
            for name, field in self._recs._fields.iteritems():
                if name != 'id':
                    self._recs.env.cache[field].update(values)
        else:
            return super(RecordCache, self).update(*args, **kwargs)

    def __delitem__(self, field):
        """ Remove the cached value of ``field`` for all ``records``. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        field_cache = self._recs.env.cache[field]
        for id in self._recs._ids:
            field_cache.pop(id, None)

    def __iter__(self):
        """ Iterate over the field names with a regular value in cache. """
        cache, id = self._recs.env.cache, self._recs.id
        dummy = SpecialValue(None)
        for name, field in self._recs._fields.iteritems():
            if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name

    def __len__(self):
        """ Return the number of fields with a regular value in cache. """
        return sum(1 for name in self)
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _auto = True                # automatically create the database backend
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = False          # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _auto = True                # automatically create the database backend
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = True           # marks records for regular vacuum-cleaning
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False
def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    if not items:
        return lambda gettable: ()
    if len(items) == 1:
        single_key = items[0]
        return lambda gettable: (gettable[single_key],)
    return operator.itemgetter(*items)
def convert_pgerror_23502(model, fields, info, e):
    """ Convert a PostgreSQL not_null_violation (SQLSTATE 23502) into a
    user-friendly message dict with the offending field name; falls back on
    the raw error text when the message cannot be parsed or the column is
    not a known field of the view. """
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 tools.ustr(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        return {'message': tools.ustr(e)}
    message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        # prefer the human-readable field label when available
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
def convert_pgerror_23505(model, fields, info, e):
    """ Convert a PostgreSQL unique_violation (SQLSTATE 23505) into a
    user-friendly message dict; falls back on the raw error text. """
    # NOTE(review): PostgreSQL phrases this error as
    # 'duplicate key value violates unique constraint "..."', so this regex
    # would capture the literal word "value" rather than a column name, and
    # the specific message below is then only produced for a field actually
    # named "value". Verify against the PG versions actually supported.
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 tools.ustr(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        return {'message': tools.ustr(e)}
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
# Map PostgreSQL error codes (SQLSTATE) to converter functions turning a
# pgerror into a user-facing message dict; unknown codes fall back on the
# default factory, which simply wraps the raw error text.
PGERROR_TO_OE = defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': tools.ustr(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})
def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
    """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.

    Various implementations were tested on the corpus of all browse() calls
    performed during a full crawler run (after having installed all website_*
    modules) and this one was the most efficient overall.

    A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.

    :rtype: tuple
    """
    # NB: the mutable default ``atoms`` is deliberate here — it is a constant
    # lookup set built once at definition time and never mutated.
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()

    # `type in set` is significantly faster (because more restrictive) than
    # isinstance(arg, set) or issubclass(type, set); and for new-style classes
    # obj.__class__ is equivalent to but faster than type(obj). Not relevant
    # (and looks much worse) in most cases, but over millions of calls it
    # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,

    return tuple(arg)
# keep those imports here to avoid dependency cycle errors
from .osv import expression
from .fields import Field, SpecialValue, FailedValue
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.