repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
apache/incubator-trafodion | install/python-installer/scripts/traf_sqconfig.py | 2 | 2762 | #!/usr/bin/env python
# @@@ START COPYRIGHT @@@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# @@@ END COPYRIGHT @@@
### this script should be run on the first node as the trafodion user ###
import os
import sys
import json
from common import run_cmd, err
def run():
dbcfgs = json.loads(dbcfgs_json)
nodes = dbcfgs['node_list'].split(',')
scratch_locs = dbcfgs['scratch_locs'].split(',')
# this script runs as the trafodion user, so read TRAF_CONF from its environment
traf_conf = os.environ['TRAF_CONF']
if traf_conf == '': err('TRAF_CONF var is empty')
sqconfig_file = traf_conf + '/sqconfig'
traf_var = os.environ['TRAF_VAR']
if traf_var == '': err('TRAF_VAR var is empty')
sqconfig_db_file = traf_var + '/sqconfig.db'
# If the configuration database file is not yet created,
# build the 'sqconfig' file with the nodes specified and compile it.
if not os.path.exists(sqconfig_db_file):
core, processor = run_cmd("lscpu|grep -E '(^CPU\(s\)|^Socket\(s\))'|awk '{print $2}'").split('\n')[:2]
core = int(core)-1 if int(core) <= 256 else 255
lines = ['begin node\n']
for node_id, node in enumerate(nodes):
line = 'node-id=%s;node-name=%s;cores=0-%d;processors=%s;roles=connection,aggregation,storage\n' % (node_id, node, core, processor)
lines.append(line)
lines.append('end node\n')
lines.append('\n')
lines.append('begin overflow\n')
for scratch_loc in scratch_locs:
line = 'hdd %s\n' % scratch_loc
lines.append(line)
lines.append('end overflow\n')
# write out the node section
with open(sqconfig_file, 'w') as f:
f.writelines(lines)
print 'sqconfig generated successfully!'
run_cmd('sqgen')
print 'sqgen ran successfully!'
else:
print 'Using existing configuration (%s)' % sqconfig_file
# main
try:
dbcfgs_json = sys.argv[1]
except IndexError:
err('No db config found')
run()
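# Hedged illustration (node names and scratch path below are hypothetical):
# for nodes=['n1', 'n2'], core=7, processor='2' and scratch_locs=['/scratch'],
# the generated sqconfig file would contain:
#
#   begin node
#   node-id=0;node-name=n1;cores=0-7;processors=2;roles=connection,aggregation,storage
#   node-id=1;node-name=n2;cores=0-7;processors=2;roles=connection,aggregation,storage
#   end node
#
#   begin overflow
#   hdd /scratch
#   end overflow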
| apache-2.0 |
theo-l/django | tests/check_framework/test_templates.py | 29 | 3577 | from copy import copy, deepcopy
from django.core.checks.templates import (
E001, E002, check_setting_app_dirs_loaders,
check_string_if_invalid_is_string,
)
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckTemplateSettingsAppDirsTest(SimpleTestCase):
TEMPLATES_APP_DIRS_AND_LOADERS = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'loaders': ['django.template.loaders.filesystem.Loader'],
},
},
]
@override_settings(TEMPLATES=TEMPLATES_APP_DIRS_AND_LOADERS)
def test_app_dirs_and_loaders(self):
"""
Error if template loaders are specified and APP_DIRS is True.
"""
self.assertEqual(check_setting_app_dirs_loaders(None), [E001])
def test_app_dirs_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['APP_DIRS']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(check_setting_app_dirs_loaders(None), [])
def test_loaders_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['OPTIONS']['loaders']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(check_setting_app_dirs_loaders(None), [])
class CheckTemplateStringIfInvalidTest(SimpleTestCase):
TEMPLATES_STRING_IF_INVALID = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'string_if_invalid': False,
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'string_if_invalid': 42,
},
},
]
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.error1 = copy(E002)
cls.error2 = copy(E002)
string_if_invalid1 = cls.TEMPLATES_STRING_IF_INVALID[0]['OPTIONS']['string_if_invalid']
string_if_invalid2 = cls.TEMPLATES_STRING_IF_INVALID[1]['OPTIONS']['string_if_invalid']
cls.error1.msg = cls.error1.msg.format(string_if_invalid1, type(string_if_invalid1).__name__)
cls.error2.msg = cls.error2.msg.format(string_if_invalid2, type(string_if_invalid2).__name__)
@override_settings(TEMPLATES=TEMPLATES_STRING_IF_INVALID)
def test_string_if_invalid_not_string(self):
self.assertEqual(check_string_if_invalid_is_string(None), [self.error1, self.error2])
def test_string_if_invalid_first_is_string(self):
TEMPLATES = deepcopy(self.TEMPLATES_STRING_IF_INVALID)
TEMPLATES[0]['OPTIONS']['string_if_invalid'] = 'test'
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(check_string_if_invalid_is_string(None), [self.error2])
def test_string_if_invalid_both_are_strings(self):
TEMPLATES = deepcopy(self.TEMPLATES_STRING_IF_INVALID)
TEMPLATES[0]['OPTIONS']['string_if_invalid'] = 'test'
TEMPLATES[1]['OPTIONS']['string_if_invalid'] = 'test'
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(check_string_if_invalid_is_string(None), [])
def test_string_if_invalid_not_specified(self):
TEMPLATES = deepcopy(self.TEMPLATES_STRING_IF_INVALID)
del TEMPLATES[1]['OPTIONS']['string_if_invalid']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(check_string_if_invalid_is_string(None), [self.error1])
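# Hedged note (invocation assumed, not stated in this file): these checks can
# be exercised with Django's own test runner, e.g.:
#
#   ./runtests.py check_framework.test_templates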
| bsd-3-clause |
entropy1337/infernal-twin | Modules/build/reportlab/src/reportlab/graphics/charts/areas.py | 30 | 4432 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/areas.py
__version__=''' $Id$ '''
__doc__='''This module defines Area mixin classes'''
from reportlab.lib.validators import isNumber, isColor, isColorOrNone, isNoneOrShape
from reportlab.graphics.widgetbase import Widget
from reportlab.graphics.shapes import Rect, Group, Line, Polygon
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.lib.colors import grey
class PlotArea(Widget):
"Abstract base class representing a chart's plot area, pretty unusable by itself."
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc='X position of the lower-left corner of the chart.'),
y = AttrMapValue(isNumber, desc='Y position of the lower-left corner of the chart.'),
width = AttrMapValue(isNumber, desc='Width of the chart.'),
height = AttrMapValue(isNumber, desc='Height of the chart.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color of the plot area border.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of the plot area border.'),
fillColor = AttrMapValue(isColorOrNone, desc='Color of the plot area interior.'),
background = AttrMapValue(isNoneOrShape, desc='Handle to background object e.g. Rect(0,0,width,height).'),
debug = AttrMapValue(isNumber, desc='Used only for debugging.'),
)
def __init__(self):
self.x = 20
self.y = 10
self.height = 85
self.width = 180
self.strokeColor = None
self.strokeWidth = 1
self.fillColor = None
self.background = None
self.debug = 0
def makeBackground(self):
if self.background is not None:
BG = self.background
if isinstance(BG,Group):
g = BG
for bg in g.contents:
bg.x = self.x
bg.y = self.y
bg.width = self.width
bg.height = self.height
else:
g = Group()
if type(BG) not in (type(()),type([])): BG=(BG,)
for bg in BG:
bg.x = self.x
bg.y = self.y
bg.width = self.width
bg.height = self.height
g.add(bg)
return g
else:
strokeColor,strokeWidth,fillColor=self.strokeColor, self.strokeWidth, self.fillColor
if (strokeWidth and strokeColor) or fillColor:
g = Group()
_3d_dy = getattr(self,'_3d_dy',None)
x = self.x
y = self.y
h = self.height
w = self.width
if _3d_dy is not None:
_3d_dx = self._3d_dx
if fillColor and not strokeColor:
from reportlab.lib.colors import Blacker
c = Blacker(fillColor, getattr(self,'_3d_blacken',0.7))
else:
c = strokeColor
if not strokeWidth: strokeWidth = 0.5
if fillColor or strokeColor or c:
bg = Polygon([x,y,x,y+h,x+_3d_dx,y+h+_3d_dy,x+w+_3d_dx,y+h+_3d_dy,x+w+_3d_dx,y+_3d_dy,x+w,y],
strokeColor=strokeColor or c or grey, strokeWidth=strokeWidth, fillColor=fillColor)
g.add(bg)
g.add(Line(x,y,x+_3d_dx,y+_3d_dy, strokeWidth=0.5, strokeColor=c))
g.add(Line(x+_3d_dx,y+_3d_dy, x+_3d_dx,y+h+_3d_dy,strokeWidth=0.5, strokeColor=c))
fc = Blacker(c, getattr(self,'_3d_blacken',0.8))
g.add(Polygon([x,y,x+_3d_dx,y+_3d_dy,x+w+_3d_dx,y+_3d_dy,x+w,y],
strokeColor=strokeColor or c or grey, strokeWidth=strokeWidth, fillColor=fc))
bg = Line(x+_3d_dx,y+_3d_dy, x+w+_3d_dx,y+_3d_dy,strokeWidth=0.5, strokeColor=c)
else:
bg = None
else:
bg = Rect(x, y, w, h,
strokeColor=strokeColor, strokeWidth=strokeWidth, fillColor=fillColor)
if bg: g.add(bg)
return g
else:
return None
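# Hedged sketch (MyChart is hypothetical): concrete charts set x/y/width/height
# and merge the result of makeBackground() into their drawing, roughly:
#
#   class MyChart(PlotArea):
#       def draw(self):
#           g = Group()
#           bg = self.makeBackground()
#           if bg is not None:
#               g.add(bg)
#           return g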
| gpl-3.0 |
DavidLP/home-assistant | tests/components/qwikswitch/test_init.py | 7 | 3667 | """Test qwikswitch sensors."""
import logging
import pytest
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.components.qwikswitch import DOMAIN as QWIKSWITCH
from homeassistant.bootstrap import async_setup_component
from tests.test_util.aiohttp import mock_aiohttp_client
from aiohttp.client_exceptions import ClientError
_LOGGER = logging.getLogger(__name__)
class AiohttpClientMockResponseList(list):
"""Return multiple values for aiohttp Mocker.
aiohttp mocker uses decode() to fetch the next value.
"""
def decode(self, _):
"""Return next item from list."""
try:
res = list.pop(self, 0)
_LOGGER.debug("MockResponseList popped %s: %s", res, self)
if isinstance(res, Exception):
raise res
return res
except IndexError:
raise AssertionError("MockResponseList empty")
async def wait_till_empty(self, hass):
"""Wait until empty."""
while self:
await hass.async_block_till_done()
await hass.async_block_till_done()
LISTEN = AiohttpClientMockResponseList()
@pytest.fixture
def aioclient_mock():
"""HTTP client listen and devices."""
devices = """[
{"id":"@000001","name":"Switch 1","type":"rel","val":"OFF",
"time":"1522777506","rssi":"51%"},
{"id":"@000002","name":"Light 2","type":"rel","val":"ON",
"time":"1522777507","rssi":"45%"},
{"id":"@000003","name":"Dim 3","type":"dim","val":"280c00",
"time":"1522777544","rssi":"62%"}]"""
with mock_aiohttp_client() as mock_session:
mock_session.get("http://127.0.0.1:2020/&listen", content=LISTEN)
mock_session.get("http://127.0.0.1:2020/&device", text=devices)
yield mock_session
async def test_binary_sensor_device(hass, aioclient_mock): # noqa
"""Test a binary sensor device."""
config = {
'qwikswitch': {
'sensors': {
'name': 's1',
'id': '@a00001',
'channel': 1,
'type': 'imod',
}
}
}
await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_block_till_done()
state_obj = hass.states.get('binary_sensor.s1')
assert state_obj.state == 'off'
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
LISTEN.append('{"id":"@a00001","cmd":"","data":"4e0e1601","rssi":"61%"}')
LISTEN.append(ClientError()) # Will cause a sleep
await hass.async_block_till_done()
state_obj = hass.states.get('binary_sensor.s1')
assert state_obj.state == 'on'
LISTEN.append('{"id":"@a00001","cmd":"","data":"4e0e1701","rssi":"61%"}')
hass.data[QWIKSWITCH]._sleep_task.cancel()
await LISTEN.wait_till_empty(hass)
state_obj = hass.states.get('binary_sensor.s1')
assert state_obj.state == 'off'
async def test_sensor_device(hass, aioclient_mock): # noqa
"""Test a sensor device."""
config = {
'qwikswitch': {
'sensors': {
'name': 'ss1',
'id': '@a00001',
'channel': 1,
'type': 'qwikcord',
}
}
}
await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_block_till_done()
state_obj = hass.states.get('sensor.ss1')
assert state_obj.state == 'None'
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
LISTEN.append(
'{"id":"@a00001","name":"ss1","type":"rel",'
'"val":"4733800001a00000"}')
await hass.async_block_till_done()
state_obj = hass.states.get('sensor.ss1')
assert state_obj.state == '416'
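# Hedged note: LISTEN is module-level state consumed by the mocked '&listen'
# endpoint; each test appends JSON packets and lets Home Assistant drain them.
# Assumed invocation: pytest tests/components/qwikswitch/test_init.py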
| apache-2.0 |
dyyi/moneybook | venv/Lib/site-packages/wheel/archive.py | 233 | 2286 | """
Archive tools for wheel.
"""
import os
import time
import logging
import os.path
import zipfile
log = logging.getLogger("wheel")
def archive_wheelfile(base_name, base_dir):
'''Archive all files under `base_dir` in a whl file and name it like
`base_name`.
'''
olddir = os.path.abspath(os.curdir)
base_name = os.path.abspath(base_name)
try:
os.chdir(base_dir)
return make_wheelfile_inner(base_name)
finally:
os.chdir(olddir)
def make_wheelfile_inner(base_name, base_dir='.'):
"""Create a whl file from all the files under 'base_dir'.
Places .dist-info at the end of the archive."""
zip_filename = base_name + ".whl"
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
# Some applications need reproducible .whl files, but they can't do this
# without forcing the timestamp of the individual ZipInfo objects. See
# issue #143.
timestamp = os.environ.get('SOURCE_DATE_EPOCH')
if timestamp is None:
date_time = None
else:
date_time = time.gmtime(int(timestamp))[0:6]
# XXX support bz2, xz when available
zip = zipfile.ZipFile(open(zip_filename, "wb+"), "w",
compression=zipfile.ZIP_DEFLATED)
score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
deferred = []
def writefile(path, date_time):
st = os.stat(path)
if date_time is None:
mtime = time.gmtime(st.st_mtime)
date_time = mtime[0:6]
zinfo = zipfile.ZipInfo(path, date_time)
zinfo.external_attr = st.st_mode << 16
zinfo.compress_type = zipfile.ZIP_DEFLATED
with open(path, 'rb') as fp:
zip.writestr(zinfo, fp.read())
log.info("adding '%s'" % path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
if dirpath.endswith('.dist-info'):
deferred.append((score.get(name, 0), path))
else:
writefile(path, date_time)
deferred.sort()
for score, path in deferred:
writefile(path, date_time)
zip.close()
return zip_filename
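# Hedged usage sketch (paths and epoch value are hypothetical):
#
#   import os
#   os.environ['SOURCE_DATE_EPOCH'] = '315532800'   # reproducible timestamps
#   archive_wheelfile('dist/mypkg-1.0-py2.py3-none-any', 'build/bdist/wheel')
#   # -> 'dist/mypkg-1.0-py2.py3-none-any.whl'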
| apache-2.0 |
SHornung1/AliPhysics | PWGMM/MC/aligenqa/aligenqa/utils.py | 37 | 7697 | import os
import random
import string
import subprocess
import re
from rootpy import asrootpy
from rootpy.plotting import Graph
def gen_random_name():
"""Generate a random name for temp hists"""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(25))
def get_est_dirs(sums, considered_ests):
return (somedir for somedir in sums if somedir.GetName() in considered_ests)
def make_estimator_title(name):
if name == 'EtaLt05':
return '|#eta|#leq0.5'
elif name == 'EtaLt08':
return '|#eta|#leq0.8'
elif name == 'EtaLt15':
return '|#eta|#leq1.5'
elif name == 'Eta08_15':
return '0.8#leq|#eta|#leq1.5'
else:
return name
def remap_x_values(hist, corr_hist):
"""
Map the x values of hist to the y values of map_hist.
In order to do so, it is necessary that the x values of hist are also present as x values in corr_hist.
Parameters
----------
hist : Hist1D
corr_hist : Hist2D
Correlations between the quantity on hist's x-axis (also corr_hist's x-axis) and the new
quantity to plot against (on corr_hist's y-axis).
Returns
-------
Graph
Graph of the remapped hist. Errors are ??? TODO
"""
hist = asrootpy(hist)
corr_hist = asrootpy(corr_hist)
profx = asrootpy(corr_hist.ProfileX(gen_random_name()))
rt_graph = Graph()
for i, (nch_ref_bin, counter_bin) in enumerate(zip(profx.bins(), hist.bins())):
rt_graph.SetPoint(i, nch_ref_bin.value, counter_bin.value)
xerr, yerr = nch_ref_bin.error / 2.0, counter_bin.error / 2.0
rt_graph.SetPointError(i, xerr, xerr, yerr, yerr)
return rt_graph
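# Hedged usage sketch (`counter` and `corr` are hypothetical ROOT histograms):
#
#   graph = remap_x_values(counter, corr)   # x values become mean Nch from corr
#   remove_zero_value_points(graph)
#   remove_points_with_equal_x(graph)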
def remove_zero_value_points(g):
# Remove the points backwards, since the index would change if we do it forwards
# The first point has index 0!
points_to_remove = []
for i, (x, y) in enumerate(g):
if not y > 0.0:
points_to_remove.append(i)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_points_with_equal_x(g):
"""Remove all points which are on already occupied x values. Ie. The first point is kept, all later ones removed"""
points_to_remove = []
seen_x = []
for i, (x, y) in enumerate(g):
if x in seen_x:
points_to_remove.append(i)
else:
seen_x.append(x)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_points_with_x_err_gt_1NchRef(g):
npoints = g.GetN()
points_to_remove = []
for idx in xrange(0, npoints):
if g.GetErrorX(idx) > 1:
points_to_remove.append(idx)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_non_mutual_points(g1, g2):
"""Remove all points with do no have a corresponding point at the same x-value in the other hist"""
points_to_remove1 = []
points_to_remove2 = []
xs1 = [p[0] for p in g1]
xs2 = [p[0] for p in g2]
for i, x in enumerate(xs1):
if x not in xs2:
points_to_remove1.append(i)
for i, x in enumerate(xs2):
if x not in xs1:
points_to_remove2.append(i)
for p in points_to_remove1[::-1]:
g1.RemovePoint(p)
for p in points_to_remove2[::-1]:
g2.RemovePoint(p)
def percentile_bin_to_binidx_bin(percentile_bin, event_counter):
"""
Converts a given percentile interval (eg. (.5, .4)) to an interval of bin numbers of the given
event_counter histogram.
Parameters
----------
percentile_bin : tuple
Two percentiles, each within 0-1. Must be decreasing.
event_counter : Hist1D
Distribution of events over a classifier value
Returns
-------
tuple :
two bin numbers representing the given percentile interval. The first bin is inclusive, the second exclusive;
i.e. the bin numbers can be used directly in SetRange
Raises
------
ValueError :
The percentile specifies a range which is not found in the given event_counter histogram. It might be too
narrow.
"""
nbins = event_counter.GetXaxis().GetNbins()
ntotal_events = event_counter.Integral(1, nbins) # .Integral is a closed interval, as far as I can tell...
# fraction of events with greater or equal classifier values; hence decreasing values
frac_events_with_geq_classifier_value = [event_counter.Integral(binidx, nbins) / float(ntotal_events)
for binidx in range(1, nbins + 1)]
# small checks:
if frac_events_with_geq_classifier_value[0] != 1:
assert(0)
if len(frac_events_with_geq_classifier_value) != nbins:
assert(0)
# produce a list of bools, the first and last True are the first and last bin index
fraction_is_in_percentile_interval = lambda fraction: percentile_bin[0] >= fraction >= percentile_bin[1]
bin_is_in_percentile_interval = map(fraction_is_in_percentile_interval, frac_events_with_geq_classifier_value)
# get the indices of the elements that are True, sorry, this is a bit ugly
indices_of_bins_in_percentile_interval = [i for i, b in enumerate(bin_is_in_percentile_interval) if b]
# return the first and last binidx of the bins in the percentile interval; +1 for ROOT's 1-based bin indices
try:
return (indices_of_bins_in_percentile_interval[0] + 1, indices_of_bins_in_percentile_interval[-1] + 1)
except IndexError:
# print "percentiles: "
# print frac_events_with_geq_classifier_value
raise ValueError("The given percentile interval did not match any bins in the given event_counter histogram")
def download_file(alien_path, local_path):
"""
Download a file from `alien_path` to `local_path`
Parameters
----------
alien_path, local_path : string
Full path to files
"""
if os.path.isfile(local_path):
raise ValueError("Local file exists")
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
pass
alien_path = "alien:/" + alien_path
cp_cmd = ['alien_cp', '-v', '-s', alien_path, local_path]
p = subprocess.Popen(cp_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
if p.returncode != 0:
print "\n", p.stdout.read()
print("An error occued while downloading {0}; "
"The broken file was deleted.".format(local_path))
try:
os.remove(local_path)
except OSError:
pass
def get_generator_name_from_train(alien_path):
"""
Extract the generator name from an `AnalysisResults.root` file at the given alien_path.
Parameters
----------
alien_path :
Alien path to `AnalysisResults.root`
Returns
-------
str :
Generator name as stated in the train's `env.sh` file
"""
if not alien_path.startswith("alien:"):
alien_path = "alien:/" + alien_path
path_to_env = os.path.join(os.path.split(alien_path)[0], "..", "env.sh")
cp_cmd = ['alien_cp', '-v', '-s', path_to_env, ".env.sh"]
print "copying with: %s"%cp_cmd
subprocess.check_call(cp_cmd)
with open(".env.sh") as f:
for line in f.readlines():
if "PERIOD_NAME" in line:
gen_name = re.match(".*'(.+)'", line).groups()[-1]
break
return gen_name
def get_generator_name_from_filename(fname):
"""
Deduce the generator name from the file name as assigned when the
file was downloaded. Replace underscores with spaces.
"""
name = re.match(r'.*\d+_\d{8}-\d{4}-(.+)\.root$', fname).groups()[-1]
return name.replace("_", " ")
| bsd-3-clause |
OptiPop/external_chromium_org | third_party/jinja2/environment.py | 614 | 47244 | # -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instantiated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, string_types):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
based comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (i.e. file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in iteritems(attributes):
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, lstrip_blocks=missing,
extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in iteritems(args):
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.filters.get(name)
if func is None:
raise TemplateRuntimeError('no filter named %r' % name)
args = [value] + list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {}))
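    # Hedged example (assumes the built-in 'upper' filter, which is registered
    # by default):
    #
    #   env.call_filter('upper', 'hello')   # -> 'HELLO'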
def call_test(self, name, value, args=None, kwargs=None):
"""Invokes a test on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.tests.get(name)
if func is None:
raise TemplateRuntimeError('no test named %r' % name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is use internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does not do anything
on PyPy and Python 3, where pyc files are not picked up automatically and
don't give much benefit.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
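# Hedged usage sketch (template text and variables are illustrative):
#
#   env = Environment(trim_blocks=True)
#   tmpl = env.from_string(u'{% for i in seq %}{{ i }}{% endfor %}')
#   assert tmpl.render(seq=[1, 2, 3]) == u'123'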
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However it's important that a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
lstrip_blocks, newline_sequence, keep_trailing_newline,
frozenset(extensions), optimized, undefined, finalize, autoescape,
None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as it to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(imap(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
converting it into a unicode or byte string renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@implements_iterator
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
By default unicode strings are written; if you want to encode
before writing, specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, string_types):
fp = open(fp, encoding is None and 'w' or 'wb')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = get_next(self._gen)
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = get_next(generator(get_next(self._gen)))
def __iter__(self):
return self
def __next__(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
| bsd-3-clause |
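# Editor's note: a minimal usage sketch for the streaming and module APIs
# documented above, assuming a plain `jinja2.Template`; the output path and
# buffer size are illustrative, not prescribed by the source.
from jinja2 import Template

t = Template(u'{% for i in rows %}{{ i }}\n{% endfor %}')
stream = t.stream(rows=range(100))   # a TemplateStream, unbuffered by default
stream.enable_buffering(size=5)      # now five events per yielded string
stream.dump('rows.txt')              # dump() opens, writes and closes for us

# The module API exposes exported names such as macros:
m = Template(u'{% macro foo() %}42{% endmacro %}').module
assert m.foo() == u'42'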
elenaoat/AutobahnPython | examples/websocket/echo_tls/client.py | 32 | 1863 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from optparse import OptionParser
from twisted.python import log
from twisted.internet import reactor, ssl
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def sendHello(self):
self.sendMessage("Hello, world!")
def onOpen(self):
self.sendHello()
def onMessage(self, msg, binary):
print "Got echo: " + msg
reactor.callLater(1, self.sendHello)
if __name__ == '__main__':
log.startLogging(sys.stdout)
parser = OptionParser()
parser.add_option("-u", "--url", dest = "url", help = "The WebSocket URL", default = "wss://localhost:9000")
(options, args) = parser.parse_args()
## create a WS client factory with our protocol
##
factory = WebSocketClientFactory(options.url, debug = False)
factory.protocol = EchoClientProtocol
## SSL client context: default
##
if factory.isSecure:
contextFactory = ssl.ClientContextFactory()
else:
contextFactory = None
connectWS(factory, contextFactory)
reactor.run()
| apache-2.0 |
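# Editor's note: a hedged sketch of the matching echo server for the TLS
# client above, using the same-era Autobahn API; the key/cert paths are
# placeholders, not files shipped with the example.
import sys
from twisted.python import log
from twisted.internet import reactor, ssl
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS

class EchoServerProtocol(WebSocketServerProtocol):
   def onMessage(self, msg, binary):
      ## echo back whatever the client sent
      self.sendMessage(msg, binary)

if __name__ == '__main__':
   log.startLogging(sys.stdout)
   contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key',
                                                     'keys/server.crt')
   factory = WebSocketServerFactory("wss://localhost:9000", debug = False)
   factory.protocol = EchoServerProtocol
   listenWS(factory, contextFactory)
   reactor.run()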
ar7z1/ansible | lib/ansible/plugins/cache/memory.py | 159 | 1272 | # (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: memory
short_description: RAM backed, non persistent
description:
- RAM backed cache that is not persistent.
- This is the default used if no other plugin is specified.
- There are no options to configure.
version_added: historical
author: core team (@ansible-core)
'''
from ansible.plugins.cache import BaseCacheModule
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
self._cache = {}
def get(self, key):
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
def keys(self):
return self._cache.keys()
def contains(self, key):
return key in self._cache
def delete(self, key):
del self._cache[key]
def flush(self):
self._cache = {}
def copy(self):
return self._cache.copy()
def __getstate__(self):
return self.copy()
def __setstate__(self, data):
self._cache = data
| gpl-3.0 |
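# Editor's note: a short sketch exercising the BaseCacheModule contract
# directly; in normal operation Ansible instantiates the plugin itself.
from ansible.plugins.cache.memory import CacheModule

cache = CacheModule()
cache.set('host1', {'ansible_os_family': 'Debian'})
assert cache.contains('host1')
assert cache.get('host1')['ansible_os_family'] == 'Debian'
cache.delete('host1')
cache.flush()
assert list(cache.keys()) == []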
RussTedrake/director | src/python/ddapp/vtkNumpy.py | 10 | 2294 | import vtkAll as vtk
from shallowCopy import shallowCopy
import numpy as np
try:
from vtk.util import numpy_support
except ImportError:
from paraview import numpy_support
def numpyToPolyData(pts, pointData=None, createVertexCells=False):
pd = vtk.vtkPolyData()
pd.SetPoints(vtk.vtkPoints())
# Makes a deep copy
pd.GetPoints().SetData(getVtkFromNumpy(pts.copy()))
if pointData is not None:
for key, value in pointData.iteritems():
addNumpyToVtk(pd, value.copy(), key)
if createVertexCells:
cellIds = vtk.vtkIdList()
cellIds.SetNumberOfIds(pd.GetNumberOfPoints())
for i in range(pd.GetNumberOfPoints()):
cellIds.SetId(i, i)
cells = vtk.vtkCellArray()
cells.InsertNextCell(cellIds)
pd.SetVerts(cells)
return pd
def getNumpyFromVtk(dataObj, arrayName='Points'):
if arrayName == 'Points':
vtkArray = dataObj.GetPoints().GetData()
else:
vtkArray = dataObj.GetPointData().GetArray(arrayName)
if not vtkArray:
raise KeyError('Array not found')
return numpy_support.vtk_to_numpy(vtkArray)
def getVtkPointsFromNumpy(numpyArray):
points = vtk.vtkPoints()
points.SetData(getVtkFromNumpy(numpyArray))
return points
def getVtkPolyDataFromNumpyPoints(points):
'''
Given an Nx3 array of xyz points,
return a new vtkPolyData containing points and vertex cells.
If the input array is not float64, it will be converted first.
'''
if points.dtype != np.float64:
points = points.astype(np.float64)
polyData = vtk.vtkPolyData()
polyData.SetPoints(getVtkPointsFromNumpy(points))
vtk.vtkPCLConversions.AddVertexCells(polyData)
return polyData
def getVtkFromNumpy(numpyArray):
# The DeleteEvent observer set below closes over the numpy array, keeping it
# alive for as long as the VTK array exists, since numpy_to_vtk shares the
# underlying memory buffer rather than copying it.
def MakeCallback(numpyArray):
def Closure(caller, event):
closureArray = numpyArray
return Closure
vtkArray = numpy_support.numpy_to_vtk(numpyArray)
vtkArray.AddObserver('DeleteEvent', MakeCallback(numpyArray))
return vtkArray
def addNumpyToVtk(dataObj, numpyArray, arrayName):
assert dataObj.GetNumberOfPoints() == numpyArray.shape[0]
vtkArray = getVtkFromNumpy(numpyArray)
vtkArray.SetName(arrayName)
dataObj.GetPointData().AddArray(vtkArray)
| bsd-3-clause |
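# Editor's note: a round-trip sketch of the helpers above, assuming the
# module is importable as ddapp.vtkNumpy (its path in this repo).
import numpy as np
from ddapp import vtkNumpy as vnp

pts = np.random.rand(10, 3)                  # Nx3 xyz points
intensity = np.arange(10, dtype=np.float64)  # one scalar per point
pd = vnp.numpyToPolyData(pts, pointData={'intensity': intensity},
                         createVertexCells=True)
assert pd.GetNumberOfPoints() == 10
back = vnp.getNumpyFromVtk(pd, 'intensity')  # the helpers copied on the way in
assert np.allclose(back, intensity)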
nicktimko/multiworm | tapeworm/tests/test_bad_datasets.py | 1 | 1467 | from __future__ import (
absolute_import, division, print_function, unicode_literals)
import six
from six.moves import (zip, filter, map, reduce, input, range)
import unittest
import numpy as np
import tapeworm.scoring
from tapeworm.core import OneGoodBlobException
class TestBadDatasets(unittest.TestCase):
def setUp(self):
np.random.seed(1)
def test_one_good_blob(self):
self.length = 30
self.n_paths = 1
offset = 0.8
displacement_data = np.ma.array([
0.2 * (np.random.rand() + 0.5) # some random scaling
* np.append([0], # start at 0
np.cumsum(np.random.rand(self.length - 1) + offset))
for _ in range(self.n_paths)
])
self.min_f, self.max_f = (1, self.length - 1)
self.min_d, self.max_d = (0, displacement_data.max())
self.mean_f = (self.min_f + self.max_f) / 2
self.mean_d = (self.min_d + self.max_d) / 2
try:
self.scorer = tapeworm.scoring.DisplacementScorer(displacement_data)
except OneGoodBlobException:
pass
except Exception as e:
self.fail("Unexpected {} exception when trying to construct "
"scoring model with single trace.".format(e.__class__.__name__))
else:
self.fail("Scorer allowed construction of model using a single trace.")
if __name__ == '__main__':
unittest.main()
| mit |
amyphan/ilikevideoga.me | node_modules/karma/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
| mit |
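# Editor's note: a worked example of the surrogate formulae linked above,
# for U+1F600, a code point outside the Basic Multilingual Plane.
code_point = 0x1F600
high = (code_point - 0x10000) // 0x400 + 0xD800   # 0xD83D
low = (code_point - 0x10000) % 0x400 + 0xDC00     # 0xDE00
assert (high, low) == (0xD83D, 0xDE00)
# The reverse mapping recovers the original code point:
assert (high - 0xD800) * 0x400 + (low - 0xDC00) + 0x10000 == code_point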
researchstudio-sat/wonpreprocessing | python-processing/tools/bm25.py | 1 | 2555 | __author__ = 'hfriedrich'
import numpy as np
from tools.tensor_utils import SparseTensor
from math import log10
from scipy.sparse import csr_matrix
# see http://en.wikipedia.org/wiki/Okapi_BM25
# parameters:
# tensor: SparseTensor object
# indices: indices pointing to (need, need) combinations to compute the connection bm25 score for
# threshold: if threshold is given result array is binary
# var_k: see http://en.wikipedia.org/wiki/Okapi_BM25
# var_b: http://en.wikipedia.org/wiki/Okapi_BM25
# return: array with result bm25 scores (float or binary) for each index pair
def bm25_link_prediciton(tensor, indices, threshold=None, var_k=1.5, var_b=0.75):
# combine the attributes in one matrix
m_csr = m = tensor.getSliceMatrix(SparseTensor.ATTR_SUBJECT_SLICE) + \
tensor.getSliceMatrix(SparseTensor.ATTR_CONTENT_SLICE) + \
tensor.getSliceMatrix(SparseTensor.CATEGORY_SLICE)
# compute the average document length
numNeeds = len(tensor.getNeedIndices())
avgDocLength = len(m_csr[tensor.getNeedIndices(),:].nonzero()[1]) / float(numNeeds)
# create a dictionary with computed idf values of all attributes
idf = dict()
m_csc = m_csr.tocsc()
for attr in tensor.getAttributeIndices():
n = len(m_csc[:,attr].nonzero()[1])
idf[attr] = log10((numNeeds - n + 0.5) / (n + 0.5))
# compute the BM25 score for every index
# if a threshold is specified use it to set the result prediction value to 0 or 1
prediction = []
for i in range(len(indices[0])):
docNeed = indices[0][i]
docLength = len(m[docNeed,:].nonzero()[1])
queryNeed = indices[1][i]
queryAttrs = m_csr[queryNeed,:].nonzero()[1]
s = 0
if len(m[docNeed,queryAttrs].nonzero()[0]) > 0:
queryAttrs = [attr for attr in queryAttrs if m[docNeed,attr] != 0.0]
if len(queryAttrs) > 0:
s = score(docLength, avgDocLength, queryAttrs, idf, var_k, var_b)
if threshold != None:
s = 1 if s > threshold else 0
prediction.append(s)
return prediction
# For computation of the score, use a term frequency of either 0 or 1, since we don't have the number of occurrences of an
# attribute in a need. This is why queryAttrs contains only attributes that are actually part of the document.
def score(docLength, avgDocLength, queryAttrs, idf, k, b):
sum = 0
d = (k + 1) / (1 + k * (1 - b + b * docLength / avgDocLength))
for attr in queryAttrs:
sum += idf[attr] * d
return sum
| apache-2.0 |
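# Editor's note: a tiny numeric check of the scoring above, outside the
# tensor machinery. With N = 100 needs and an attribute occurring in n = 10
# of them, idf = log10((100 - 10 + 0.5) / (10 + 0.5)), which is about 0.935.
from math import log10

N, n = 100, 10
idf = log10((N - n + 0.5) / (n + 0.5))
assert abs(idf - 0.9354) < 1e-3
k, b = 1.5, 0.75
doc_length, avg_doc_length = 20, 25.0
# Per-attribute contribution with a binary term frequency, as in score():
d = (k + 1) / (1 + k * (1 - b + b * doc_length / avg_doc_length))
contribution = idf * d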
thelabnyc/django-activity-stream | actstream/tests/base.py | 1 | 5135 | from json import loads
from datetime import datetime
from inspect import getargspec
from django.apps import apps
from django.test import TestCase
from django.template import Template, Context
from django.utils.timesince import timesince
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from actstream.models import Action, Follow
from actstream.registry import register, unregister
from actstream.actions import follow
from actstream.signals import action
def render(src, **ctx):
return Template('{% load activity_tags %}' + src).render(Context(ctx))
class LTE(int):
def __new__(cls, n):
obj = super(LTE, cls).__new__(cls, n)
obj.n = n
return obj
def __eq__(self, other):
return other <= self.n
def __repr__(self):
return "<= %s" % self.n
class ActivityBaseTestCase(TestCase):
actstream_models = ()
maxDiff = None
def setUp(self):
self.User = get_user_model()
self.user_ct = ContentType.objects.get_for_model(self.User)
register(self.User)
for model in self.actstream_models:
register(model)
def assertSetEqual(self, l1, l2, msg=None, domap=True):
if domap:
l1 = map(str, l1)
self.assertSequenceEqual(set(l1), set(l2), msg)
def assertAllIn(self, bits, string):
for bit in bits:
self.assertIn(bit, string)
def assertJSON(self, string):
return loads(string)
def tearDown(self):
for model in self.actstream_models:
model = apps.get_model(*model.split('.'))
unregister(model)
model.objects.all().delete()
Action.objects.all().delete()
Follow.objects.all().delete()
self.User.objects.all().delete()
def capture(self, viewname, *args):
response = self.client.get(reverse(viewname, args=args))
content = response.content.decode()
if response['Content-Type'] == 'application/json':
return loads(content)
return content
class DataTestCase(ActivityBaseTestCase):
actstream_models = ('auth.Group', 'sites.Site')
def setUp(self):
self.testdate = datetime(2000, 1, 1)
self.timesince = timesince(self.testdate).encode('utf8').replace(
b'\xc2\xa0', b' ').decode()
self.group_ct = ContentType.objects.get_for_model(Group)
super(DataTestCase, self).setUp()
self.group = Group.objects.create(name='CoolGroup')
self.another_group = Group.objects.create(name='NiceGroup')
if 'email' in getargspec(self.User.objects.create_superuser).args:
self.user1 = self.User.objects.create_superuser('admin', 'admin@example.com', 'admin')
self.user2 = self.User.objects.create_user('Two', 'two@example.com')
self.user3 = self.User.objects.create_user('Three', 'three@example.com')
self.user4 = self.User.objects.create_user('Four', 'four@example.com')
else:
self.user1 = self.User.objects.create_superuser('admin', 'admin')
self.user2 = self.User.objects.create_user('Two')
self.user3 = self.User.objects.create_user('Three')
self.user4 = self.User.objects.create_user('Four')
# User1 joins group
self.user1.groups.add(self.group)
self.join_action = action.send(self.user1, verb='joined',
target=self.group,
timestamp=self.testdate)[0][1]
# User1 follows User2
follow(self.user1, self.user2, timestamp=self.testdate)
# User2 joins group
self.user2.groups.add(self.group)
action.send(self.user2, verb='joined', target=self.group,
timestamp=self.testdate)
# User2 follows group
follow(self.user2, self.group, timestamp=self.testdate)
# User1 comments on group
# Use a site object here and predict the "__unicode__ method output"
action.send(self.user1, verb='commented on', target=self.group,
timestamp=self.testdate)
self.comment = Site.objects.create(
domain="admin: Sweet Group!...")
# Group responds to comment
action.send(self.group, verb='responded to', target=self.comment,
timestamp=self.testdate)
# User 3 did something but isn't following anyone
action.send(self.user3, verb='liked actstream', timestamp=self.testdate)
# User4 likes group
follow(self.user4, self.another_group, timestamp=self.testdate, flag='liking')
# User4 watches group
follow(self.user4, self.another_group, timestamp=self.testdate, flag='watching')
# User4 likes User1
follow(self.user4, self.user1, timestamp=self.testdate, flag='liking')
# User4 blacklist user3
follow(self.user4, self.user3, timestamp=self.testdate, flag='blacklisting')
| bsd-3-clause |
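# Editor's note: a brief illustration of the LTE helper defined above; it
# compares equal to anything at or below its bound, so a test can assert
# "at most N" with a plain assertEqual.
from actstream.tests.base import LTE

assert LTE(5) == 3
assert LTE(5) == 5
assert not (LTE(5) == 6)
# e.g. inside a TestCase:  self.assertEqual(row_count, LTE(5))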
openstack/nova | nova/tests/unit/virt/vmwareapi/test_vif.py | 4 | 12542 | # Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_vmware import vim_util
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
class VMwareVifTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVifTestCase, self).setUp()
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True)
self._network = network
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
self.session = fake.FakeSession()
self.cluster = None
def test_get_vif_info_none(self):
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'fake_model', None)
self.assertEqual([], vif_info)
def test_get_vif_info_empty_list(self):
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'fake_model', [])
self.assertEqual([], vif_info)
@mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
def test_get_vif_info(self, mock_get_network_ref):
network_info = utils.get_test_network_info()
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'fake_model', network_info)
expected = [{'iface_id': utils.FAKE_VIF_UUID,
'mac_address': utils.FAKE_VIF_MAC,
'network_name': utils.FAKE_NETWORK_BRIDGE,
'network_ref': 'fake_ref',
'vif_model': 'fake_model'}]
self.assertEqual(expected, vif_info)
@mock.patch.object(vif, '_check_ovs_supported_version')
def test_get_network_ref_ovs_integration_bridge(self, mock_check):
self.flags(integration_bridge='fake-bridge-id', group='vmware')
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_OVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
network_ref = vif.get_network_ref('fake-session',
'fake-cluster',
vif_info)
expected_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-bridge-id',
'network-type': 'opaque',
'use-external-id': False}
self.assertEqual(expected_ref, network_ref)
mock_check.assert_called_once_with('fake-session')
@mock.patch.object(vif, '_check_ovs_supported_version')
def test_get_network_ref_ovs(self, mock_check):
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_OVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
network_ref = vif.get_network_ref('fake-session',
'fake-cluster',
vif_info)
expected_ref = {'type': 'OpaqueNetwork',
'network-id': 0,
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self.assertEqual(expected_ref, network_ref)
mock_check.assert_called_once_with('fake-session')
@mock.patch.object(vif, '_check_ovs_supported_version')
def test_get_network_ref_ovs_logical_switch_id(self, mock_check):
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_OVS,
address='DE:AD:BE:EF:00:00',
network=self._network,
details={'nsx-logical-switch-id':
'fake-nsx-id'})]
)[0]
network_ref = vif.get_network_ref('fake-session',
'fake-cluster',
vif_info)
expected_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-nsx-id',
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self.assertEqual(expected_ref, network_ref)
mock_check.assert_called_once_with('fake-session')
@mock.patch.object(network_util, 'get_network_with_the_name')
def test_get_network_ref_dvs(self, mock_network_name):
fake_network_obj = {'type': 'DistributedVirtualPortgroup',
'dvpg': 'fake-key',
'dvsw': 'fake-props'}
mock_network_name.return_value = fake_network_obj
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
network_ref = vif.get_network_ref('fake-session',
'fake-cluster',
vif_info)
mock_network_name.assert_called_once_with('fake-session',
'fa0',
'fake-cluster')
self.assertEqual(fake_network_obj, network_ref)
@mock.patch.object(network_util, 'get_network_with_the_name')
def test_get_network_ref_dvs_vif_details(self, mock_network_name):
fake_network_obj = {'type': 'DistributedVirtualPortgroup',
'dvpg': 'pg1',
'dvsw': 'fake-props'}
mock_network_name.return_value = fake_network_obj
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
details={'dvs_port_key': 'key1',
'dvs_port_group_name': 'pg1'},
address='DE:AD:BE:EF:00:00',
network=self._network)])[0]
network_ref = vif.get_network_ref('fake-session',
'fake-cluster',
vif_info)
mock_network_name.assert_called_once_with('fake-session',
'pg1',
'fake-cluster')
self.assertEqual(fake_network_obj, network_ref)
@mock.patch.object(network_util, 'get_network_with_the_name',
return_value=None)
def test_get_network_ref_dvs_no_match(self, mock_network_name):
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_network_ref,
'fake-session',
'fake-cluster',
vif_info)
def test_get_network_ref_invalid_type(self):
vif_info = network_model.NetworkInfo([
network_model.VIF(address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
self.assertRaises(exception.InvalidInput,
vif.get_network_ref,
'fake-session',
'fake-cluster',
vif_info)
@mock.patch.object(vif.LOG, 'warning')
@mock.patch.object(vim_util, 'get_vc_version',
return_value='5.0.0')
def test_check_invalid_ovs_version(self, mock_version, mock_warning):
vif._check_ovs_supported_version('fake_session')
# assert that the min version is in a warning message
expected_arg = {'version': constants.MIN_VC_OVS_VERSION}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertTrue(version_arg_found)
@mock.patch.object(network_util, 'get_network_with_the_name')
def test_get_network_ref_dvs_provider(self, mock_network_name):
fake_network_obj = {'type': 'DistributedVirtualPortgroup',
'dvpg': 'fake-key',
'dvsw': 'fake-props'}
mock_network_name.side_effect = [fake_network_obj]
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
network_ref = vif.get_network_ref('fake-session',
'fake-cluster',
vif_info)
calls = [mock.call('fake-session', 'fa0', 'fake-cluster')]
mock_network_name.assert_has_calls(calls)
self.assertEqual(fake_network_obj, network_ref)
@mock.patch.object(network_util, 'get_network_with_the_name',
return_value=None)
def test_raise_neutron_network_dvs(self, mock_network_name):
mock_network_name.side_effect = None
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_network_ref,
'fake-session',
'fake-cluster',
vif_info)
@mock.patch.object(network_util, 'get_network_with_the_name')
def test_get_network_ref_dvs_with_dvs_pg_id(self, mock_network_name):
fake_network_obj = {'type': 'DistributedVirtualPortgroup',
'dvpg': 'fake-key',
'dvsw': 'fake-props'}
mock_network_name.side_effect = [fake_network_obj]
vif_details = {
'dvs_id': 'fake-props',
'pg_id': 'fake-key'
}
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
address='DE:AD:BE:EF:00:00',
network=self._network,
details=vif_details)]
)[0]
network_ref = vif.get_network_ref('fake-session',
'fake-cluster',
vif_info)
fake_network_ref = {'type': 'DistributedVirtualPortgroup',
'dvpg': 'fake-key',
'dvsw': 'fake-props'}
self.assertEqual(network_ref, fake_network_ref)
calls = []
mock_network_name.assert_has_calls(calls)
self.assertEqual(fake_network_obj, network_ref)
| apache-2.0 |
alexthered/kienhoc-platform | lms/djangoapps/courseware/management/commands/clean_history.py | 84 | 7312 | """A command to clean the StudentModuleHistory table.
When we added XBlock storage, each field modification wrote a new history row
to the db. Now that we have bulk saves to avoid that database hammering, we
need to clean out the unnecessary rows from the database.
This command that does that.
"""
import datetime
import json
import logging
import optparse
import time
import traceback
from django.core.management.base import NoArgsCommand
from django.db import transaction
from django.db.models import Max
from courseware.models import StudentModuleHistory
class Command(NoArgsCommand):
"""The actual clean_history command to clean history rows."""
help = "Deletes unneeded rows from the StudentModuleHistory table."
option_list = NoArgsCommand.option_list + (
optparse.make_option(
'--batch',
type='int',
default=100,
help="Batch size, number of module_ids to examine in a transaction.",
),
optparse.make_option(
'--dry-run',
action='store_true',
default=False,
help="Don't change the database, just show what would be done.",
),
optparse.make_option(
'--sleep',
type='float',
default=0,
help="Seconds to sleep between batches.",
),
)
def handle_noargs(self, **options):
# We don't want to see the SQL output from the db layer.
logging.getLogger("django.db.backends").setLevel(logging.INFO)
smhc = StudentModuleHistoryCleaner(
dry_run=options["dry_run"],
)
smhc.main(batch_size=options["batch"], sleep=options["sleep"])
class StudentModuleHistoryCleaner(object):
"""Logic to clean rows from the StudentModuleHistory table."""
DELETE_GAP_SECS = 0.5 # Rows this close can be discarded.
STATE_FILE = "clean_history.json"
BATCH_SIZE = 100
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.next_student_module_id = 0
self.last_student_module_id = 0
def main(self, batch_size=None, sleep=0):
"""Invoked from the management command to do all the work."""
batch_size = batch_size or self.BATCH_SIZE
transaction.enter_transaction_management()
self.last_student_module_id = self.get_last_student_module_id()
self.load_state()
while self.next_student_module_id <= self.last_student_module_id:
for smid in self.module_ids_to_check(batch_size):
try:
self.clean_one_student_module(smid)
except Exception: # pylint: disable=broad-except
trace = traceback.format_exc()
self.say("Couldn't clean student_module_id {}:\n{}".format(smid, trace))
if not self.dry_run:
self.commit()
self.save_state()
if sleep:
time.sleep(sleep)
def say(self, message):
"""
Display a message to the user.
The message will have a trailing newline added to it.
"""
print message
def commit(self):
"""
Commit the transaction.
"""
self.say("Committing")
transaction.commit()
def load_state(self):
"""
Load the latest state from disk.
"""
try:
state_file = open(self.STATE_FILE)
except IOError:
self.say("No stored state")
self.next_student_module_id = 0
else:
with state_file:
state = json.load(state_file)
self.say(
"Loaded stored state: {}".format(
json.dumps(state, sort_keys=True)
)
)
self.next_student_module_id = state['next_student_module_id']
def save_state(self):
"""
Save the state to disk.
"""
state = {
'next_student_module_id': self.next_student_module_id,
}
with open(self.STATE_FILE, "w") as state_file:
json.dump(state, state_file)
self.say("Saved state: {}".format(json.dumps(state, sort_keys=True)))
def get_last_student_module_id(self):
"""
Return the id of the last student_module.
"""
last = StudentModuleHistory.objects.all() \
.aggregate(Max('student_module'))['student_module__max']
self.say("Last student_module_id is {}".format(last))
return last
def module_ids_to_check(self, batch_size):
"""Produce a sequence of student module ids to check.
`batch_size` is how many module ids to produce, max.
The sequence starts with `next_student_module_id`, and goes up to
and including `last_student_module_id`.
`next_student_module_id` is updated as each id is yielded.
"""
start = self.next_student_module_id
for smid in range(start, start + batch_size):
if smid > self.last_student_module_id:
break
yield smid
self.next_student_module_id = smid + 1
def get_history_for_student_modules(self, student_module_id):
"""
Get the history rows for a student module.
```student_module_id```: the id of the student module we're
interested in.
Return a list: [(id, created), ...], all the rows of history.
"""
history = StudentModuleHistory.objects \
.filter(student_module=student_module_id) \
.order_by('created', 'id')
return [(row.id, row.created) for row in history]
def delete_history(self, ids_to_delete):
"""
Delete history rows.
```ids_to_delete```: a non-empty list (or set...) of history row ids to delete.
"""
assert ids_to_delete
StudentModuleHistory.objects.filter(id__in=ids_to_delete).delete()
def clean_one_student_module(self, student_module_id):
"""Clean one StudentModule's-worth of history.
`student_module_id`: the id of the StudentModule to process.
"""
delete_gap = datetime.timedelta(seconds=self.DELETE_GAP_SECS)
history = self.get_history_for_student_modules(student_module_id)
if not history:
self.say("No history for student_module_id {}".format(student_module_id))
return
ids_to_delete = []
next_created = None
for history_id, created in reversed(history):
if next_created is not None:
# Compare this timestamp with the next one.
if (next_created - created) < delete_gap:
# This row is followed closely by another, we can discard
# this one.
ids_to_delete.append(history_id)
next_created = created
verb = "Would have deleted" if self.dry_run else "Deleting"
self.say("{verb} {to_delete} rows of {total} for student_module_id {id}".format(
verb=verb,
to_delete=len(ids_to_delete),
total=len(history),
id=student_module_id,
))
if ids_to_delete and not self.dry_run:
self.delete_history(ids_to_delete)
| agpl-3.0 |
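# Editor's note: the pruning rule above, distilled to plain data. Scanning
# newest-to-oldest, a row is discarded when the next newer row is within
# DELETE_GAP_SECS of it, so the newest row of each close-together run stays.
from datetime import datetime, timedelta

GAP = timedelta(seconds=0.5)
rows = [(1, datetime(2000, 1, 1, 0, 0, 0, 0)),
        (2, datetime(2000, 1, 1, 0, 0, 0, 100000)),  # 0.1s after row 1
        (3, datetime(2000, 1, 1, 0, 0, 5))]          # well separated

ids_to_delete, next_created = [], None
for history_id, created in reversed(rows):
    if next_created is not None and (next_created - created) < GAP:
        ids_to_delete.append(history_id)
    next_created = created
assert ids_to_delete == [1]  # row 1 is shadowed by row 2; rows 2 and 3 survive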
veger/ansible | lib/ansible/utils/ssh_functions.py | 148 | 1600 | # (c) 2016, James Tanner
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import subprocess
from ansible.module_utils._text import to_bytes
_HAS_CONTROLPERSIST = {}
def check_for_controlpersist(ssh_executable):
try:
# If we've already checked this executable
return _HAS_CONTROLPERSIST[ssh_executable]
except KeyError:
pass
b_ssh_exec = to_bytes(ssh_executable, errors='surrogate_or_strict')
has_cp = True
try:
cmd = subprocess.Popen([b_ssh_exec, '-o', 'ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if b"Bad configuration option" in err or b"Usage:" in err:
has_cp = False
except OSError:
has_cp = False
_HAS_CONTROLPERSIST[ssh_executable] = has_cp
return has_cp
| gpl-3.0 |
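# Editor's note: a minimal usage sketch. The result is memoized per
# executable in _HAS_CONTROLPERSIST, so repeated calls don't re-probe ssh.
from ansible.utils.ssh_functions import check_for_controlpersist

if check_for_controlpersist('ssh'):
    print('ssh supports ControlPersist; connection sharing is available')
else:
    print('falling back to a fresh connection per task')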
nashve/mythbox | resources/lib/twisted/twisted/conch/manhole_ssh.py | 61 | 5076 | # -*- test-case-name: twisted.conch.test.test_manhole -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
insults/SSH integration support.
@author: Jp Calderone
"""
from zope.interface import implements
from twisted.conch import avatar, interfaces as iconch, error as econch
from twisted.conch.ssh import factory, keys, session
from twisted.cred import credentials, checkers, portal
from twisted.python import components
from twisted.conch.insults import insults
class _Glue:
"""A feeble class for making one attribute look like another.
This should be replaced with a real class at some point, probably.
Try not to write new code that uses it.
"""
def __init__(self, **kw):
self.__dict__.update(kw)
def __getattr__(self, name):
raise AttributeError(self.name, "has no attribute", name)
class TerminalSessionTransport:
def __init__(self, proto, chainedProtocol, avatar, width, height):
self.proto = proto
self.avatar = avatar
self.chainedProtocol = chainedProtocol
session = self.proto.session
self.proto.makeConnection(
_Glue(write=self.chainedProtocol.dataReceived,
loseConnection=lambda: avatar.conn.sendClose(session),
name="SSH Proto Transport"))
def loseConnection():
self.proto.loseConnection()
self.chainedProtocol.makeConnection(
_Glue(write=self.proto.write,
loseConnection=loseConnection,
name="Chained Proto Transport"))
# XXX TODO
# chainedProtocol is supposed to be an ITerminalTransport,
# maybe. That means perhaps its terminalProtocol attribute is
# an ITerminalProtocol, it could be. So calling terminalSize
# on that should do the right thing. But it'd be nice to clean
# this bit up.
self.chainedProtocol.terminalProtocol.terminalSize(width, height)
class TerminalSession(components.Adapter):
implements(iconch.ISession)
transportFactory = TerminalSessionTransport
chainedProtocolFactory = insults.ServerProtocol
def getPty(self, term, windowSize, attrs):
self.height, self.width = windowSize[:2]
def openShell(self, proto):
self.transportFactory(
proto, self.chainedProtocolFactory(),
iconch.IConchUser(self.original),
self.width, self.height)
def execCommand(self, proto, cmd):
raise econch.ConchError("Cannot execute commands")
def closed(self):
pass
class TerminalUser(avatar.ConchUser, components.Adapter):
def __init__(self, original, avatarId):
components.Adapter.__init__(self, original)
avatar.ConchUser.__init__(self)
self.channelLookup['session'] = session.SSHSession
class TerminalRealm:
userFactory = TerminalUser
sessionFactory = TerminalSession
transportFactory = TerminalSessionTransport
chainedProtocolFactory = insults.ServerProtocol
def _getAvatar(self, avatarId):
comp = components.Componentized()
user = self.userFactory(comp, avatarId)
sess = self.sessionFactory(comp)
sess.transportFactory = self.transportFactory
sess.chainedProtocolFactory = self.chainedProtocolFactory
comp.setComponent(iconch.IConchUser, user)
comp.setComponent(iconch.ISession, sess)
return user
def __init__(self, transportFactory=None):
if transportFactory is not None:
self.transportFactory = transportFactory
def requestAvatar(self, avatarId, mind, *interfaces):
for i in interfaces:
if i is iconch.IConchUser:
return (iconch.IConchUser,
self._getAvatar(avatarId),
lambda: None)
raise NotImplementedError()
class ConchFactory(factory.SSHFactory):
publicKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJSkbh/C+BR3utDS555mV'
publicKeys = {
'ssh-rsa' : keys.Key.fromString(publicKey)
}
del publicKey
privateKey = """-----BEGIN RSA PRIVATE KEY-----
MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW
4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw
vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb
Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1
xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8
PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2
gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu
DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML
pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP
EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg==
-----END RSA PRIVATE KEY-----"""
privateKeys = {
'ssh-rsa' : keys.Key.fromString(privateKey)
}
del privateKey
def __init__(self, portal):
self.portal = portal
| gpl-2.0 |
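# Editor's note: a hedged sketch of wiring TerminalRealm into a running SSH
# manhole, following the usual Twisted recipe; the port and the in-memory
# credentials are placeholders for demonstration only.
from twisted.internet import reactor
from twisted.cred import portal, checkers
from twisted.conch import manhole, manhole_ssh
from twisted.conch.insults import insults

def chainedProtocolFactory():
    # Serve a ColoredManhole (with an empty namespace) inside insults.
    return insults.ServerProtocol(manhole.ColoredManhole, None)

realm = manhole_ssh.TerminalRealm()
realm.chainedProtocolFactory = chainedProtocolFactory
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(admin='changeme')
reactor.listenTCP(2222, manhole_ssh.ConchFactory(portal.Portal(realm, [checker])))
reactor.run()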
princeofdarkness76/thefuck | tests/rules/test_docker_not_command.py | 17 | 6175 | import pytest
from io import BytesIO
from tests.utils import Command
from thefuck.rules.docker_not_command import get_new_command, match
@pytest.fixture
def docker_help(mocker):
help = b'''Usage: docker [OPTIONS] COMMAND [arg...]
A self-sufficient runtime for linux containers.
Options:
--api-cors-header= Set CORS headers in the remote API
-b, --bridge= Attach containers to a network bridge
--bip= Specify network bridge IP
-D, --debug=false Enable debug mode
-d, --daemon=false Enable daemon mode
--default-gateway= Container default gateway IPv4 address
--default-gateway-v6= Container default gateway IPv6 address
--default-ulimit=[] Set default ulimits for containers
--dns=[] DNS server to use
--dns-search=[] DNS search domains to use
-e, --exec-driver=native Exec driver to use
--exec-opt=[] Set exec driver options
--exec-root=/var/run/docker Root of the Docker execdriver
--fixed-cidr= IPv4 subnet for fixed IPs
--fixed-cidr-v6= IPv6 subnet for fixed IPs
-G, --group=docker Group for the unix socket
-g, --graph=/var/lib/docker Root of the Docker runtime
-H, --host=[] Daemon socket(s) to connect to
-h, --help=false Print usage
--icc=true Enable inter-container communication
--insecure-registry=[] Enable insecure registry communication
--ip=0.0.0.0 Default IP when binding container ports
--ip-forward=true Enable net.ipv4.ip_forward
--ip-masq=true Enable IP masquerading
--iptables=true Enable addition of iptables rules
--ipv6=false Enable IPv6 networking
-l, --log-level=info Set the logging level
--label=[] Set key=value labels to the daemon
--log-driver=json-file Default driver for container logs
--log-opt=map[] Set log driver options
--mtu=0 Set the containers network MTU
-p, --pidfile=/var/run/docker.pid Path to use for daemon PID file
--registry-mirror=[] Preferred Docker registry mirror
-s, --storage-driver= Storage driver to use
--selinux-enabled=false Enable selinux support
--storage-opt=[] Set storage driver options
--tls=false Use TLS; implied by --tlsverify
--tlscacert=~/.docker/ca.pem Trust certs signed only by this CA
--tlscert=~/.docker/cert.pem Path to TLS certificate file
--tlskey=~/.docker/key.pem Path to TLS key file
--tlsverify=false Use TLS and verify the remote
--userland-proxy=true Use userland proxy for loopback traffic
-v, --version=false Print version information and quit
Commands:
attach Attach to a running container
build Build an image from a Dockerfile
commit Create a new image from a container's changes
cp Copy files/folders from a container's filesystem to the host path
create Create a new container
diff Inspect changes on a container's filesystem
events Get real time events from the server
exec Run a command in a running container
export Stream the contents of a container as a tar archive
history Show the history of an image
images List images
import Create a new filesystem image from the contents of a tarball
info Display system-wide information
inspect Return low-level information on a container or image
kill Kill a running container
load Load an image from a tar archive
login Register or log in to a Docker registry server
logout Log out from a Docker registry server
logs Fetch the logs of a container
pause Pause all processes within a container
port Lookup the public-facing port that is NAT-ed to PRIVATE_PORT
ps List containers
pull Pull an image or a repository from a Docker registry server
push Push an image or a repository to a Docker registry server
rename Rename an existing container
restart Restart a running container
rm Remove one or more containers
rmi Remove one or more images
run Run a command in a new container
save Save an image to a tar archive
search Search for an image on the Docker Hub
start Start a stopped container
stats Display a stream of a containers' resource usage statistics
stop Stop a running container
tag Tag an image into a repository
top Lookup the running processes of a container
unpause Unpause a paused container
version Show the Docker version information
wait Block until a container stops, then print its exit code
Run 'docker COMMAND --help' for more information on a command.
'''
mock = mocker.patch('subprocess.Popen')
mock.return_value.stdout = BytesIO(help)
return mock
def stderr(cmd):
return "docker: '{}' is not a docker command.\n" \
"See 'docker --help'.".format(cmd)
def test_match():
assert match(Command('docker pes', stderr=stderr('pes')))
@pytest.mark.parametrize('script, stderr', [
('docker ps', ''),
('cat pes', stderr('pes'))])
def test_not_match(script, stderr):
assert not match(Command(script, stderr=stderr))
@pytest.mark.usefixtures('docker_help')
@pytest.mark.parametrize('wrong, fixed', [
('pes', ['ps', 'push', 'pause']),
('tags', ['tag', 'stats', 'images'])])
def test_get_new_command(wrong, fixed):
command = Command('docker {}'.format(wrong), stderr=stderr(wrong))
assert get_new_command(command) == ['docker {}'.format(x) for x in fixed]
| mit |
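# Editor's note: a hypothetical, simplified rule of the same shape as the
# one under test, using difflib for suggestions. The real docker_not_command
# rule parses `docker --help` output, as the fixture above shows; the
# command list here is truncated for illustration.
import difflib

def match(command):
    return (command.script.startswith('docker')
            and "is not a docker command" in command.stderr)

def get_new_command(command, known_commands=('ps', 'push', 'pause', 'pull')):
    wrong = command.script.split()[1]
    close = difflib.get_close_matches(wrong, known_commands, n=3, cutoff=0.4)
    return ['docker {}'.format(cmd) for cmd in close]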
RafaelTorrealba/odoo | addons/crm/calendar_event.py | 375 | 1829 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
class calendar_event(osv.Model):
""" Model for Calendar Event """
_inherit = 'calendar.event'
_columns = {
'phonecall_id': fields.many2one('crm.phonecall', 'Phonecall'),
'opportunity_id': fields.many2one('crm.lead', 'Opportunity', domain="[('type', '=', 'opportunity')]"),
}
def create(self, cr, uid, vals, context=None):
res = super(calendar_event, self).create(cr, uid, vals, context=context)
obj = self.browse(cr, uid, res, context=context)
if obj.opportunity_id:
self.pool.get('crm.lead').log_meeting(cr, uid, [obj.opportunity_id.id], obj.name, obj.start, obj.duration, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ramr/origin | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | 29 | 44441 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
if is_state('kubernetes-worker.gpu.enabled'):
remove_state('kubernetes-worker.gpu.enabled')
try:
disable_gpu()
except ApplyNodeLabelFailed:
# Removing node label failed. Probably the master is unavailable.
# Proceed with the upgrade in hope GPUs will still be there.
hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify the user that we are in a transient state and the application
is still converging, potentially remotely, or we may be in a detached
loop wait state '''
# During deployment the worker has to start kubelet without cluster dns
# configured. It may be the first unit online in a service pool waiting
# to self-host the dns pod, in which case it will configure itself to query
# the dns service declared in the kube-system namespace
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in: waiting for dns,
waiting for cluster turnup, or ready to serve applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
set_state('kubernetes-worker.label-config-required')
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the
node. '''
# Get the user's configured labels.
config = hookenv.config()
user_labels = {}
for item in config.get('labels').split(' '):
if '=' in item:
key, val = item.split('=')
user_labels[key] = val
else:
hookenv.log('Skipping malformed option: {}.'.format(item))
# Collect the current label state.
current_labels = db.get('current_labels') or {}
# Remove any labels that the user has removed from the config.
for key in list(current_labels.keys()):
if key not in user_labels:
try:
remove_label(key)
del current_labels[key]
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Add any new labels.
for key, val in user_labels.items():
try:
set_label(key, val)
current_labels[key] = val
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Set the juju-application label.
try:
set_label('juju-application', hookenv.service_name())
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Label configuration complete.
remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
"""Set a flag to handle new docker login options.
If docker daemon options have also changed, set a flag to ensure the
daemon is restarted prior to running docker login.
"""
config = hookenv.config()
if data_changed('docker-opts', config['docker-opts']):
hookenv.log('Found new docker daemon options. Requesting a restart.')
# State will be removed by layer-docker after restart
set_state('docker.restart')
set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
"""Login to a docker registry with configured credentials."""
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
remove_state('kubernetes-worker.docker-login')
set_state('kubernetes-worker.restart-needed')
def arch():
    '''Return the dpkg package architecture for this system as a
    string (e.g. amd64).'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
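# A hedged sketch of the decode step above, runnable standalone: dpkg
# prints bytes such as b'amd64\n', and rstrip() plus decode() yield the
# plain string used later to pick per-arch images. b'amd64\n' is a
# hypothetical sample, not captured output.
def _demo_arch_decode():
    raw = b'amd64\n'  # stand-in for check_output(['dpkg', '--print-architecture'])
    assert raw.rstrip().decode('utf-8') == 'amd64'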
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
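# Minimal sketch of the parsing rule above, applied to a literal string
# instead of hookenv.config() so it runs outside the charm; the sample
# value 'v=4 fail-swap-on' is hypothetical.
def _demo_parse_extra_args():
    args = {}
    for element in 'v=4 fail-swap-on'.split():
        key, _, value = element.partition('=')
        args[key] = value if value else 'true'
    # Flags without '=' default to the string 'true'.
    assert args == {'v': '4', 'fail-swap-on': 'true'}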
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
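# Standalone sketch of the merge rule above: previous args are nulled out
# first, so options removed from config get unset in the snap, then base
# and extra args overwrite. All values below are hypothetical.
def _demo_merge_snap_args():
    prev_args = {'v': '4', 'old-flag': 'true'}
    base_args = {'v': '0'}
    extra_args = {'new-flag': 'true'}
    args = {arg: 'null' for arg in prev_args}
    args.update(base_args)
    args.update(extra_args)
    # 'old-flag' is reset to null; 'v' takes the fresh base value.
    assert args == {'v': '0', 'old-flag': 'null', 'new-flag': 'true'}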
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['node-ip'] = ingress_ip
    if dns['enable-kube-dns']:
kubelet_opts['cluster-dns'] = dns['sdn-ip']
# set --allow-privileged flag for kubelet
kubelet_opts['allow-privileged'] = set_privileged()
if is_state('kubernetes-worker.gpu.enabled'):
hookenv.log('Adding '
'--feature-gates=DevicePlugins=true '
'to kubelet')
kubelet_opts['feature-gates'] = 'DevicePlugins=true'
if is_state('endpoint.aws.ready'):
kubelet_opts['cloud-provider'] = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'gce'
kubelet_opts['cloud-config'] = str(cloud_config_path)
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
kube_proxy_opts['hostname-override'] = get_node_name()
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
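# Hedged sketch: builds (but does not run) the set-cluster command above
# with hypothetical placeholder paths and server address, to show the
# final command shape passed to check_call.
def _demo_set_cluster_cmd():
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    built = cmd.format('/tmp/kubeconfig', 'juju-cluster',
                       'https://10.0.0.1:6443', '/tmp/ca.crt')
    assert built.startswith('kubectl config --kubeconfig=/tmp/kubeconfig')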
@when_any('config.changed.default-backend-image',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
    # Guard against re-entry here from a config change to the image
    # while ingress is disabled; in that case there is nothing to launch.
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-s390x:1.4"
elif context['arch'] == 'arm64':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-arm64:1.4"
else:
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
if context['arch'] == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
elif context['arch'] == 'arm64':
context['ingress_image'] = \
"k8s.gcr.io/nginx-ingress-controller-arm64:0.9.0-beta.15"
else:
context['ingress_image'] = \
"k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
if get_version('kubelet') < (1, 9):
context['daemonset_api_version'] = 'extensions/v1beta1'
else:
context['daemonset_api_version'] = 'apps/v1beta2'
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
    '''Return the list of kubernetes api server endpoints
    (https://host:port) for this relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to
            # assume creation succeeded.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
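# Usage sketch (the manifest path below is hypothetical): 'create' is
# effectively idempotent because an existing definition short-circuits to
# True, while 'delete' passes --now for immediate removal.
# kubectl_manifest('create', '/root/cdk/addons/example.yaml')
# kubectl_manifest('delete', '/root/cdk/addons/example.yaml')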
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic; use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Return 'true' if privileged containers are needed.
This is when a) the user requested them
b) user does not care (auto) and GPUs are available in a pre
1.9 era
"""
privileged = hookenv.config('allow-privileged').lower()
gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
get_version('kubelet') < (1, 9))
if privileged == 'auto':
privileged = 'true' if gpu_needs_privileged else 'false'
if privileged == 'false' and gpu_needs_privileged:
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
# No need to restart kubernetes (set the restart-needed state)
# because set-privileged is already in the restart path
return privileged
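# Truth-table sketch of the 'auto' resolution above, re-derived standalone
# with hypothetical inputs (no charm state is read here).
def _demo_privileged_auto():
    for gpu_needs_privileged, expected in ((True, 'true'), (False, 'false')):
        privileged = 'auto'
        if privileged == 'auto':
            privileged = 'true' if gpu_needs_privileged else 'false'
        assert privileged == expected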
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
if get_version('kubelet') < (1, 9):
hookenv.status_set(
'active',
            'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`).
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
set_label('gpu', 'true')
set_label('cuda', 'true')
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
"""Cuda departed, probably due to the docker layer switching to a
non nvidia-docker."""
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
remove_label('gpu')
remove_label('cuda')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the
    # hostname even if it will only be used once RBAC is enabled.
nodeuser = 'system:node:{}'.format(get_node_name().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(get_node_name().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
if data_changed('kube-control.creds', creds):
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is
    upgrading a charm in a deployment that pre-dates the kube-control
    relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if is_state('endpoint.aws.ready'):
cloud_provider = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_provider = 'gcp'
if cloud_provider == 'aws':
return getfqdn()
else:
return gethostname()
class ApplyNodeLabelFailed(Exception):
pass
def persistent_call(cmd, retry_message):
deadline = time.time() + 180
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
return True
hookenv.log(retry_message)
time.sleep(1)
else:
return False
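# Illustrative call (the command below is a hypothetical stand-in): 'true'
# exits 0, so the loop returns on its first pass. A persistently failing
# command retries once per second until the 180s deadline, after which the
# while/else branch returns False.
def _demo_persistent_call():
    assert persistent_call(['true'], 'should not be logged') is True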
def set_label(label, value):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
cmd = cmd.format(kubeconfig_path, nodename, label, value)
cmd = cmd.split()
retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
def remove_label(label):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'
cmd = cmd.format(kubeconfig_path, nodename, label)
cmd = cmd.split()
retry = 'Failed to remove label {0}. Will retry.'.format(label)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud-request-sent')
def request_integration():
hookenv.status_set('maintenance', 'requesting cloud integration')
kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
cluster_tag = kube_control.get_cluster_tag()
if is_state('endpoint.aws.joined'):
cloud = endpoint_from_flag('endpoint.aws.joined')
cloud.tag_instance({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_security_group({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_subnet({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.enable_object_storage_management(['kubernetes-*'])
elif is_state('endpoint.gcp.joined'):
cloud = endpoint_from_flag('endpoint.gcp.joined')
cloud.label_instance({
'k8s-io-cluster-name': cluster_tag,
})
cloud.enable_object_storage_management()
cloud.enable_instance_inspection()
cloud.enable_dns_management()
set_state('kubernetes-worker.cloud-request-sent')
hookenv.status_set('waiting', 'waiting for cloud integration')
@when_none('endpoint.aws.joined',
'endpoint.gcp.joined')
def clear_requested_integration():
remove_state('kubernetes-worker.cloud-request-sent')
@when_any('endpoint.aws.ready',
'endpoint.gcp.ready')
@when_not('kubernetes-worker.restarted-for-cloud')
def restart_for_cloud():
if is_state('endpoint.gcp.ready'):
_write_gcp_snap_config('kubelet')
set_state('kubernetes-worker.restarted-for-cloud')
set_state('kubernetes-worker.restart-needed')
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _write_gcp_snap_config(component):
# gcp requires additional credentials setup
gcp = endpoint_from_flag('endpoint.gcp.ready')
creds_path = _gcp_creds_path(component)
with creds_path.open('w') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(gcp.credentials)
# create a cloud-config file that sets token-url to nil to make the
# services use the creds env var instead of the metadata server, as
# well as making the cluster multizone
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('[Global]\n'
'token-url = nil\n'
'multizone = true\n')
daemon_env_path = _daemon_env_path(component)
if daemon_env_path.exists():
daemon_env = daemon_env_path.read_text()
if not daemon_env.endswith('\n'):
daemon_env += '\n'
else:
daemon_env = ''
if gcp_creds_env_key not in daemon_env:
daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
daemon_env_path.write_text(daemon_env)
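# Standalone sketch of the idempotent append above: the creds line is only
# added when the key is absent, so repeated runs leave exactly one entry.
# The key name and path below are hypothetical placeholders for
# gcp_creds_env_key and creds_path.
def _demo_daemon_env_append():
    key, path = 'EXAMPLE_CREDS_KEY', '/tmp/creds.json'
    daemon_env = 'EXISTING=1\n'
    for _ in range(2):  # the second pass is a no-op
        if key not in daemon_env:
            daemon_env += '{}={}\n'.format(key, path)
    assert daemon_env == 'EXISTING=1\nEXAMPLE_CREDS_KEY=/tmp/creds.json\n'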
def get_first_mount(mount_relation):
mount_relation_list = mount_relation.mounts()
    if mount_relation_list:
# mount relation list is a list of the mount layer relations
# for now we just use the first one that is nfs
for mount in mount_relation_list:
# for now we just check the first mount and use that.
# the nfs charm only supports one for now.
if ('mounts' in mount and
mount['mounts'][0]['fstype'] == 'nfs'):
return mount['mounts'][0]
return None
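# Hedged sketch of the relation data shape this helper expects; the dict
# below is a hypothetical example, not real relation output.
def _demo_first_mount_shape():
    mount_relation_list = [
        {'mounts': [{'fstype': 'nfs', 'hostname': '10.0.0.2',
                     'mountpoint': '/srv/nfs', 'options': 'rw'}]},
    ]
    first = mount_relation_list[0]['mounts'][0]
    assert first['fstype'] == 'nfs'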
@when('nfs.available')
def nfs_state_control(mount):
    ''' Determine whether the relationship data has changed; if so, remove
    the state that gates the nfs-relation-changed re-render so any configs
    get re-rendered. '''
mount_data = get_first_mount(mount)
if mount_data:
nfs_relation_data = {
'options': mount_data['options'],
'host': mount_data['hostname'],
'mountpoint': mount_data['mountpoint'],
'fstype': mount_data['fstype']
}
# Re-execute the rendering if the data has changed.
if data_changed('nfs-config', nfs_relation_data):
hookenv.log('reconfiguring nfs')
remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
'''NFS on kubernetes requires nfs config rendered into a deployment of
the nfs client provisioner. That will handle the persistent volume claims
with no persistent volume to back them.'''
mount_data = get_first_mount(mount)
if not mount_data:
return
addon_path = '/root/cdk/addons/{}'
# Render the NFS deployment
manifest = addon_path.format('nfs-provisioner.yaml')
render('nfs-provisioner.yaml', manifest, mount_data)
hookenv.log('Creating the nfs provisioner.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create nfs provisioner. Will attempt again next update.') # noqa
return
set_state('nfs.configured')
| apache-2.0 |
jonesmat/codechallenge | main.py | 2 | 8187 | import os
import uuid
import time
from flask import Flask, request, render_template, redirect, make_response
from werkzeug.utils import secure_filename
from data_manager import DataManager
from model.puzzle_manager import PuzzleManager
from model.puzzle import Puzzle, PuzzleState
from model.problem import Problem
from model.problem_attempt import ProblemAttempt
########## Initialization ###########
app = Flask(__name__, static_url_path="/content", static_folder = "content")
datamgr = DataManager()
datamgr.load()
puzzmgr = PuzzleManager(datamgr)
puzzmgr.load()
ADMIN_PASSWORD = 'puzzles'
# Setup web app configurations
app.config['SOLUTION_FOLDER'] = 'solutions/'
if not os.path.exists(app.config['SOLUTION_FOLDER']):
os.makedirs(app.config['SOLUTION_FOLDER'])
app.config['MAX_CONTENT_LENGTH'] = 10 * 1024 # 10KB
########## Routes ###########
@app.route('/', methods=['GET', 'POST'])
def home():
try:
if request.method == 'GET':
teamname = request.cookies.get('teamname')
if not teamname: # not set
return render_template('team_login.html')
return render_template('home.html', puzzles=puzzmgr.puzzles,
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=teamname)
else:
# Post
# Get the teamname from the form post
teamname = request.form['teamname']
if not teamname: # not set
return render_template('team_login.html')
resp = make_response(render_template('home.html', puzzles=puzzmgr.puzzles,
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=teamname))
resp.set_cookie('teamname', teamname)
return resp
except Exception as ex:
return render_template('error.html', error=str(ex)), 500
@app.route('/changeteamname')
def change_teamname():
resp = make_response(redirect('/'))
resp.set_cookie('teamname', '')
return resp
@app.route('/puzzle/<puzzle_id>', methods=['GET', 'POST'])
def show_puzzle(puzzle_id):
try:
if request.method == 'GET':
if not request.cookies.get('teamname'): # teamname not set, can't view puzzle
return render_template('team_login.html')
puzzle = puzzmgr.get_puzzle(puzzle_id)
if puzzle is None:
return render_template('error.html', error="Puzzle does not exist!"), 403
if puzzle.state == PuzzleState.OPEN:
return render_template('puzzle.html', puzzle=puzzle,
global_point_totals=puzzmgr.get_global_point_totals(),
puzzle_point_totals=puzzle.get_puzzle_point_totals(),
teamname=request.cookies.get('teamname'))
elif puzzle.state == PuzzleState.CLOSED:
return render_template('puzzle_closed.html', puzzle=puzzle,
global_point_totals=puzzmgr.get_global_point_totals(),
puzzle_point_totals=puzzle.get_puzzle_point_totals(),
teamname=request.cookies.get('teamname'))
elif puzzle.state == PuzzleState.NEW:
return render_template('error.html', error="Puzzle is not yet available!"), 403
else:
if 'submit_teamname' in request.form:
# Teamname just submitted
# Get the teamname from the form post
teamname = request.form['teamname']
if not teamname: # not set
return render_template('team_login.html')
resp = make_response(redirect('/puzzle/' + puzzle_id))
resp.set_cookie('teamname', teamname)
return resp
# Problem attempt submission
# retrieve form data
prob_id = request.form['prob_id']
teamname = request.cookies.get('teamname')
solution_file = request.files['solution_file'] # Returns the actual File obj
# ensure the puzzle is open and accepting submissions
puzzle = puzzmgr.get_puzzle(puzzle_id)
if puzzle.state != PuzzleState.OPEN:
# The puzzle is not open, redirect to the closed puzzle page
return render_template('puzzle_closed.html', puzzle=puzzle,
global_point_totals=puzzmgr.get_global_point_totals(),
puzzle_point_totals=puzzle.get_puzzle_point_totals(),
teamname=request.cookies.get('teamname'))
# Clean and truncate teamname to prevent abuse
teamname = teamname.strip()
if len(teamname) > 15:
teamname = teamname[0:12] + '...'
# Store the uploaded solution file
solution_filepath = ''
if solution_file:
filename = secure_filename(solution_file.filename)
filename += str(uuid.uuid1())
solution_filepath = os.path.join(app.config['SOLUTION_FOLDER'],
filename)
solution_file.save(solution_filepath)
else:
# A solution file is required, can't continue without it
return redirect('/', code=302)
# Feed solution and problem files to puzzle app to score the attempt.
problem = puzzle.get_problem(prob_id)
score, error_msg = puzzmgr.score_attempt(puzzle.app_path, problem.problem_file, solution_filepath)
# Record the attempt
attempt = ProblemAttempt()
attempt.teamname = teamname
attempt.score = -1 # -1 indicates an error
if score > 0:
attempt.score = score
attempt.timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
attempt.timedata = time.time()
attempt.solution_filepath = solution_filepath
problem.attempts.append(attempt)
                puzzmgr.save()  # Data changed, let's save it.
else:
pass # TODO log it
return render_template('puzzle_submitted.html', puzzle_id=puzzle_id, attempt=attempt,
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=request.cookies.get('teamname'),
error_msg=error_msg)
except Exception as ex:
return render_template('error.html', error=str(ex)), 500
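# Sketch of the solution-file naming scheme used in show_puzzle above:
# secure_filename sanitises the client-supplied name and the uuid1 suffix
# keeps repeated uploads from colliding. 'report.txt' is a hypothetical
# upload name, not data from a real request.
def _demo_solution_filename():
    filename = secure_filename('report.txt') + str(uuid.uuid1())
    assert filename.startswith('report.txt')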
@app.route('/admin', methods=['GET', 'POST'])
def show_admin():
try:
if request.method == 'GET':
return render_template('admin_login.html',
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=request.cookies.get('teamname'))
else:
if 'passcode' in request.form:
# Admin is attempting to login
passcode = request.form['passcode']
if passcode == ADMIN_PASSWORD:
return render_template('admin.html', puzzmgr=puzzmgr,
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=request.cookies.get('teamname'))
else:
return render_template('admin_login.html',
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=request.cookies.get('teamname'))
elif 'puzzle_id' in request.form:
# Admin is updating a puzzle
puzzle = puzzmgr.get_puzzle(request.form['puzzle_id'])
assert(puzzle)
# Determine which action the admin is taking on the puzzle
if 'open_puzzle' in request.form and puzzle.state != PuzzleState.OPEN:
puzzle.state = PuzzleState.OPEN
elif 'close_puzzle' in request.form and puzzle.state != PuzzleState.CLOSED:
puzzle.state = PuzzleState.CLOSED
elif 'reset_puzzle' in request.form and puzzle.state != PuzzleState.NEW:
puzzle.reset()
puzzmgr.save() # Data changed, save it
# Return to the admin page after updating the puzzle
return render_template('admin.html', puzzmgr=puzzmgr,
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=request.cookies.get('teamname'))
elif 'reload_data' in request.form:
# Admin is requesting a data reload
datamgr.load()
puzzmgr.load()
# Reload the admin page
return render_template('admin.html', puzzmgr=puzzmgr,
global_point_totals=puzzmgr.get_global_point_totals(),
teamname=request.cookies.get('teamname'))
except Exception as ex:
return render_template('error.html', error=str(ex)), 500
# Error handlers
@app.errorhandler(403)
def forbidden_request(error):
return render_template('error.html', error=error), 403
@app.errorhandler(404)
def page_not_found_request(error):
return render_template('error.html', error=error), 404
@app.errorhandler(410)
def gone_request(error):
return render_template('error.html', error=error), 410
@app.errorhandler(413)
def file_too_large_request(error):
return render_template('error.html', error=error), 413
@app.errorhandler(500)
def internal_server_error_request(error):
return render_template('error.html', error=error), 500
########## Main ###########
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
| mit |
neuroelectro/neuroelectro_org | neuroelectro/south_migrations/0077_auto__add_neurondata__add_ephysproperty__add_neuron.py | 2 | 33888 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'NeuronData'
db.create_table(u'neuroelectro_neurondata', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.NeuronDataAddMain'])),
('neuron_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'neuroelectro', ['NeuronData'])
# Adding model 'EphysProperty'
db.create_table(u'neuroelectro_ephysproperty', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('neuron_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.NeuronData'])),
('ephys_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ephys_value', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'neuroelectro', ['EphysProperty'])
# Adding model 'NeuronDataAddMain'
db.create_table(u'neuroelectro_neurondataaddmain', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pubmed_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'neuroelectro', ['NeuronDataAddMain'])
def backwards(self, orm):
# Deleting model 'NeuronData'
db.delete_table(u'neuroelectro_neurondata')
# Deleting model 'EphysProperty'
db.delete_table(u'neuroelectro_ephysproperty')
# Deleting model 'NeuronDataAddMain'
db.delete_table(u'neuroelectro_neurondataaddmain')
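    # Hedged usage note: with South installed, a migration like this is
    # normally applied or rolled back from the command line, e.g.:
    #   python manage.py migrate neuroelectro 0077
    # (roll back by targeting the previous migration number). The exact
    # invocation depends on the deployment.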
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'neuroelectro.api': {
'Meta': {'object_name': 'API'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'author_list_str': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Author']", 'null': 'True', 'symmetrical': 'False'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'pub_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'suggester': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'full_text_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'neuroelectro.articlefulltextstat': {
'Meta': {'object_name': 'ArticleFullTextStat'},
'article_full_text': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.ArticleFullText']"}),
'data_table_ephys_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata_human_assigned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'methods_tag_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'neuron_article_map_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_unique_ephys_concept_maps': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.articlemetadatamap': {
'Meta': {'object_name': 'ArticleMetaDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.MetaData']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'})
},
u'neuroelectro.articlesummary': {
'Meta': {'object_name': 'ArticleSummary'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.author': {
'Meta': {'object_name': 'Author'},
'first': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'last': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'middle': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.contvalue': {
'Meta': {'object_name': 'ContValue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'mean': ('django.db.models.fields.FloatField', [], {}),
'min_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'stderr': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'stdev': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.datasource': {
'Meta': {'object_name': 'DataSource'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataTable']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_submission': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.UserSubmission']", 'null': 'True'}),
'user_upload': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.UserUpload']", 'null': 'True'})
},
u'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'needs_expert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
u'neuroelectro.ephysconceptmap': {
'Meta': {'object_name': 'EphysConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'definition': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'norm_criteria': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.EphysPropSyn']", 'symmetrical': 'False'}),
'units': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Unit']", 'null': 'True'})
},
u'neuroelectro.ephysproperty': {
'Meta': {'object_name': 'EphysProperty'},
'ephys_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ephys_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.NeuronData']"})
},
u'neuroelectro.ephyspropsummary': {
'Meta': {'object_name': 'EphysPropSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_mean_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'neuroelectro.institution': {
'Meta': {'object_name': 'Institution'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
u'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Publisher']", 'null': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.mailinglistentry': {
'Meta': {'object_name': 'MailingListEntry'},
'comments': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
u'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.metadata': {
'Meta': {'object_name': 'MetaData'},
'cont_value': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.ContValue']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ref_text': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.ReferenceText']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'neuron_db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
u'neuroelectro.neuronarticlemap': {
'Meta': {'object_name': 'NeuronArticleMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.neuronconceptmap': {
'Meta': {'object_name': 'NeuronConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'neuroelectro.neurondata': {
'Meta': {'object_name': 'NeuronData'},
'article_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.NeuronDataAddMain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'neuroelectro.neurondataaddmain': {
'Meta': {'object_name': 'NeuronDataAddMain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pubmed_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'neuroelectro.neuronephysdatamap': {
'Meta': {'object_name': 'NeuronEphysDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysConceptMap']"}),
'err': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'metadata': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.MetaData']", 'symmetrical': 'False'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.NeuronConceptMap']"}),
'norm_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_norm': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.neuronephyssummary': {
'Meta': {'object_name': 'NeuronEphysSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.neuronsummary': {
'Meta': {'object_name': 'NeuronSummary'},
'cluster_xval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'cluster_yval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_ephysprops': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
u'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.publisher': {
'Meta': {'object_name': 'Publisher'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'neuroelectro.referencetext': {
'Meta': {'object_name': 'ReferenceText'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
},
u'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'expr_density': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'expr_energy': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'expr_energy_cv': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': u"orm['neuroelectro.BrainRegion']"})
},
u'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.unit': {
'Meta': {'object_name': 'Unit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
u'neuroelectro.user': {
'Meta': {'object_name': 'User'},
'assigned_neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Institution']", 'null': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_curator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lab_head': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'lab_website_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'neuroelectro.usersubmission': {
'Meta': {'object_name': 'UserSubmission'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']", 'null': 'True'}),
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']"})
},
u'neuroelectro.userupload': {
'Meta': {'object_name': 'UserUpload'},
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']"})
}
}
complete_apps = ['neuroelectro'] | gpl-2.0 |
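The frozen ORM entries above follow South's (field_class_path, positional_args, kwargs) convention, where every kwarg value is stored as the repr of the original argument. A minimal sketch of how one such triple maps back onto a live Django field (illustrative only; assumes a standard Django install):

from django.db import models

# ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
note_field = models.CharField(max_length=200, null=True)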
aninternetof/bremen | bremenenv/lib/python3.5/site-packages/django/db/utils.py | 192 | 11323 | import inspect
import os
import pkgutil
import warnings
from importlib import import_module
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils._os import npath, upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception if six.PY3 else StandardError): # NOQA: StandardError undefined on PY3
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper(object):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
dj_exc_value.__cause__ = exc_value
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
six.reraise(dj_exc_type, dj_exc_value, traceback)
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
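# Illustrative usage sketch (assumption, not part of Django's source):
# DatabaseErrorWrapper serves both as a context manager and as a decorator.
# `connection` stands for an already-configured backend wrapper exposing a
# PEP-249 `Database` module, as required by __init__ above.
def _example_wrap_execute(connection, cursor, sql):
    # Backend exceptions raised inside the block re-surface as the matching
    # Django exception (DataError, IntegrityError, ...) with __cause__ set.
    with DatabaseErrorWrapper(connection):
        cursor.execute(sql)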
def load_backend(backend_name):
"""
Return a database backend's "base" module given a fully qualified database
backend name, or raise an error if it doesn't exist.
"""
# This backend was renamed in Django 1.9.
if backend_name == 'django.db.backends.postgresql_psycopg2':
backend_name = 'django.db.backends.postgresql'
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
try:
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([npath(backend_dir)])
if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}
]
except EnvironmentError:
builtin_backends = []
if backend_name not in ['django.db.backends.%s' % b for b in
builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
error_msg = ("%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX "
"is one of:\n %s\nError was: %s" %
(backend_name, ", ".join(backend_reprs), e_user))
raise ImproperlyConfigured(error_msg)
else:
# If there's some other error, this must be an error in Django
raise
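# Illustrative sketch (assumption, not part of Django's source): load_backend
# returns the backend's "base" module, whose DatabaseWrapper class is what
# ConnectionHandler instantiates below.
def _example_load_backend():
    backend = load_backend('django.db.backends.sqlite3')
    return backend.DatabaseWrapper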
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
self._connections = local()
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
return self._databases
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
        where no settings are provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Makes sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
test_settings.setdefault(key, None)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
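# Illustrative sketch (assumption, not part of Django's source): a standalone
# handler over an explicit DATABASES-style dict; indexing by alias lazily
# applies defaults, loads the backend, and caches one connection per thread.
def _example_connection_handler():
    handler = ConnectionHandler({
        DEFAULT_DB_ALIAS: {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:',
        },
    })
    return handler[DEFAULT_DB_ALIAS]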
class ConnectionRouter(object):
def __init__(self, routers=None):
"""
If routers is not specified, will default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, six.string_types):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
if six.PY3:
sig = inspect.signature(router.allow_migrate)
has_deprecated_signature = not any(
p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
)
else:
argspec = inspect.getargspec(router.allow_migrate)
has_deprecated_signature = len(argspec.args) == 3 and not argspec.keywords
if has_deprecated_signature:
warnings.warn(
"The signature of allow_migrate has changed from "
"allow_migrate(self, db, model) to "
"allow_migrate(self, db, app_label, model_name=None, **hints). "
"Support for the old signature will be removed in Django 1.10.",
RemovedInDjango110Warning)
model = hints.get('model')
allow = None if model is None else method(db, model)
else:
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""
Return app models allowed to be synchronized on provided db.
"""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
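# Illustrative sketch (assumption, not part of Django's source): the minimal
# shape of a router object ConnectionRouter accepts -- any method a router
# does not define is simply skipped. The 'replica' alias is hypothetical.
class _ExampleReplicaRouter(object):
    def db_for_read(self, model, **hints):
        return 'replica'
    def db_for_write(self, model, **hints):
        return DEFAULT_DB_ALIAS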
| mit |
jacebrowning/gdm-demo | setup.py | 1 | 1027 | #!/usr/bin/env python
"""Setup script for GDM."""
import setuptools
from gdm import __project__, __version__, CLI, DESCRIPTION
import os
if os.path.exists('README.rst'):
README = open('README.rst').read()
else:
README = "" # a placeholder, readme is generated on release
CHANGES = open('CHANGES.md').read()
setuptools.setup(
name=__project__,
version=__version__,
description=DESCRIPTION,
url='https://github.com/jacebrowning/gdm',
author='Jace Browning',
author_email='jacebrowning@gmail.com',
packages=setuptools.find_packages(),
entry_points={'console_scripts': [CLI + ' = gdm.cli:main']},
long_description=(README + '\n' + CHANGES),
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=open('requirements.txt').readlines(),
)
| mit |
brenton/openshift-ansible | inventory/openstack/hosts/openstack.py | 11 | 8956 | #!/usr/bin/env python
# pylint: skip-file
# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# The OpenStack Inventory module uses os-client-config for configuration.
# https://github.com/stackforge/os-client-config
# This means it will either:
# - Respect normal OS_* environment variables like other OpenStack tools
# - Read values from a clouds.yaml file.
# If you want to configure via clouds.yaml, you can put the file in:
# - Current directory
# - ~/.config/openstack/clouds.yaml
# - /etc/openstack/clouds.yaml
# - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will connect to
# all of them and present them as one contiguous inventory.
#
# See the adjacent openstack.yml file for an example config file
# There are two ansible inventory specific options that can be set in
# the inventory section.
# expand_hostvars controls whether or not the inventory will make extra API
# calls to fill out additional information about each server
# use_hostnames changes the behavior from registering every host with its UUID
# and making a group of its hostname to only doing this if the
# hostname in question has more than one server
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
# has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True)
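# A minimal clouds.yaml sketch matching the options described above (the
# cloud name, auth values, and the ansible section are made-up placeholders):
#
# clouds:
#   mycloud:
#     auth:
#       auth_url: http://openstack.example.com:5000/v2.0
#       username: demo
#       password: secret
#       project_name: demo
# ansible:
#   use_hostnames: True
#   expand_hostvars: False
#   fail_on_errors: True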
import argparse
import collections
import os
import sys
import time
from distutils.version import StrictVersion
try:
import json
except ImportError:
import simplejson as json
import os_client_config
import shade
import shade.inventory
CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
def get_groups_from_server(server_vars, namegroup=True):
groups = []
region = server_vars['region']
cloud = server_vars['cloud']
metadata = server_vars.get('metadata', {})
# Create a group for the cloud
groups.append(cloud)
# Create a group on region
groups.append(region)
# And one by cloud_region
groups.append("%s_%s" % (cloud, region))
# Check if group metadata key in servers' metadata
if 'group' in metadata:
groups.append(metadata['group'])
for extra_group in metadata.get('groups', '').split(','):
if extra_group:
groups.append(extra_group.strip())
groups.append('instance-%s' % server_vars['id'])
if namegroup:
groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(metadata.items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud, region, az))
return groups
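# Illustrative sketch (not part of the original script): the groups derived
# for a hand-written server dict of the shape shade returns; every value
# below is a made-up placeholder.
def _example_groups():
    server = {
        'id': 'abc123',
        'name': 'web01',
        'cloud': 'mycloud',
        'region': 'RegionOne',
        'az': 'nova',
        'flavor': {'name': 'm1.small'},
        'image': {'name': 'ubuntu'},
        'metadata': {'group': 'webservers', 'groups': 'prod,frontend'},
    }
    # e.g. ['mycloud', 'RegionOne', 'mycloud_RegionOne', 'webservers',
    #       'prod', 'frontend', 'instance-abc123', 'flavor-m1.small',
    #       'image-ubuntu', 'meta-group_webservers', ..., 'nova',
    #       'RegionOne_nova', 'mycloud_RegionOne_nova']
    return get_groups_from_server(server, namegroup=False)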
def get_host_groups(inventory, refresh=False):
(cache_file, cache_expiration_time) = get_cache_settings()
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
open(cache_file, 'w').write(groups)
else:
groups = open(cache_file, 'r').read()
return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'],
openstack=server)
for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key)
def get_host_groups_from_cloud(inventory):
groups = collections.defaultdict(list)
firstpass = collections.defaultdict(list)
hostvars = {}
list_args = {}
if hasattr(inventory, 'extra_config'):
use_hostnames = inventory.extra_config['use_hostnames']
list_args['expand'] = inventory.extra_config['expand_hostvars']
if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
list_args['fail_on_cloud_config'] = \
inventory.extra_config['fail_on_errors']
else:
use_hostnames = False
for server in inventory.list_hosts(**list_args):
if 'interface_ip' not in server:
continue
firstpass[server['name']].append(server)
for name, servers in firstpass.items():
if len(servers) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
server_ids = set()
# Trap for duplicate results
for server in servers:
server_ids.add(server['id'])
if len(server_ids) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
for server in servers:
append_hostvars(
hostvars, groups, server['id'], server,
namegroup=True)
groups['_meta'] = {'hostvars': hostvars}
return groups
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
''' Determines if cache file has expired, or if it is still valid '''
if refresh:
return True
if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
if (mod_time + cache_expiration_time) > current_time:
return False
return True
def get_cache_settings():
config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
return (cache_file, cache_expiration_time)
def to_json(in_dict):
return json.dumps(in_dict, sort_keys=True, indent=2)
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--private',
action='store_true',
help='Use private address for ansible host')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
try:
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
shade.simple_logging(debug=args.debug)
inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
)
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
config_key='ansible',
config_defaults={
'use_hostnames': False,
'expand_hostvars': True,
'fail_on_errors': True,
}
))
inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list:
output = get_host_groups(inventory, refresh=args.refresh)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
except shade.OpenStackCloudException as e:
sys.stderr.write('%s\n' % e.message)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| apache-2.0 |
jansel/opentuner | examples/unitary/unitary.py | 1 | 4271 | #!/usr/bin/env python
#
# This is a quantum control example motivated by the experimental need
# to synthesize unitary matrices in SU(2) in optimal time, given an
# explicit and finite control set generating the whole space, and an
# admissible error.
#
# See problem_description.pdf for additional details.
#
# Contributed by Clarice D. Aiello <clarice@mit.edu>
#
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
import adddeps # fix sys.path
import argparse
import logging
import math
import random
import sys
try:
import numpy as np
except ImportError:
print('''
ERROR: import numpy failed, please install numpy
Possible things to try:
../../venv/bin/pip install numpy
../../venv/bin/easy_install numpy
sudo apt-get install python-numpy
''', file=sys.stderr)
raise
import opentuner
from math import sqrt
import cla_func
from input_generator import (generate_random_Ugoal_HARD,
generate_random_Ugoal_EASY,
generate_random_Ugoal_RANDOM)
from opentuner.search.manipulator import (ConfigurationManipulator,
SwitchParameter,
IntegerParameter,
FloatParameter)
def generate_random_Ugoal_FIXED(**kwargs):
    Ag = old_div(-1, sqrt(10))
    Bg = old_div(sqrt(2), sqrt(10))
    Cg = old_div(-sqrt(3), sqrt(10))
    Dg = old_div(-sqrt(4), sqrt(10))
return cla_func.np.matrix(
[[Ag + Cg * 1j, Bg + Dg * 1j], [-Bg + Dg * 1j, Ag - Cg * 1j]])
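# Illustrative check (not part of the original example): the fixed goal is
# unitary because Ag**2 + Bg**2 + Cg**2 + Dg**2 == 1/10 + 2/10 + 3/10 + 4/10 == 1.
def _check_unitary(U, tol=1e-9):
    # U * U^dagger should equal the 2x2 identity for any unitary U
    return np.allclose(U * U.getH(), np.eye(2), atol=tol)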
log = logging.getLogger(__name__)
generators = {
'hard': generate_random_Ugoal_HARD,
'easy': generate_random_Ugoal_EASY,
'random': generate_random_Ugoal_RANDOM,
'fixed': generate_random_Ugoal_FIXED,
}
parser = argparse.ArgumentParser(parents=opentuner.argparsers())
parser.add_argument('--seq-len', type=int, default=10,
help='maximum length for generated sequence')
parser.add_argument('--goal-type', choices=list(generators.keys()), default='hard',
help='method used to generate goal')
parser.add_argument('--goal-n', type=int, default=100,
help='argument to ugoal generator')
parser.add_argument('--goal-alpha', type=float,
default=random.random() * math.pi,
help='argument to ugoal generator')
class Unitary(opentuner.measurement.MeasurementInterface):
def __init__(self, *pargs, **kwargs):
super(Unitary, self).__init__(*pargs, **kwargs)
self.op = cla_func.Op()
self.num_operators = len(self.op.M)
self.Ugoal = generators[args.goal_type](N=args.goal_n,
alpha=args.goal_alpha)
def run(self, desired_result, input, limit):
cfg = desired_result.configuration.data
sequence = [cfg[i] for i in range(self.args.seq_len)
if cfg[i] < self.num_operators]
# sequence can be shorter than self.args.seq_len with null operator
if len(sequence) > 0:
accuracy = cla_func.calc_fidelity(sequence, self.op, self.Ugoal)
# ~.99 is acceptable
else:
accuracy = 0.0
return opentuner.resultsdb.models.Result(time=0.0,
accuracy=accuracy,
size=len(sequence))
def manipulator(self):
manipulator = ConfigurationManipulator()
for d in range(self.args.seq_len):
# we add 1 to num_operators allow a ignored 'null' operator
manipulator.add_parameter(SwitchParameter(d, self.num_operators + 1))
return manipulator
def save_final_config(self, configuration):
'''
called at the end of autotuning with the best resultsdb.models.Configuration
'''
cfg = configuration.data
sequence = [cfg[i] for i in range(self.args.seq_len)
if cfg[i] < self.num_operators]
print("Final sequence", sequence)
def objective(self):
# we could have also chosen to store 1.0 - accuracy in the time field
# and use the default MinimizeTime() objective
return opentuner.search.objective.MaximizeAccuracyMinimizeSize()
if __name__ == '__main__':
args = parser.parse_args()
Unitary.main(args)
| mit |
wkh124/wkh124 | blast_cluster_sequences.py | 1 | 1426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Cluster sequence IDs into groups of IDs whose sequences blast together.
Input file is the result file (outfmt 0) of a FASTA file blasted against itself.
Usage:
%program <input_file> <output_file>"""
import sys
from collections import defaultdict
from copy import deepcopy
try:
in_file = sys.argv[1]
out_file = sys.argv[2]
except IndexError:
print __doc__
sys.exit(0)
in_dict = defaultdict(set)
all_ids = set()
clusters = []
MAX_DEPTH = 100
with open(in_file) as f:
for line in f:
all_ids.update(line.strip().split())
k = line.strip().split()[0]
in_dict[k].update(line.strip().split())
total_len = len(in_dict)
print "There are", len(all_ids), "unique identifiers"
while in_dict:
    sys.stdout.write("\r" + str(len(in_dict)) + " of " + str(total_len) + ": " + str(100 * len(in_dict) / total_len) + "% ")
sys.stdout.flush()
temp = in_dict.popitem()
items = set(temp[1])
    # Expand the cluster up to MAX_DEPTH times through linked IDs, removing
    # visited keys from in_dict so each ID ends up in exactly one cluster.
    for d in range(MAX_DEPTH):
        temp_items = deepcopy(items)
        for i in items:
            temp_items.update(in_dict[i])
            in_dict.pop(i)
        items = deepcopy(temp_items)
clusters.append(items)
clusters.sort(key=len, reverse=True)
print "which regroup into", len(clusters), "clusters"
with open(out_file, "w") as f:
for c in sorted(clusters):
f.write("\t".join(c) + "\n")
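# Illustrative alternative (assumption, not part of the original script): the
# same single-linkage grouping in one pass with union-find, avoiding the
# fixed MAX_DEPTH bound and the repeated deepcopy calls.
def _cluster_union_find(pair_dict):
    parent = {}
    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x
    for key, linked in pair_dict.items():
        for other in linked:
            parent[find(other)] = find(key)
    groups = defaultdict(set)
    for x in list(parent):
        groups[find(x)].add(x)
    return list(groups.values())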
| gpl-3.0 |
wartman4404/servo | tests/wpt/css-tests/tools/html5lib/html5lib/html5parser.py | 423 | 117297 | from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
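# Illustrative usage sketch (assumption, not part of the module): both
# helpers accept a string or a file-like object and return a tree whose
# concrete type depends on the chosen treebuilder ("etree" by default).
def _example_parse():
    document = parse("<p>one<p>two")          # full document
    fragment = parseFragment("<b>hi</b>")     # children of an implied <div>
    return document, fragment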
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be replaced for e.g. a sanitizer which converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0]
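    # Illustrative note (assumption, not part of the module): parse first,
    # then read the property, e.g.
    #
    #     p = HTMLParser()
    #     p.parse(b"<meta charset=utf-8><title>t</title>")
    #     p.documentEncoding  # -> "utf-8"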
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
        container - name of the element whose innerHTML property we are
        setting; if set to None, defaults to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
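            # The tokenizer emits attributes as (name, value) pairs in source
            # order; reversing before dict() keeps the first occurrence of a
            # duplicated attribute, matching the spec's duplicate-attribute rule.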
token["data"] = dict(token["data"][::-1])
return token
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
                info = {"type": type_names[token['type']]}
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
                    # to be encoded and, as an ASCII superset, works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
            (("address", "article", "aside", "blockquote", "center", "details",
              "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
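            # "Noah's Ark" clause: the list of active formatting elements may
            # hold at most three entries matching the new element (same name,
            # namespace and attributes); pushing a fourth evicts the earliest
            # match, which the loop below collects.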
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
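        # Walk the open-element stack from the top: a matching "stop name"
        # (li for li; dt/dd for either) is closed implicitly, but the search
        # ends at the first special element other than address, div or p.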
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
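            # Void elements cannot hold children, so insert and pop at once;
            # acknowledging the self-closing flag suppresses the parse error
            # for an unacknowledged self-closing flag.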
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
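            # Rewrite the deprecated <isindex> into its spec-defined
            # expansion: <form><hr><label>prompt<input name=isindex></label>
            # <hr></form>, carrying over the original attributes except
            # action (moved onto the form) and prompt (used as label text).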
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
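            # Illustrative example: parsing <p>1<b>2<i>3</b>4</i>5</p> runs
            # this algorithm at </b>; <b> is closed and the still-open <i>
            # is reconstructed for the following text, yielding
            # 1<b>2<i>3</i></b><i>4</i>5 so formatting stays continuous
            # across the misnested end tag.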
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
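            # Flush the buffered run in one batch: if it contains any
            # non-space character the whole run is foster-parented through
            # the inTable insertText path, otherwise it is inserted as
            # ordinary whitespace.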
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
            # It also closes the <optgroup> itself
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
        def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
                # "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| mpl-2.0 |
qzhuyan/linux | scripts/gdb/linux/symbols.py | 467 | 6343 | #
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
from linux import modules
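# The hook below is defined conditionally: gdb.Breakpoint is unavailable in
# gdb builds without Python breakpoint support (presumably older releases).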
if hasattr(gdb, 'Breakpoint'):
class LoadModuleBreakpoint(gdb.Breakpoint):
def __init__(self, spec, gdb_command):
super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
self.silent = True
self.gdb_command = gdb_command
def stop(self):
module = gdb.parse_and_eval("mod")
module_name = module['name'].string()
cmd = self.gdb_command
# enforce update if object file is not found
cmd.module_files_updated = False
# Disable pagination while reporting symbol (re-)loading.
# The console input is blocked in this context so that we would
# get stuck waiting for the user to acknowledge paged output.
show_pagination = gdb.execute("show pagination", to_string=True)
pagination = show_pagination.endswith("on.\n")
gdb.execute("set pagination off")
if module_name in cmd.loaded_modules:
gdb.write("refreshing all symbols to reload module "
"'{0}'\n".format(module_name))
cmd.load_all_symbols()
else:
cmd.load_module_symbols(module)
# restore pagination state
gdb.execute("set pagination %s" % ("on" if pagination else "off"))
return False
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
The kernel (vmlinux) is taken from the current working directory. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space-separated list of paths passed to the
lx-symbols command."""
module_paths = []
module_files = []
module_files_updated = False
loaded_modules = []
breakpoint = None
def __init__(self):
super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
gdb.COMPLETE_FILENAME)
def _update_module_files(self):
self.module_files = []
for path in self.module_paths:
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".ko"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
module_pattern = ".*/{0}\.ko$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
return name
return None
def _section_arguments(self, module):
try:
sect_attrs = module['sect_attrs'].dereference()
except gdb.error:
return ""
attrs = sect_attrs['attrs']
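        # Map each ELF section name to the address it was loaded at, so the
        # interesting sections can be handed to add-symbol-file via -s flags.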
section_name_to_address = {
attrs[n]['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
name=section_name, addr=str(address)))
return "".join(args)
def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['core_layout']['base']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
if module_file:
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
filename=module_file,
addr=module_addr,
sections=self._section_arguments(module))
gdb.execute(cmdline, to_string=True)
if module_name not in self.loaded_modules:
self.loaded_modules.append(module_name)
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
# Dropping symbols will disable all breakpoints. So save their states
# and restore them afterward.
saved_states = []
        if hasattr(gdb, 'breakpoints') and gdb.breakpoints() is not None:
for bp in gdb.breakpoints():
saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
# drop all current symbols and reload vmlinux
gdb.execute("symbol-file", to_string=True)
gdb.execute("symbol-file vmlinux")
self.loaded_modules = []
module_list = modules.module_list()
if not module_list:
gdb.write("no modules found\n")
else:
            for module in module_list:
                self.load_module_symbols(module)
for saved_state in saved_states:
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
self.module_paths = [os.path.expanduser(p) for p in arg.split()]
self.module_paths.append(os.getcwd())
# enforce update
self.module_files = []
self.module_files_updated = False
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
if self.breakpoint is not None:
self.breakpoint.delete()
self.breakpoint = None
self.breakpoint = LoadModuleBreakpoint(
"kernel/module.c:do_init_module", self)
else:
gdb.write("Note: symbol update on module loading not supported "
"with this gdb version\n")
LxSymbols()
| gpl-2.0 |
andresgz/django | tests/i18n/tests.py | 87 | 85521 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import decimal
import gettext as gettext_module
import os
import pickle
from contextlib import contextmanager
from importlib import import_module
from threading import local
from unittest import skipUnless
from django import forms
from django.conf import settings
from django.template import Context, Template, TemplateSyntaxError
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.utils import six, translation
from django.utils._os import upath
from django.utils.formats import (
date_format, get_format, get_format_modules, iter_format_modules, localize,
localize_input, reset_format_cache, sanitize_separators, time_format,
)
from django.utils.numberformat import format as nformat
from django.utils.safestring import SafeBytes, SafeString, SafeText, mark_safe
from django.utils.six import PY3
from django.utils.translation import (
LANGUAGE_SESSION_KEY, activate, check_for_language, deactivate,
get_language, get_language_bidi, get_language_from_request,
get_language_info, gettext, gettext_lazy, ngettext_lazy, npgettext,
npgettext_lazy, pgettext, pgettext_lazy, string_concat, to_locale,
trans_real, ugettext, ugettext_lazy, ungettext, ungettext_lazy,
)
from .forms import CompanyForm, I18nForm, SelectDateForm
from .models import Company, TestModel
here = os.path.dirname(os.path.abspath(upath(__file__)))
extended_locale_paths = settings.LOCALE_PATHS + [
os.path.join(here, 'other', 'locale'),
]
@contextmanager
def patch_formats(lang, **settings):
from django.utils.formats import _format_cache
# Populate _format_cache with temporary values
for key, value in settings.items():
_format_cache[(key, lang)] = value
try:
yield
finally:
reset_format_cache()
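# A minimal usage sketch of the helper above (assumes USE_L10N is enabled so
# that get_format() consults the per-language cache):
#   with patch_formats('fr', THOUSAND_SEPARATOR=''):
#       with translation.override('fr'):
#           get_format('THOUSAND_SEPARATOR')  # -> '' while the patch is active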
class TranslationTests(SimpleTestCase):
@translation.override('fr')
def test_plural(self):
"""
Test plurals with ungettext. French differs from English in that 0 is singular.
"""
self.assertEqual(ungettext("%d year", "%d years", 0) % 0, "0 année")
self.assertEqual(ungettext("%d year", "%d years", 2) % 2, "2 années")
self.assertEqual(ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}, "0 octet")
self.assertEqual(ungettext("%(size)d byte", "%(size)d bytes", 2) % {'size': 2}, "2 octets")
def test_override(self):
activate('de')
try:
with translation.override('pl'):
self.assertEqual(get_language(), 'pl')
self.assertEqual(get_language(), 'de')
with translation.override(None):
self.assertEqual(get_language(), None)
with translation.override('pl'):
pass
self.assertEqual(get_language(), None)
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_decorator(self):
@translation.override('pl')
def func_pl():
self.assertEqual(get_language(), 'pl')
@translation.override(None)
def func_none():
self.assertEqual(get_language(), None)
try:
activate('de')
func_pl()
self.assertEqual(get_language(), 'de')
func_none()
self.assertEqual(get_language(), 'de')
finally:
deactivate()
def test_override_exit(self):
"""
Test that the language restored is the one used when the function was
called, not the one used when the decorator was initialized. refs #23381
"""
activate('fr')
@translation.override('pl')
def func_pl():
pass
deactivate()
try:
activate('en')
func_pl()
self.assertEqual(get_language(), 'en')
finally:
deactivate()
def test_lazy_objects(self):
"""
Format string interpolation should work with *_lazy objects.
"""
s = ugettext_lazy('Add %(name)s')
d = {'name': 'Ringo'}
self.assertEqual('Add Ringo', s % d)
with translation.override('de', deactivate=True):
self.assertEqual('Ringo hinzuf\xfcgen', s % d)
with translation.override('pl'):
self.assertEqual('Dodaj Ringo', s % d)
# It should be possible to compare *_lazy objects.
s1 = ugettext_lazy('Add %(name)s')
self.assertEqual(s, s1)
s2 = gettext_lazy('Add %(name)s')
s3 = gettext_lazy('Add %(name)s')
self.assertEqual(s2, s3)
self.assertEqual(s, s2)
s4 = ugettext_lazy('Some other string')
self.assertNotEqual(s, s4)
@skipUnless(six.PY2, "No more bytestring translations on PY3")
def test_lazy_and_bytestrings(self):
# On Python 2, (n)gettext_lazy should not transform a bytestring to unicode
self.assertEqual(gettext_lazy(b"test").upper(), b"TEST")
self.assertEqual((ngettext_lazy(b"%d test", b"%d tests") % 1).upper(), b"1 TEST")
# Other versions of lazy functions always return unicode
self.assertEqual(ugettext_lazy(b"test").upper(), "TEST")
self.assertEqual((ungettext_lazy(b"%d test", b"%d tests") % 1).upper(), "1 TEST")
self.assertEqual(pgettext_lazy(b"context", b"test").upper(), "TEST")
self.assertEqual(
(npgettext_lazy(b"context", b"%d test", b"%d tests") % 1).upper(),
"1 TEST"
)
def test_lazy_pickle(self):
s1 = ugettext_lazy("test")
self.assertEqual(six.text_type(s1), "test")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(six.text_type(s2), "test")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ungettext_lazy(self):
simple_with_format = ungettext_lazy('%d good result', '%d good results')
simple_str_with_format = ngettext_lazy(str('%d good result'), str('%d good results'))
simple_context_with_format = npgettext_lazy('Exclamation', '%d good result', '%d good results')
simple_without_format = ungettext_lazy('good result', 'good results')
with translation.override('de'):
self.assertEqual(simple_with_format % 1, '1 gutes Resultat')
self.assertEqual(simple_with_format % 4, '4 guten Resultate')
self.assertEqual(simple_str_with_format % 1, str('1 gutes Resultat'))
self.assertEqual(simple_str_with_format % 4, str('4 guten Resultate'))
self.assertEqual(simple_context_with_format % 1, '1 gutes Resultat!')
self.assertEqual(simple_context_with_format % 4, '4 guten Resultate!')
self.assertEqual(simple_without_format % 1, 'gutes Resultat')
self.assertEqual(simple_without_format % 4, 'guten Resultate')
complex_nonlazy = ungettext_lazy('Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4)
complex_deferred = ungettext_lazy(
'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num'
)
complex_str_nonlazy = ngettext_lazy(
str('Hi %(name)s, %(num)d good result'), str('Hi %(name)s, %(num)d good results'), 4
)
complex_str_deferred = ngettext_lazy(
str('Hi %(name)s, %(num)d good result'), str('Hi %(name)s, %(num)d good results'), 'num'
)
complex_context_nonlazy = npgettext_lazy(
'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 4
)
complex_context_deferred = npgettext_lazy(
'Greeting', 'Hi %(name)s, %(num)d good result', 'Hi %(name)s, %(num)d good results', 'num'
)
with translation.override('de'):
self.assertEqual(complex_nonlazy % {'num': 4, 'name': 'Jim'}, 'Hallo Jim, 4 guten Resultate')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 1}, 'Hallo Jim, 1 gutes Resultat')
self.assertEqual(complex_deferred % {'name': 'Jim', 'num': 5}, 'Hallo Jim, 5 guten Resultate')
with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
complex_deferred % {'name': 'Jim'}
self.assertEqual(complex_str_nonlazy % {'num': 4, 'name': 'Jim'}, str('Hallo Jim, 4 guten Resultate'))
self.assertEqual(complex_str_deferred % {'name': 'Jim', 'num': 1}, str('Hallo Jim, 1 gutes Resultat'))
self.assertEqual(complex_str_deferred % {'name': 'Jim', 'num': 5}, str('Hallo Jim, 5 guten Resultate'))
with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
complex_str_deferred % {'name': 'Jim'}
self.assertEqual(complex_context_nonlazy % {'num': 4, 'name': 'Jim'}, 'Willkommen Jim, 4 guten Resultate')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 1}, 'Willkommen Jim, 1 gutes Resultat')
self.assertEqual(complex_context_deferred % {'name': 'Jim', 'num': 5}, 'Willkommen Jim, 5 guten Resultate')
with six.assertRaisesRegex(self, KeyError, 'Your dictionary lacks key.*'):
complex_context_deferred % {'name': 'Jim'}
@skipUnless(six.PY2, "PY3 doesn't have distinct int and long types")
def test_ungettext_lazy_long(self):
"""
Regression test for #22820: int and long should be treated alike in ungettext_lazy.
"""
result = ungettext_lazy('%(name)s has %(num)d good result', '%(name)s has %(num)d good results', 4)
self.assertEqual(result % {'name': 'Joe', 'num': 4}, "Joe has 4 good results")
# Now with a long
result = ungettext_lazy(
'%(name)s has %(num)d good result', '%(name)s has %(num)d good results',
long(4) # NOQA: long undefined on PY3
)
self.assertEqual(result % {'name': 'Joe', 'num': 4}, "Joe has 4 good results")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_pgettext(self):
trans_real._active = local()
trans_real._translations = {}
with translation.override('de'):
self.assertEqual(pgettext("unexisting", "May"), "May")
self.assertEqual(pgettext("month name", "May"), "Mai")
self.assertEqual(pgettext("verb", "May"), "Kann")
self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_template_tags_pgettext(self):
"""
        Ensure that message contexts are taken into account by the {% trans %} and
{% blocktrans %} template tags.
Refs #14806.
"""
trans_real._active = local()
trans_real._translations = {}
with translation.override('de'):
# {% trans %} -----------------------------------
            # Nonexistent context...
t = Template('{% load i18n %}{% trans "May" context "unexisting" %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'May')
# Existing context...
# Using a literal
t = Template('{% load i18n %}{% trans "May" context "month name" %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% trans "May" context "verb" %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Kann')
# Using a variable
t = Template('{% load i18n %}{% trans "May" context message_context %}')
rendered = t.render(Context({'message_context': 'month name'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% trans "May" context message_context %}')
rendered = t.render(Context({'message_context': 'verb'}))
self.assertEqual(rendered, 'Kann')
# Using a filter
t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
rendered = t.render(Context({'message_context': 'MONTH NAME'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
rendered = t.render(Context({'message_context': 'VERB'}))
self.assertEqual(rendered, 'Kann')
# Using 'as'
t = Template('{% load i18n %}{% trans "May" context "month name" as var %}Value: {{ var }}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Value: Mai')
t = Template('{% load i18n %}{% trans "May" as var context "verb" %}Value: {{ var }}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Value: Kann')
# {% blocktrans %} ------------------------------
            # Nonexistent context...
t = Template('{% load i18n %}{% blocktrans context "unexisting" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'May')
# Existing context...
# Using a literal
t = Template('{% load i18n %}{% blocktrans context "month name" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context "verb" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Kann')
# Using a variable
t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'month name'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'verb'}))
self.assertEqual(rendered, 'Kann')
# Using a filter
t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'MONTH NAME'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'VERB'}))
self.assertEqual(rendered, 'Kann')
# Using 'count'
t = Template(
'{% load i18n %}{% blocktrans count number=1 context "super search" %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '1 Super-Ergebnis')
t = Template(
'{% load i18n %}{% blocktrans count number=2 context "super search" %}{{ number }}'
' super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 Super-Ergebnisse')
t = Template(
'{% load i18n %}{% blocktrans context "other super search" count number=1 %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '1 anderen Super-Ergebnis')
t = Template(
'{% load i18n %}{% blocktrans context "other super search" count number=2 %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Using 'with'
t = Template(
'{% load i18n %}{% blocktrans with num_comments=5 context "comment count" %}'
'There are {{ num_comments }} comments{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = Template(
'{% load i18n %}{% blocktrans with num_comments=5 context "other comment count" %}'
'There are {{ num_comments }} comments{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Andere: Es gibt 5 Kommentare')
# Using trimmed
t = Template(
'{% load i18n %}{% blocktrans trimmed %}\n\nThere\n\t are 5 '
'\n\n comments\n{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'There are 5 comments')
t = Template(
'{% load i18n %}{% blocktrans with num_comments=5 context "comment count" trimmed %}\n\n'
'There are \t\n \t {{ num_comments }} comments\n\n{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = Template(
'{% load i18n %}{% blocktrans context "other super search" count number=2 trimmed %}\n'
'{{ number }} super \n result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Mis-uses
with self.assertRaises(TemplateSyntaxError):
Template('{% load i18n %}{% blocktrans context with month="May" %}{{ month }}{% endblocktrans %}')
with self.assertRaises(TemplateSyntaxError):
Template('{% load i18n %}{% blocktrans context %}{% endblocktrans %}')
with self.assertRaises(TemplateSyntaxError):
Template(
'{% load i18n %}{% blocktrans count number=2 context %}'
'{{ number }} super result{% plural %}{{ number }}'
' super results{% endblocktrans %}'
)
def test_string_concat(self):
"""
six.text_type(string_concat(...)) should not raise a TypeError - #4796
"""
self.assertEqual('django', six.text_type(string_concat("dja", "ngo")))
def test_empty_value(self):
"""
Empty value must stay empty after being translated (#23196).
"""
with translation.override('de'):
self.assertEqual("", ugettext(""))
self.assertEqual(str(""), gettext(str("")))
s = mark_safe("")
self.assertEqual(s, ugettext(s))
def test_safe_status(self):
"""
Translating a string requiring no auto-escaping shouldn't change the "safe" status.
"""
s = mark_safe(str('Password'))
self.assertEqual(SafeString, type(s))
with translation.override('de', deactivate=True):
self.assertEqual(SafeText, type(ugettext(s)))
self.assertEqual('aPassword', SafeText('a') + s)
self.assertEqual('Passworda', s + SafeText('a'))
self.assertEqual('Passworda', s + mark_safe('a'))
self.assertEqual('aPassword', mark_safe('a') + s)
self.assertEqual('as', mark_safe('a') + mark_safe('s'))
def test_maclines(self):
"""
        Translations in files with Mac or DOS line endings are converted to
        Unix line endings in .po catalogs, and they must still match when retrieved.
"""
ca_translation = trans_real.translation('ca')
ca_translation._catalog['Mac\nEOF\n'] = 'Catalan Mac\nEOF\n'
ca_translation._catalog['Win\nEOF\n'] = 'Catalan Win\nEOF\n'
with translation.override('ca', deactivate=True):
self.assertEqual('Catalan Mac\nEOF\n', ugettext('Mac\rEOF\r'))
self.assertEqual('Catalan Win\nEOF\n', ugettext('Win\r\nEOF\r\n'))
def test_to_locale(self):
"""
Tests the to_locale function and the special case of Serbian Latin
(refs #12230 and r11299)
"""
self.assertEqual(to_locale('en-us'), 'en_US')
self.assertEqual(to_locale('sr-lat'), 'sr_Lat')
def test_to_language(self):
"""
Test the to_language function
"""
self.assertEqual(trans_real.to_language('en_US'), 'en-us')
self.assertEqual(trans_real.to_language('sr_Lat'), 'sr-lat')
def test_language_bidi(self):
self.assertEqual(get_language_bidi(), False)
with translation.override(None):
self.assertEqual(get_language_bidi(), False)
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_1(self):
"""
Error in translation file should not crash template rendering
(%(person)s is translated as %(personne)s in fr.po)
Refs #16516.
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My name is James.')
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_2(self):
"""
Error in translation file should not crash template rendering
        (%(person) is missing the trailing 's' in fr.po, causing the string formatting to fail)
Refs #18393.
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My other name is James.')
class TranslationThreadSafetyTests(SimpleTestCase):
def setUp(self):
self._old_language = get_language()
self._translations = trans_real._translations
# here we rely on .split() being called inside the _fetch()
# in trans_real.translation()
class sideeffect_str(str):
def split(self, *args, **kwargs):
res = str.split(self, *args, **kwargs)
trans_real._translations['en-YY'] = None
return res
trans_real._translations = {sideeffect_str('en-XX'): None}
def tearDown(self):
trans_real._translations = self._translations
activate(self._old_language)
def test_bug14894_translation_activate_thread_safety(self):
translation_count = len(trans_real._translations)
try:
translation.activate('pl')
except RuntimeError:
self.fail('translation.activate() is not thread-safe')
# make sure sideeffect_str actually added a new translation
self.assertLess(translation_count, len(trans_real._translations))
@override_settings(USE_L10N=True)
class FormattingTests(SimpleTestCase):
def setUp(self):
super(FormattingTests, self).setUp()
self.n = decimal.Decimal('66666.666')
self.f = 99999.999
self.d = datetime.date(2009, 12, 31)
self.dt = datetime.datetime(2009, 12, 31, 20, 50)
self.t = datetime.time(10, 15, 48)
self.l = 10000 if PY3 else long(10000) # NOQA: long undefined on PY3
self.ctxt = Context({
'n': self.n,
't': self.t,
'd': self.d,
'dt': self.dt,
'f': self.f,
'l': self.l,
})
def test_locale_independent(self):
"""
        Locale-independent number formatting.
"""
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
self.assertEqual('66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
self.assertEqual('66666', nformat(self.n, decimal_sep='X', decimal_pos=0, grouping=1, thousand_sep='Y'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(
'66,666.66',
nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=',')
)
self.assertEqual(
'6B6B6B6B6A6',
nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B')
)
self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1))
self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1))
self.assertEqual('10000.0', nformat(self.l, decimal_sep='.', decimal_pos=1))
# This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414)
self.assertEqual('10000', nformat(self.l, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True))
# date filter
self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt))
self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt))
@override_settings(USE_L10N=False)
def test_l10n_disabled(self):
"""
        With format localization (USE_L10N) disabled, the Catalan locale's
        translations are used, but its formats are not.
"""
with translation.override('ca', deactivate=True):
self.maxDiff = 3000
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15 a.m.', time_format(self.t))
self.assertEqual('des. 31, 2009', date_format(self.d))
self.assertEqual('desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.l))
self.assertEqual('des. 31, 2009', localize(self.d))
self.assertEqual('des. 31, 2009, 8:50 p.m.', localize(self.dt))
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('des. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('des. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
form = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertFalse(form.is_valid())
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['float_field'])
self.assertEqual(['Introdu\xefu un n\xfamero.'], form.errors['decimal_field'])
self.assertEqual(['Introdu\xefu una data v\xe0lida.'], form.errors['date_field'])
self.assertEqual(['Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field'])
self.assertEqual(['Introdu\xefu un n\xfamero sencer.'], form.errors['integer_field'])
form2 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form2.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="0">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected="selected">desembre</option>'
'</select>'
'<select name="mydate_day" id="id_mydate_day">'
'<option value="0">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected="selected">31</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="0">---</option>'
'<option value="2009" selected="selected">2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# We shouldn't change the behavior of the floatformat filter re:
# thousand separator and grouping when USE_L10N is False even
# if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
# THOUSAND_SEPARATOR settings are specified
with self.settings(USE_THOUSAND_SEPARATOR=True,
NUMBER_GROUPING=1, THOUSAND_SEPARATOR='!'):
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
def test_false_like_locale_formats(self):
"""
Ensure that the active locale's formats take precedence over the
default settings even if they would be interpreted as False in a
conditional test (e.g. 0 or empty string).
Refs #16938.
"""
with patch_formats('fr', THOUSAND_SEPARATOR='', FIRST_DAY_OF_WEEK=0):
with translation.override('fr'):
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR='!'):
self.assertEqual('', get_format('THOUSAND_SEPARATOR'))
# Even a second time (after the format has been cached)...
self.assertEqual('', get_format('THOUSAND_SEPARATOR'))
with self.settings(FIRST_DAY_OF_WEEK=1):
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
# Even a second time (after the format has been cached)...
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
def test_l10n_enabled(self):
self.maxDiff = 3000
# Catalan locale
with translation.override('ca', deactivate=True):
self.assertEqual('j \d\e F \d\e Y', get_format('DATE_FORMAT'))
self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual(',', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('10:15', time_format(self.t))
self.assertEqual('31 de desembre de 2009', date_format(self.d))
self.assertEqual('desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', localize(self.n))
self.assertEqual('99.999,999', localize(self.f))
self.assertEqual('10.000', localize(self.l))
self.assertEqual('True', localize(True))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', localize(self.n))
self.assertEqual('99999,999', localize(self.f))
self.assertEqual('10000', localize(self.l))
self.assertEqual('31 de desembre de 2009', localize(self.d))
self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99.999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10.000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
form3 = I18nForm({
'decimal_field': '66.666,666',
'float_field': '99.999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1.234',
})
self.assertTrue(form3.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form3.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field'])
self.assertEqual(1234, form3.cleaned_data['integer_field'])
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666,666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999,999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000,0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('10:15', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
self.assertEqual('31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'31/12/2009 20:50',
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
self.assertEqual(date_format(datetime.datetime.now(), "DATE_FORMAT"),
Template('{% now "DATE_FORMAT" %}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
form4 = I18nForm({
'decimal_field': '66666,666',
'float_field': '99999,999',
'date_field': '31/12/2009',
'datetime_field': '31/12/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form4.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form4.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field'])
self.assertEqual(1234, form4.cleaned_data['integer_field'])
form5 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form5.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="0">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected="selected">31</option>'
'</select>'
'<select name="mydate_month" id="id_mydate_month">'
'<option value="0">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected="selected">desembre</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="0">---</option>'
'<option value="2009" selected="selected">2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# Russian locale (with E as month)
with translation.override('ru', deactivate=True):
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="0">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected="selected">31</option>'
'</select>'
'<select name="mydate_month" id="id_mydate_month">'
'<option value="0">---</option>'
'<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>'
'<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>'
'<option value="3">\u041c\u0430\u0440\u0442</option>'
'<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>'
'<option value="5">\u041c\u0430\u0439</option>'
'<option value="6">\u0418\u044e\u043d\u044c</option>'
'<option value="7">\u0418\u044e\u043b\u044c</option>'
'<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>'
'<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>'
'<option value="12" selected="selected">\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="0">---</option>'
'<option value="2009" selected="selected">2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
# English locale
with translation.override('en', deactivate=True):
self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
self.assertEqual('Dec. 31, 2009', date_format(self.d))
self.assertEqual('December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
self.assertEqual('12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
self.assertEqual('No localizable', localize('No localizable'))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', localize(self.n))
self.assertEqual('99,999.999', localize(self.f))
self.assertEqual('10,000', localize(self.l))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', localize(self.n))
self.assertEqual('99999.999', localize(self.f))
self.assertEqual('10000', localize(self.l))
self.assertEqual('Dec. 31, 2009', localize(self.d))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual('66,666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99,999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('10,000', Template('{{ l }}').render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
self.assertEqual('99999.999', Template('{{ f }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009', Template('{{ d }}').render(self.ctxt))
self.assertEqual('Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
self.assertEqual('66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
self.assertEqual('100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
self.assertEqual('12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
self.assertEqual(
'12/31/2009 8:50 p.m.',
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt)
)
form5 = I18nForm({
'decimal_field': '66666.666',
'float_field': '99999.999',
'date_field': '12/31/2009',
'datetime_field': '12/31/2009 20:50',
'time_field': '20:50',
'integer_field': '1234',
})
self.assertTrue(form5.is_valid())
self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field'])
self.assertEqual(99999.999, form5.cleaned_data['float_field'])
self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field'])
self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field'])
self.assertEqual(1234, form5.cleaned_data['integer_field'])
form6 = SelectDateForm({
'date_field_month': '12',
'date_field_day': '31',
'date_field_year': '2009'
})
self.assertTrue(form6.is_valid())
self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field'])
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="0">---</option>'
'<option value="1">January</option>'
'<option value="2">February</option>'
'<option value="3">March</option>'
'<option value="4">April</option>'
'<option value="5">May</option>'
'<option value="6">June</option>'
'<option value="7">July</option>'
'<option value="8">August</option>'
'<option value="9">September</option>'
'<option value="10">October</option>'
'<option value="11">November</option>'
'<option value="12" selected="selected">December</option>'
'</select>'
'<select name="mydate_day" id="id_mydate_day">'
'<option value="0">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected="selected">31</option>'
'</select>'
'<select name="mydate_year" id="id_mydate_year">'
'<option value="0">---</option>'
'<option value="2009" selected="selected">2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
'</select>',
forms.SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
)
def test_sub_locales(self):
"""
        Check that sublocales fall back to the main locale.
"""
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override('de-at', deactivate=True):
self.assertEqual('66.666,666', Template('{{ n }}').render(self.ctxt))
with translation.override('es-us', deactivate=True):
self.assertEqual('31 de Diciembre de 2009', date_format(self.d))
def test_localized_input(self):
"""
Tests if form input is correctly localized
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
form6 = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
self.assertTrue(form6.is_valid())
self.assertHTMLEqual(
form6.as_ul(),
'<li><label for="id_name">Name:</label>'
'<input id="id_name" type="text" name="name" value="acme" maxlength="50" /></li>'
'<li><label for="id_date_added">Date added:</label>'
'<input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" /></li>'
'<li><label for="id_cents_paid">Cents paid:</label>'
'<input type="text" name="cents_paid" value="59,47" id="id_cents_paid" /></li>'
'<li><label for="id_products_delivered">Products delivered:</label>'
'<input type="text" name="products_delivered" value="12000" id="id_products_delivered" /></li>'
)
self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00')
self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added'])
with self.settings(USE_THOUSAND_SEPARATOR=True):
# Checking for the localized "products_delivered" field
self.assertInHTML(
'<input type="text" name="products_delivered" value="12.000" id="id_products_delivered" />',
form6.as_ul()
)
def test_sanitize_separators(self):
"""
Tests django.utils.formats.sanitize_separators.
"""
# Non-strings are untouched
self.assertEqual(sanitize_separators(123), 123)
with translation.override('ru', deactivate=True):
# Russian locale has non-breaking space (\xa0) as thousand separator
# Check that usual space is accepted too when sanitizing inputs
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators('1\xa0234\xa0567'), '1234567')
self.assertEqual(sanitize_separators('77\xa0777,777'), '77777.777')
self.assertEqual(sanitize_separators('12 345'), '12345')
self.assertEqual(sanitize_separators('77 777,777'), '77777.777')
with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=False):
self.assertEqual(sanitize_separators('12\xa0345'), '12\xa0345')
with patch_formats(get_language(), THOUSAND_SEPARATOR='.', DECIMAL_SEPARATOR=','):
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators('10.234'), '10234')
# Suspicion that user entered dot as decimal separator (#22171)
self.assertEqual(sanitize_separators('10.10'), '10.10')
def test_iter_format_modules(self):
"""
Tests the iter_format_modules function.
"""
# Importing some format modules so that we can compare the returned
# modules with these expected modules
default_mod = import_module('django.conf.locale.de.formats')
test_mod = import_module('i18n.other.locale.de.formats')
test_mod2 = import_module('i18n.other2.locale.de.formats')
with translation.override('de-at', deactivate=True):
# Should return the correct default module when no setting is set
self.assertEqual(list(iter_format_modules('de')), [default_mod])
# When the setting is a string, should return the given module and
# the default module
self.assertEqual(
list(iter_format_modules('de', 'i18n.other.locale')),
[test_mod, default_mod])
# When setting is a list of strings, should return the given
# modules and the default module
self.assertEqual(
list(iter_format_modules('de', ['i18n.other.locale', 'i18n.other2.locale'])),
[test_mod, test_mod2, default_mod])
def test_iter_format_modules_stability(self):
"""
        Tests that the iter_format_modules function always yields format modules in
        a stable and correct order in the presence of both base ll and ll_CC formats.
"""
en_format_mod = import_module('django.conf.locale.en.formats')
en_gb_format_mod = import_module('django.conf.locale.en_GB.formats')
self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod])
def test_get_format_modules_lang(self):
with translation.override('de', deactivate=True):
self.assertEqual('.', get_format('DECIMAL_SEPARATOR', lang='en'))
def test_get_format_modules_stability(self):
with self.settings(FORMAT_MODULE_PATH='i18n.other.locale'):
with translation.override('de', deactivate=True):
old = str("%r") % get_format_modules(reverse=True)
new = str("%r") % get_format_modules(reverse=True) # second try
                self.assertEqual(new, old, 'Value returned by get_format_modules() must be preserved between calls.')
def test_localize_templatetag_and_filter(self):
"""
Tests the {% localize %} templatetag
"""
context = Context({'value': 3.14})
template1 = Template(
'{% load l10n %}{% localize %}{{ value }}{% endlocalize %};'
'{% localize on %}{{ value }}{% endlocalize %}'
)
template2 = Template("{% load l10n %}{{ value }};{% localize off %}{{ value }};{% endlocalize %}{{ value }}")
template3 = Template('{% load l10n %}{{ value }};{{ value|unlocalize }}')
template4 = Template('{% load l10n %}{{ value }};{{ value|localize }}')
output1 = '3,14;3,14'
output2 = '3,14;3.14;3,14'
output3 = '3,14;3.14'
output4 = '3.14;3,14'
with translation.override('de', deactivate=True):
with self.settings(USE_L10N=False):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template4.render(context), output4)
with self.settings(USE_L10N=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template2.render(context), output2)
self.assertEqual(template3.render(context), output3)
def test_localized_as_text_as_hidden_input(self):
"""
Tests if form input with 'as_hidden' or 'as_text' is correctly localized. Ticket #18777
"""
self.maxDiff = 1200
with translation.override('de-at', deactivate=True):
template = Template('{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}')
template_as_text = Template('{% load l10n %}{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}')
template_as_hidden = Template(
'{% load l10n %}{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}'
)
form = CompanyForm({
'name': 'acme',
'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
'cents_paid': decimal.Decimal('59.47'),
'products_delivered': 12000,
})
context = Context({'form': form})
self.assertTrue(form.is_valid())
self.assertHTMLEqual(
template.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" />;'
'<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" />'
)
self.assertHTMLEqual(
template_as_text.render(context),
'<input id="id_date_added" name="date_added" type="text" value="31.12.2009 06:00:00" />;'
' <input id="id_cents_paid" name="cents_paid" type="text" value="59,47" />'
)
self.assertHTMLEqual(
template_as_hidden.render(context),
'<input id="id_date_added" name="date_added" type="hidden" value="31.12.2009 06:00:00" />;'
'<input id="id_cents_paid" name="cents_paid" type="hidden" value="59,47" />'
)
class MiscTests(SimpleTestCase):
def setUp(self):
super(MiscTests, self).setUp()
self.rf = RequestFactory()
@override_settings(LANGUAGE_CODE='de')
def test_english_fallback(self):
"""
        With a non-English LANGUAGE_CODE, if the active language is English
        or one of its variants, the untranslated string should be returned
        instead of falling back to LANGUAGE_CODE (see #24413).
"""
self.assertEqual(ugettext("Image"), "Bild")
with translation.override('en'):
self.assertEqual(ugettext("Image"), "Image")
with translation.override('en-us'):
self.assertEqual(ugettext("Image"), "Image")
with translation.override('en-ca'):
self.assertEqual(ugettext("Image"), "Image")
def test_parse_spec_http_header(self):
"""
Testing HTTP header parsing. First, we test that we can parse the
values according to the spec (and that we extract all the pieces in
the right order).
"""
p = trans_real.parse_accept_lang_header
# Good headers.
self.assertEqual([('de', 1.0)], p('de'))
self.assertEqual([('en-au', 1.0)], p('en-AU'))
self.assertEqual([('es-419', 1.0)], p('es-419'))
self.assertEqual([('*', 1.0)], p('*;q=1.00'))
self.assertEqual([('en-au', 0.123)], p('en-AU;q=0.123'))
self.assertEqual([('en-au', 0.5)], p('en-au;q=0.5'))
self.assertEqual([('en-au', 1.0)], p('en-au;q=1.0'))
self.assertEqual([('da', 1.0), ('en', 0.5), ('en-gb', 0.25)], p('da, en-gb;q=0.25, en;q=0.5'))
self.assertEqual([('en-au-xx', 1.0)], p('en-au-xx'))
self.assertEqual(
[('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)],
p('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125')
)
self.assertEqual([('*', 1.0)], p('*'))
self.assertEqual([('de', 1.0)], p('de;q=0.'))
self.assertEqual([('en', 1.0), ('*', 0.5)], p('en; q=1.0, * ; q=0.5'))
self.assertEqual([], p(''))
# Bad headers; should always return [].
self.assertEqual([], p('en-gb;q=1.0000'))
self.assertEqual([], p('en;q=0.1234'))
self.assertEqual([], p('en;q=.2'))
self.assertEqual([], p('abcdefghi-au'))
self.assertEqual([], p('**'))
self.assertEqual([], p('en,,gb'))
self.assertEqual([], p('en-au;q=0.1.0'))
self.assertEqual(
[],
p('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZ,en')
)
self.assertEqual([], p('da, en-gb;q=0.8, en;q=0.7,#'))
self.assertEqual([], p('de;q=2.0'))
self.assertEqual([], p('de;q=0.a'))
self.assertEqual([], p('12-345'))
self.assertEqual([], p(''))
self.assertEqual([], p('en; q=1,'))
def test_parse_literal_http_header(self):
"""
Now test that we parse a literal HTTP header correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
self.assertEqual('pt', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
self.assertEqual('es', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
self.assertEqual('es-ar', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
self.assertEqual(g(r), 'es')
        # This tests the following scenario: there is no main-language (zh)
        # translation of Django, but there is one for the variation (zh-hans).
        # When the user sets zh-hans as the preferred language, Django should
        # select it without falling back to, or ignoring, the main language.
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans,de'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'NL'}
self.assertEqual('nl', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'fy'}
self.assertEqual('fy', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'ia'}
self.assertEqual('ia', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'sr-latn'}
self.assertEqual('sr-latn', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hans'}
self.assertEqual('zh-hans', g(r))
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-hant'}
self.assertEqual('zh-hant', g(r))
@override_settings(
LANGUAGES=[
('en', 'English'),
('zh-hans', 'Simplified Chinese'),
('zh-hant', 'Traditional Chinese'),
]
)
def test_support_for_deprecated_chinese_language_codes(self):
"""
        Some browsers (Firefox, IE, etc.) send deprecated language codes. Because
        these codes will be removed in Django 1.9, they would otherwise be matched
        incorrectly; for example, zh-tw (traditional) would be interpreted as
        zh-hans (simplified), which is wrong. So these deprecated language codes
        should also be accepted.
refs #18419 -- this is explicitly for browser compatibility
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,en'}
self.assertEqual(g(r), 'zh-hans')
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-tw,en'}
self.assertEqual(g(r), 'zh-hant')
def test_special_fallback_language(self):
"""
Some languages may have special fallbacks that don't follow the simple
'fr-ca' -> 'fr' logic (notably Chinese codes).
"""
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-my,en'}
self.assertEqual(get_language_from_request(r), 'zh-hans')
def test_parse_language_cookie(self):
"""
Now test that we parse language preferences stored in a cookie correctly.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
r.META = {}
self.assertEqual('pt-br', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
r.META = {}
self.assertEqual('pt', g(r))
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual('es', g(r))
# This test assumes there won't be a Django translation to a US
# variation of the Spanish language, a safe assumption. When the
# user sets it as the preferred language, the main 'es'
# translation should be selected instead.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
r.META = {}
self.assertEqual(g(r), 'es')
# This tests the following scenario: there isn't a main language (zh)
# translation of Django but there is a translation to variation (zh-hans)
# the user sets zh-hans as the preferred language, it should be selected
# by Django without falling back nor ignoring it.
r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-hans'}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
self.assertEqual(g(r), 'zh-hans')
def test_get_language_from_path_real(self):
g = trans_real.get_language_from_path
self.assertEqual(g('/pl/'), 'pl')
self.assertEqual(g('/pl'), 'pl')
self.assertEqual(g('/xyz/'), None)
def test_get_language_from_path_null(self):
from django.utils.translation.trans_null import get_language_from_path as g
self.assertEqual(g('/pl/'), None)
self.assertEqual(g('/pl'), None)
self.assertEqual(g('/xyz/'), None)
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_in_translatable_block(self):
t_sing = Template("{% load i18n %}{% blocktrans %}The result was {{ percent }}%{% endblocktrans %}")
t_plur = Template(
"{% load i18n %}{% blocktrans count num as number %}"
"{{ percent }}% represents {{ num }} object{% plural %}"
"{{ percent }}% represents {{ num }} objects{% endblocktrans %}"
)
with translation.override('de'):
self.assertEqual(t_sing.render(Context({'percent': 42})), 'Das Ergebnis war 42%')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '42% stellt 1 Objekt dar')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '42% stellt 4 Objekte dar')
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_formatting_in_blocktrans(self):
"""
        Test that Python %-formatting is properly escaped in blocktrans,
        whether singular or plural.
"""
t_sing = Template("{% load i18n %}{% blocktrans %}There are %(num_comments)s comments{% endblocktrans %}")
t_plur = Template(
"{% load i18n %}{% blocktrans count num as number %}"
"%(percent)s% represents {{ num }} object{% plural %}"
"%(percent)s% represents {{ num }} objects{% endblocktrans %}"
)
with translation.override('de'):
# Strings won't get translated as they don't match after escaping %
self.assertEqual(t_sing.render(Context({'num_comments': 42})), 'There are %(num_comments)s comments')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '%(percent)s% represents 1 object')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '%(percent)s% represents 4 objects')
def test_cache_resetting(self):
"""
        #14170: after changing the LANGUAGES setting, the language cache should
        be cleared so that previously valid languages are no longer used.
"""
g = get_language_from_request
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
self.assertEqual('pt-br', g(r))
with self.settings(LANGUAGES=[('en', 'English')]):
self.assertNotEqual('pt-br', g(r))
class ResolutionOrderI18NTests(SimpleTestCase):
def setUp(self):
super(ResolutionOrderI18NTests, self).setUp()
activate('de')
def tearDown(self):
deactivate()
super(ResolutionOrderI18NTests, self).tearDown()
def assertUgettext(self, msgid, msgstr):
result = ugettext(msgid)
self.assertIn(msgstr, result, ("The string '%s' isn't in the "
"translation of '%s'; the actual result is '%s'." % (msgstr, msgid, result)))
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
@override_settings(LANGUAGE_CODE='de')
def test_app_translation(self):
# Original translation.
self.assertUgettext('Date/time', 'Datum/Zeit')
# Different translation.
with self.modify_settings(INSTALLED_APPS={'append': 'i18n.resolution'}):
# Force refreshing translations.
activate('de')
# Doesn't work because it's added later in the list.
self.assertUgettext('Date/time', 'Datum/Zeit')
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.admin.apps.SimpleAdminConfig'}):
# Force refreshing translations.
activate('de')
# Unless the original is removed from the list.
self.assertUgettext('Date/time', 'Datum/Zeit (APP)')
@override_settings(LOCALE_PATHS=extended_locale_paths)
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_locale_paths_translation(self):
self.assertUgettext('Time', 'LOCALE_PATHS')
def test_locale_paths_override_app_translation(self):
with self.settings(INSTALLED_APPS=['i18n.resolution']):
self.assertUgettext('Time', 'LOCALE_PATHS')
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_django_fallback(self):
self.assertEqual(ugettext('Date/time'), 'Datum/Zeit')
class TestModels(TestCase):
def test_lazy(self):
tm = TestModel()
tm.save()
def test_safestr(self):
c = Company(cents_paid=12, products_delivered=1)
c.name = SafeText('Iñtërnâtiônàlizætiøn1')
c.save()
c.name = SafeBytes('Iñtërnâtiônàlizætiøn1'.encode('utf-8'))
c.save()
class TestLanguageInfo(SimpleTestCase):
def test_localized_language_info(self):
li = get_language_info('de')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertEqual(li['bidi'], False)
def test_unknown_language_code(self):
six.assertRaisesRegex(self, KeyError, r"Unknown language code xx\.", get_language_info, 'xx')
def test_unknown_only_country_code(self):
li = get_language_info('de-xx')
self.assertEqual(li['code'], 'de')
self.assertEqual(li['name_local'], 'Deutsch')
self.assertEqual(li['name'], 'German')
self.assertEqual(li['bidi'], False)
def test_unknown_language_code_and_country_code(self):
six.assertRaisesRegex(self, KeyError, r"Unknown language code xx-xx and xx\.", get_language_info, 'xx-xx')
def test_fallback_language_code(self):
"""
        get_language_info() returns the first fallback language's info if the
        lang_info struct does not contain the 'name' key.
"""
li = get_language_info('zh-my')
self.assertEqual(li['code'], 'zh-hans')
li = get_language_info('zh-hans')
self.assertEqual(li['code'], 'zh-hans')
class MultipleLocaleActivationTests(SimpleTestCase):
"""
Tests for template rendering behavior when multiple locales are activated
during the lifetime of the same process.
"""
def setUp(self):
super(MultipleLocaleActivationTests, self).setUp()
self._old_language = get_language()
def tearDown(self):
super(MultipleLocaleActivationTests, self).tearDown()
activate(self._old_language)
def test_single_locale_activation(self):
"""
Simple baseline behavior with one locale for all the supported i18n constructs.
"""
with translation.override('fr'):
self.assertEqual(Template("{{ _('Yes') }}").render(Context({})), 'Oui')
self.assertEqual(Template("{% load i18n %}{% trans 'Yes' %}").render(Context({})), 'Oui')
self.assertEqual(
Template("{% load i18n %}{% blocktrans %}Yes{% endblocktrans %}").render(Context({})),
'Oui'
)
# Literal marked up with _() in a filter expression
def test_multiple_locale_filter(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'nee')
def test_multiple_locale_filter_deactivate(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'nee')
def test_multiple_locale_filter_direct_switch(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'nee')
# Literal marked up with _()
def test_multiple_locale(self):
with translation.override('de'):
t = Template("{{ _('No') }}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate(self):
with translation.override('de', deactivate=True):
t = Template("{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch(self):
with translation.override('de'):
t = Template("{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
# Literal marked up with _(), loading the i18n template tag library
def test_multiple_locale_loadi18n(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ _('No') }}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_loadi18n_deactivate(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_loadi18n_direct_switch(self):
with translation.override('de'):
t = Template("{% load i18n %}{{ _('No') }}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
# trans i18n tag
def test_multiple_locale_trans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% trans 'No' %}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate_trans(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{% trans 'No' %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch_trans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% trans 'No' %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
# blocktrans i18n tag
def test_multiple_locale_btrans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate_btrans(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch_btrans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
@override_settings(
USE_I18N=True,
LANGUAGES=[
('en', 'English'),
('fr', 'French'),
],
MIDDLEWARE_CLASSES=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls',
)
class LocaleMiddlewareTests(TestCase):
def test_streaming_response(self):
# Regression test for #5241
response = self.client.get('/fr/streaming/')
self.assertContains(response, "Oui/Non")
response = self.client.get('/en/streaming/')
self.assertContains(response, "Yes/No")
@override_settings(
MIDDLEWARE_CLASSES=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
)
def test_language_not_saved_to_session(self):
"""Checks that current language is not automatically saved to
session on every request."""
# Regression test for #21473
self.client.get('/fr/simple/')
self.assertNotIn(LANGUAGE_SESSION_KEY, self.client.session)
@override_settings(
USE_I18N=True,
LANGUAGES=[
('bg', 'Bulgarian'),
('en-us', 'English'),
        ('pt-br', 'Portuguese (Brazil)'),
],
MIDDLEWARE_CLASSES=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
ROOT_URLCONF='i18n.urls'
)
class CountrySpecificLanguageTests(SimpleTestCase):
def setUp(self):
super(CountrySpecificLanguageTests, self).setUp()
self.rf = RequestFactory()
def test_check_for_language(self):
self.assertTrue(check_for_language('en'))
self.assertTrue(check_for_language('en-us'))
self.assertTrue(check_for_language('en-US'))
self.assertTrue(check_for_language('be'))
self.assertTrue(check_for_language('be@latin'))
self.assertTrue(check_for_language('sr-RS@latin'))
self.assertTrue(check_for_language('sr-RS@12345'))
self.assertFalse(check_for_language('en-ü'))
self.assertFalse(check_for_language('en\x00'))
self.assertFalse(check_for_language(None))
self.assertFalse(check_for_language('be@ '))
# Specifying encoding is not supported (Django enforces UTF-8)
self.assertFalse(check_for_language('tr-TR.UTF-8'))
self.assertFalse(check_for_language('tr-TR.UTF8'))
self.assertFalse(check_for_language('de-DE.utf-8'))
def test_get_language_from_request(self):
# issue 19919
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8,bg;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('en-us', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('bg', lang)
def test_specific_language_codes(self):
# issue 11915
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
r = self.rf.get('/')
r.COOKIES = {}
r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4'}
lang = get_language_from_request(r)
self.assertEqual('pt-br', lang)
class TranslationFilesMissing(SimpleTestCase):
def setUp(self):
super(TranslationFilesMissing, self).setUp()
self.gettext_find_builtin = gettext_module.find
def tearDown(self):
gettext_module.find = self.gettext_find_builtin
super(TranslationFilesMissing, self).tearDown()
def patchGettextFind(self):
gettext_module.find = lambda *args, **kw: None
def test_failure_finding_default_mo_files(self):
'''
Ensure IOError is raised if the default language is unparseable.
Refs: #18192
'''
self.patchGettextFind()
trans_real._translations = {}
self.assertRaises(IOError, activate, 'en')
| bsd-3-clause |
awalls-cx18/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | 6 | 6198 | #!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
        'Run one of the volk tests first (e.g., volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
    # Pull the data out of each table into a dictionary so we can reference
    # a table by its name and the data associated with a given kernel in
    # name_reg by its name. This avoids any sorting issue with the data in
    # the dictionary, so the kernels are plotted against each other consistently.
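    # For illustration, the resulting structure is assumed to look like:
    #   table_data = {'<table name>': {'<kernel name>': row}}
    # where each row exposes at least the 'avg', 'min', 'max' and 'var'
    # fields that are read below.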
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = list(range(len(name_reg)))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
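            # Worked example of the sign convention (assuming y is this
            # table's time and n the --percent baseline's): y=2.0s, n=1.5s
            # gives -100*(2.0-1.5)/2.0 = -25, i.e. a 25% regression relative
            # to this table's own time.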
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / (M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
RO-ny9/python-for-android | python3-alpha/python3-src/Lib/distutils/log.py | 163 | 1908 | """A simple log mechanism styled after PEP 282."""
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5
import sys
class Log:
def __init__(self, threshold=WARN):
self.threshold = threshold
def _log(self, level, msg, args):
if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
raise ValueError('%s wrong log level' % str(level))
if level >= self.threshold:
if args:
msg = msg % args
if level in (WARN, ERROR, FATAL):
stream = sys.stderr
else:
stream = sys.stdout
if stream.errors == 'strict':
# emulate backslashreplace error handler
encoding = stream.encoding
msg = msg.encode(encoding, "backslashreplace").decode(encoding)
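                # e.g. u'caf\xe9' written to an ASCII stream round-trips
                # to 'caf\\xe9' instead of raising UnicodeEncodeError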
stream.write('%s\n' % msg)
stream.flush()
def log(self, level, msg, *args):
self._log(level, msg, args)
def debug(self, msg, *args):
self._log(DEBUG, msg, args)
def info(self, msg, *args):
self._log(INFO, msg, args)
def warn(self, msg, *args):
self._log(WARN, msg, args)
def error(self, msg, *args):
self._log(ERROR, msg, args)
def fatal(self, msg, *args):
self._log(FATAL, msg, args)
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
def set_threshold(level):
# return the old threshold for use from tests
old = _global_log.threshold
_global_log.threshold = level
return old
def set_verbosity(v):
if v <= 0:
set_threshold(WARN)
elif v == 1:
set_threshold(INFO)
elif v >= 2:
set_threshold(DEBUG)
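# Example usage (a sketch; `log`, `set_verbosity`, etc. are the module-level
# conveniences bound above):
#
#   from distutils import log
#   log.set_verbosity(2)                 # threshold DEBUG: everything prints
#   log.debug('compiling %s', 'foo.c')   # -> stdout
#   log.warn('missing header')           # -> stderr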
| apache-2.0 |
nesdis/djongo | tests/django_tests/tests/v21/tests/model_fields/test_foreignkey.py | 21 | 3762 | from decimal import Decimal
from django.apps import apps
from django.core import checks
from django.db import models
from django.test import TestCase, skipIfDBFeature
from django.test.utils import isolate_apps
from .models import Bar, FkToChar, Foo, PrimaryKeyCharModel
class ForeignKeyTests(TestCase):
def test_callable_default(self):
"""A lazy callable may be used for ForeignKey.default."""
a = Foo.objects.create(id=1, a='abc', d=Decimal('12.34'))
b = Bar.objects.create(b='bcd')
self.assertEqual(b.a, a)
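    # The model under test presumably looks like (a hypothetical sketch, not
    # the actual .models definition):
    #
    #   def get_foo():
    #       return Foo.objects.get(id=1)
    #
    #   class Bar(models.Model):
    #       b = models.CharField(max_length=10)
    #       a = models.ForeignKey(Foo, models.CASCADE, default=get_foo)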
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_empty_string_fk(self):
"""
Empty strings foreign key values don't get converted to None (#19299).
"""
char_model_empty = PrimaryKeyCharModel.objects.create(string='')
fk_model_empty = FkToChar.objects.create(out=char_model_empty)
fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk)
self.assertEqual(fk_model_empty.out, char_model_empty)
@isolate_apps('model_fields')
def test_warning_when_unique_true_on_fk(self):
class Foo(models.Model):
pass
class FKUniqueTrue(models.Model):
fk_field = models.ForeignKey(Foo, models.CASCADE, unique=True)
model = FKUniqueTrue()
expected_warnings = [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=FKUniqueTrue.fk_field.field,
id='fields.W342',
)
]
warnings = model.check()
self.assertEqual(warnings, expected_warnings)
def test_related_name_converted_to_text(self):
rel_name = Bar._meta.get_field('a').remote_field.related_name
self.assertIsInstance(rel_name, str)
def test_abstract_model_pending_operations(self):
"""
Foreign key fields declared on abstract models should not add lazy
relations to resolve relationship declared as string (#24215).
"""
pending_ops_before = list(apps._pending_operations.items())
class AbstractForeignKeyModel(models.Model):
fk = models.ForeignKey('missing.FK', models.CASCADE)
class Meta:
abstract = True
self.assertIs(AbstractForeignKeyModel._meta.apps, apps)
self.assertEqual(
pending_ops_before,
list(apps._pending_operations.items()),
'Pending lookup added for a foreign key on an abstract model'
)
@isolate_apps('model_fields', 'model_fields.tests')
def test_abstract_model_app_relative_foreign_key(self):
class AbstractReferent(models.Model):
reference = models.ForeignKey('Referred', on_delete=models.CASCADE)
class Meta:
app_label = 'model_fields'
abstract = True
def assert_app_model_resolved(label):
class Referred(models.Model):
class Meta:
app_label = label
class ConcreteReferent(AbstractReferent):
class Meta:
app_label = label
self.assertEqual(ConcreteReferent._meta.get_field('reference').related_model, Referred)
assert_app_model_resolved('model_fields')
assert_app_model_resolved('tests')
@isolate_apps('model_fields')
def test_to_python(self):
class Foo(models.Model):
pass
class Bar(models.Model):
fk = models.ForeignKey(Foo, models.CASCADE)
self.assertEqual(Bar._meta.get_field('fk').to_python('1'), 1)
| agpl-3.0 |
jiachenning/odoo | addons/base_gengo/res_company.py | 321 | 1890 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
_name = "res.company"
_inherit = "res.company"
_columns = {
"gengo_private_key": fields.text("Gengo Private Key", copy=False, groups="base.group_system"),
"gengo_public_key": fields.text("Gengo Public Key", copy=False, groups="base.group_user"),
"gengo_comment": fields.text("Comments", help="This comment will be automatically be enclosed in each an every request sent to Gengo", groups="base.group_user"),
"gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo.", groups="base.group_user"),
"gengo_sandbox": fields.boolean("Sandbox Mode", help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose."),
}
_defaults = {
"gengo_auto_approve": True,
}
| agpl-3.0 |
zero-rp/miniblink49 | third_party/WebKit/Source/build/scripts/make_css_property_names.py | 8 | 7760 | #!/usr/bin/env python
import subprocess
import sys
import css_properties
import in_generator
import license
HEADER_TEMPLATE = """
%(license)s
#ifndef %(class_name)s_h
#define %(class_name)s_h
#include "core/css/parser/CSSParserMode.h"
#include "wtf/HashFunctions.h"
#include "wtf/HashTraits.h"
#include <string.h>
namespace WTF {
class AtomicString;
class String;
}
namespace blink {
enum CSSPropertyID {
CSSPropertyInvalid = 0,
%(property_enums)s
};
const int firstCSSProperty = %(first_property_id)s;
const int numCSSProperties = %(properties_count)s;
const int lastCSSProperty = %(last_property_id)d;
const int lastUnresolvedCSSProperty = %(last_unresolved_property_id)d;
const size_t maxCSSPropertyNameLength = %(max_name_length)d;
const char* getPropertyName(CSSPropertyID);
const WTF::AtomicString& getPropertyNameAtomicString(CSSPropertyID);
WTF::String getPropertyNameString(CSSPropertyID);
WTF::String getJSPropertyName(CSSPropertyID);
inline CSSPropertyID convertToCSSPropertyID(int value)
{
ASSERT((value >= firstCSSProperty && value <= lastCSSProperty) || value == CSSPropertyInvalid);
return static_cast<CSSPropertyID>(value);
}
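// Alias properties carry bit 512; resolving a property id masks that bit
// off to recover the canonical id (see isPropertyAlias below).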
inline CSSPropertyID resolveCSSPropertyID(CSSPropertyID id)
{
return convertToCSSPropertyID(id & ~512);
}
inline bool isPropertyAlias(CSSPropertyID id) { return id & 512; }
CSSPropertyID unresolvedCSSPropertyID(const WTF::String&);
CSSPropertyID cssPropertyID(const WTF::String&);
} // namespace blink
namespace WTF {
template<> struct DefaultHash<blink::CSSPropertyID> { typedef IntHash<unsigned> Hash; };
template<> struct HashTraits<blink::CSSPropertyID> : GenericHashTraits<blink::CSSPropertyID> {
static const bool emptyValueIsZero = true;
static void constructDeletedValue(blink::CSSPropertyID& slot, bool) { slot = static_cast<blink::CSSPropertyID>(blink::lastUnresolvedCSSProperty + 1); }
static bool isDeletedValue(blink::CSSPropertyID value) { return value == (blink::lastUnresolvedCSSProperty + 1); }
};
}
#endif // %(class_name)s_h
"""
GPERF_TEMPLATE = """
%%{
%(license)s
#include "config.h"
#include "%(class_name)s.h"
#include "core/css/HashTools.h"
#include <string.h>
#include "wtf/ASCIICType.h"
#include "wtf/text/AtomicString.h"
#include "wtf/text/WTFString.h"
namespace blink {
static const char propertyNameStringsPool[] = {
%(property_name_strings)s
};
static const unsigned short propertyNameStringsOffsets[] = {
%(property_name_offsets)s
};
%%}
%%struct-type
struct Property;
%%omit-struct-type
%%language=C++
%%readonly-tables
%%global-table
%%compare-strncmp
%%define class-name %(class_name)sHash
%%define lookup-function-name findPropertyImpl
%%define hash-function-name property_hash_function
%%define slot-name nameOffset
%%define word-array-name property_word_list
%%enum
%%%%
%(property_to_enum_map)s
%%%%
const Property* findProperty(register const char* str, register unsigned int len)
{
return %(class_name)sHash::findPropertyImpl(str, len);
}
const char* getPropertyName(CSSPropertyID id)
{
ASSERT(id >= firstCSSProperty && id <= lastUnresolvedCSSProperty);
int index = id - firstCSSProperty;
return propertyNameStringsPool + propertyNameStringsOffsets[index];
}
const AtomicString& getPropertyNameAtomicString(CSSPropertyID id)
{
ASSERT(id >= firstCSSProperty && id <= lastUnresolvedCSSProperty);
int index = id - firstCSSProperty;
static AtomicString* propertyStrings = new AtomicString[lastUnresolvedCSSProperty]; // Intentionally never destroyed.
AtomicString& propertyString = propertyStrings[index];
if (propertyString.isNull()) {
const char* propertyName = propertyNameStringsPool + propertyNameStringsOffsets[index];
propertyString = AtomicString(propertyName, strlen(propertyName), AtomicString::ConstructFromLiteral);
}
return propertyString;
}
String getPropertyNameString(CSSPropertyID id)
{
// We share the StringImpl with the AtomicStrings.
return getPropertyNameAtomicString(id).string();
}
String getJSPropertyName(CSSPropertyID id)
{
char result[maxCSSPropertyNameLength + 1];
const char* cssPropertyName = getPropertyName(id);
const char* propertyNamePointer = cssPropertyName;
if (!propertyNamePointer)
return emptyString();
char* resultPointer = result;
while (char character = *propertyNamePointer++) {
if (character == '-') {
char nextCharacter = *propertyNamePointer++;
if (!nextCharacter)
break;
character = (propertyNamePointer - 2 != cssPropertyName) ? toASCIIUpper(nextCharacter) : nextCharacter;
}
*resultPointer++ = character;
}
*resultPointer = '\\0';
return String(result);
}
CSSPropertyID cssPropertyID(const String& string)
{
return resolveCSSPropertyID(unresolvedCSSPropertyID(string));
}
} // namespace blink
"""
class CSSPropertyNamesWriter(css_properties.CSSProperties):
class_name = "CSSPropertyNames"
def __init__(self, in_file_path):
super(CSSPropertyNamesWriter, self).__init__(in_file_path)
self._outputs = {(self.class_name + ".h"): self.generate_header,
(self.class_name + ".cpp"): self.generate_implementation,
}
def _enum_declaration(self, property):
return " %(property_id)s = %(enum_value)s," % property
def generate_header(self):
return HEADER_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'property_enums': "\n".join(map(self._enum_declaration, self._properties_including_aliases)),
'first_property_id': self._first_enum_value,
'properties_count': len(self._properties),
'last_property_id': self._first_enum_value + len(self._properties) - 1,
'last_unresolved_property_id': max(property["enum_value"] for property in self._properties_including_aliases),
'max_name_length': max(map(len, self._properties)),
}
def generate_implementation(self):
enum_value_to_name = {property['enum_value']: property['name'] for property in self._properties_including_aliases}
property_offsets = []
property_names = []
current_offset = 0
for enum_value in range(1, max(enum_value_to_name) + 1):
property_offsets.append(current_offset)
if enum_value in enum_value_to_name:
name = enum_value_to_name[enum_value]
property_names.append(name)
current_offset += len(name) + 1
css_name_and_enum_pairs = [(property['name'], property['property_id']) for property in self._properties_including_aliases]
gperf_input = GPERF_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'property_name_strings': '\n'.join(' "%s\\0"' % name for name in property_names),
'property_name_offsets': '\n'.join(' %d,' % offset for offset in property_offsets),
'property_to_enum_map': '\n'.join('%s, %s' % property for property in css_name_and_enum_pairs),
}
# FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
gperf_args.extend(['-m', '50']) # Pick best of 50 attempts.
gperf_args.append('-D') # Allow duplicate hashes -> More compact code.
gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
return gperf.communicate(gperf_input)[0]
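        # On newer Pythons the pipe above could be collapsed roughly to
        # (a sketch, not exercised here):
        #   subprocess.run(gperf_args, input=gperf_input,
        #                  stdout=subprocess.PIPE,
        #                  universal_newlines=True).stdout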
if __name__ == "__main__":
in_generator.Maker(CSSPropertyNamesWriter).main(sys.argv)
| apache-2.0 |
venzen/zen2pool | p2pool/test/util/test_math.py | 283 | 1198 | from __future__ import division
import random
import unittest
from p2pool.util import math
def generate_alphabet():
if random.randrange(2):
return None
else:
a = map(chr, xrange(256))
random.shuffle(a)
return a[:random.randrange(2, len(a))]
class Test(unittest.TestCase):
def test_add_tuples(self):
assert math.add_tuples((1, 2, 3), (4, 5, 6)) == (5, 7, 9)
def test_bases(self):
for i in xrange(10):
alphabet = generate_alphabet()
for i in xrange(100):
n = random.choice([
random.randrange(3),
random.randrange(300),
random.randrange(100000000000000000000000000000),
])
s = math.natural_to_string(n, alphabet)
n2 = math.string_to_natural(s, alphabet)
#print n, s.encode('hex'), n2
self.assertEquals(n, n2)
def test_binom(self):
for n in xrange(1, 100):
for x in xrange(n + 1):
left, right = math.binomial_conf_interval(x, n)
assert 0 <= left <= x/n <= right <= 1, (left, right, x, n)
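# For reference, the round-trip property exercised above (a sketch; the byte
# values assume the default base-256 alphabet used when none is supplied):
#
#   s = math.natural_to_string(1000)         # e.g. '\x03\xe8' (1000 == 0x3e8)
#   assert math.string_to_natural(s) == 1000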
| gpl-3.0 |
VerifiableRobotics/LTLMoP | src/lib/handlers/NXT/NXTDriveHandler.py | 7 | 1102 | #!/usr/bin/env python
"""
=================================================
NXTDrive.py - LEGO Mindstorms NXT Drive Handler
=================================================
Converts a desired global velocity vector into translational and rotational rates for a differential-drive robot,
using feedback linearization.
"""
from math import sin, cos, atan2
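# A minimal sketch of the feedback-linearization map described above, where
# epsilon is an assumed look-ahead offset (not part of this handler's API):
#
#   v = cos(theta) * x + sin(theta) * y               # forward speed
#   w = (-sin(theta) * x + cos(theta) * y) / epsilon  # turning rate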
import lib.handlers.handlerTemplates as handlerTemplates
class NXTDriveHandler(handlerTemplates.DriveHandler):
def __init__(self, executor, shared_data):
"""
Initialization method of drive handler for any NXT.
"""
try:
self.loco = executor.hsub.getHandlerInstanceByType(handlerTemplates.LocomotionCommandHandler)
self.coordmap = executor.hsub.coordmap_lab2map
except NameError:
print "(DRIVE) Locomotion Command Handler not found."
exit(-1)
#self.d = d
def setVelocity(self, x, y, theta=0):
"""Defining the velocity to send to the NXT"""
vx = x
vy = y
self.loco.sendCommand([vx,vy])
| gpl-3.0 |
frankiecjunle/yunblog | venv/lib/python2.7/site-packages/pygments/styles/tango.py | 363 | 7096 | # -*- coding: utf-8 -*-
"""
pygments.styles.tango
~~~~~~~~~~~~~~~~~~~~~
    The Crunchy default style, inspired by the color palette of
    the Tango Icon Theme Guidelines.
http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines
Butter: #fce94f #edd400 #c4a000
Orange: #fcaf3e #f57900 #ce5c00
Chocolate: #e9b96e #c17d11 #8f5902
Chameleon: #8ae234 #73d216 #4e9a06
Sky Blue: #729fcf #3465a4 #204a87
Plum: #ad7fa8 #75507b #5c35cc
Scarlet Red:#ef2929 #cc0000 #a40000
Aluminium: #eeeeec #d3d7cf #babdb6
#888a85 #555753 #2e3436
Not all of the above colors are used; other colors added:
very light grey: #f8f8f8 (for background)
This style can be used as a template as it includes all the known
Token types, unlike most (if not all) of the styles included in the
Pygments distribution.
    However, since Crunchy is intended to be used by beginners, we have striven
    to create a style that glosses over subtle distinctions between different
    categories.
Taking Python for example, comments (Comment.*) and docstrings (String.Doc)
have been chosen to have the same style. Similarly, keywords (Keyword.*),
and Operator.Word (and, or, in) have been assigned the same style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class TangoStyle(Style):
"""
    The Crunchy default style, inspired by the color palette of
    the Tango Icon Theme Guidelines.
"""
# work in progress...
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Multiline: "italic #8f5902", # class: 'cm'
Comment.Preproc: "italic #8f5902", # class: 'cp'
Comment.Single: "italic #8f5902", # class: 'c1'
Comment.Special: "italic #8f5902", # class: 'cs'
Keyword: "bold #204a87", # class: 'k'
Keyword.Constant: "bold #204a87", # class: 'kc'
Keyword.Declaration: "bold #204a87", # class: 'kd'
Keyword.Namespace: "bold #204a87", # class: 'kn'
Keyword.Pseudo: "bold #204a87", # class: 'kp'
Keyword.Reserved: "bold #204a87", # class: 'kr'
Keyword.Type: "bold #204a87", # class: 'kt'
Operator: "bold #ce5c00", # class: 'o'
Operator.Word: "bold #204a87", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#204a87", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "bold #5c35cc", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #204a87", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
# since the tango light blue does not show up well in text, we choose
# a pure blue instead.
Number: "bold #0000cf", # class: 'm'
Number.Float: "bold #0000cf", # class: 'mf'
Number.Hex: "bold #0000cf", # class: 'mh'
Number.Integer: "bold #0000cf", # class: 'mi'
Number.Integer.Long: "bold #0000cf", # class: 'il'
Number.Oct: "bold #0000cf", # class: 'mo'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "italic #000000", # class: 'go'
Generic.Prompt: "#8f5902", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
| mit |
gmalmquist/pants | src/python/pants/backend/jvm/targets/jvm_app.py | 2 | 10801 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import OrderedDict, namedtuple
from hashlib import sha1
import six
from twitter.common.dirutil import Fileset
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import deprecated_conditional
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import PayloadField, PrimitiveField, combine_hashes
from pants.base.validation import assert_list
from pants.build_graph.target import Target
from pants.fs import archive as Archive
from pants.source.wrapped_globs import FilesetWithSpec
from pants.util.dirutil import fast_relpath
from pants.util.memo import memoized_property
class RelativeToMapper(object):
"""A mapper that maps filesystem paths specified relative to a base directory."""
def __init__(self, base):
"""The base directory paths should be mapped from."""
self.base = base
def __call__(self, path):
return os.path.relpath(path, self.base)
def __repr__(self):
return 'IdentityMapper({})'.format(self.base)
def __hash__(self):
return hash(self.base)
class DirectoryReMapper(object):
"""A mapper that maps files relative to a base directory into a destination directory."""
class NonexistentBaseError(Exception):
pass
def __init__(self, base, dest):
"""The base directory files should be mapped from, and the dest they should be mapped to.
:param string base: the relative path to get_buildroot()
:param string dest: the dest path in the bundle
"""
self.base = os.path.abspath(os.path.join(get_buildroot(), base))
if not os.path.isdir(self.base):
raise DirectoryReMapper.NonexistentBaseError(
'Could not find a directory to bundle relative to {0}'.format(self.base))
self.dest = dest
def __call__(self, path):
return os.path.join(self.dest, os.path.relpath(path, self.base))
def __repr__(self):
return 'DirectoryReMapper({0}, {1})'.format(self.base, self.dest)
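# For illustration (hypothetical paths, not from the source tree):
#   DirectoryReMapper('src/main/config', 'config') maps
#   '<buildroot>/src/main/config/app.yml' -> 'config/app.yml'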
class BundleProps(namedtuple('_BundleProps', ['rel_path', 'mapper', 'fileset'])):
@memoized_property
def filemap(self):
filemap = OrderedDict()
if self.fileset is not None:
paths = self.fileset() if isinstance(self.fileset, Fileset) \
else self.fileset if hasattr(self.fileset, '__iter__') \
else [self.fileset]
for path in paths:
abspath = path
if not os.path.isabs(abspath):
abspath = os.path.join(get_buildroot(), self.rel_path, path)
filemap[abspath] = self.mapper(abspath)
return filemap
def __hash__(self):
# Leave out fileset from hash calculation since it may not be hashable.
return hash((self.rel_path, self.mapper))
class Bundle(object):
"""A set of files to include in an application bundle.
To learn about application bundles, see
`bundles <JVMProjects.html#jvm-bundles>`_.
Looking for Java-style resources accessible via the ``Class.getResource`` API?
Those are `resources <build_dictionary.html#resources>`_.
Files added to the bundle will be included when bundling an application target.
By default relative paths are preserved. For example, to include ``config``
and ``scripts`` directories: ::
bundles=[
bundle(fileset=[rglobs('config/*', 'scripts/*'), 'my.cfg']),
]
To include files relative to some path component use the ``relative_to`` parameter.
The following places the contents of ``common/config`` in a ``config`` directory
in the bundle. ::
bundles=[
bundle(relative_to='common', fileset=globs('common/config/*'))
]
"""
def __init__(self, parse_context):
self._rel_path = parse_context.rel_path
def __call__(self, rel_path=None, mapper=None, relative_to=None, fileset=None):
"""
:param rel_path: Base path of the "source" file paths. By default, path of the
BUILD file. Useful for assets that don't live in the source code repo.
:param mapper: Function that takes a path string and returns a path string. Takes a path in
the source tree, returns a path to use in the resulting bundle. By default, an identity
mapper.
:param string relative_to: Set up a simple mapping from source path to bundle path.
:param fileset: The set of files to include in the bundle. A string filename, or list of
filenames, or a Fileset object (e.g. globs()).
E.g., ``relative_to='common'`` removes that prefix from all files in the application bundle.
"""
deprecated_conditional(lambda: fileset is None,
'1.2.0',
'bare bundle() without `fileset=` param',
"Pass the `fileset=` parameter: `bundle(fileset=globs('*.config')`")
if mapper and relative_to:
raise ValueError("Must specify exactly one of 'mapper' or 'relative_to'")
if rel_path and isinstance(fileset, FilesetWithSpec):
raise ValueError("Must not use a glob for 'fileset' with 'rel_path'."
" Globs are eagerly evaluated and ignore 'rel_path'.")
# A fileset is either a glob, a string or a list of strings.
if isinstance(fileset, FilesetWithSpec):
pass
elif isinstance(fileset, six.string_types):
fileset = [fileset]
else:
fileset = assert_list(fileset, key_arg='fileset')
real_rel_path = rel_path or self._rel_path
if relative_to:
base = os.path.join(get_buildroot(), real_rel_path, relative_to)
mapper = RelativeToMapper(base)
else:
mapper = mapper or RelativeToMapper(os.path.join(get_buildroot(), real_rel_path))
return BundleProps(real_rel_path, mapper, fileset)
class BundleField(tuple, PayloadField):
"""A tuple subclass that mixes in PayloadField.
Must be initialized with an iterable of Bundle instances.
"""
@staticmethod
def _hash_bundle(bundle):
hasher = sha1()
hasher.update(bundle.rel_path)
for abs_path in sorted(bundle.filemap.keys()):
buildroot_relative_path = os.path.relpath(abs_path, get_buildroot())
hasher.update(buildroot_relative_path)
hasher.update(bundle.filemap[abs_path])
if os.path.isfile(abs_path):
      # Update with an extra marker byte to differentiate an empty file from a non-existing one.
hasher.update('e')
with open(abs_path, 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
def _compute_fingerprint(self):
return combine_hashes(map(BundleField._hash_bundle, self))
class JvmApp(Target):
"""A deployable JVM application.
Invoking the ``bundle`` goal on one of these targets creates a
self-contained artifact suitable for deployment on some other machine.
The artifact contains the executable jar, its dependencies, and
extra files like config files, startup scripts, etc.
:API: public
"""
class InvalidArchiveType(Exception):
"""Raised when archive type defined in Target is invalid"""
def __init__(self,
name=None,
payload=None,
binary=None,
bundles=None,
basename=None,
deployjar=None,
archive=None,
**kwargs):
"""
:param string binary: Target spec of the ``jvm_binary`` that contains the
app main.
:param bundles: One or more ``bundle``\s
describing "extra files" that should be included with this app
(e.g.: config files, startup scripts).
:param string basename: Name of this application, if different from the
``name``. Optionally pants uses this in the ``bundle`` goal to name the distribution
artifact. Note this is unsafe because of the possible conflict when multiple bundles
are built.
:param boolean deployjar: If True, pack all 3rdparty and internal jar classfiles into
a single deployjar in the bundle's root dir. If unset, all jars will go into the
bundle's libs directory, the root will only contain a synthetic jar with its manifest's
Class-Path set to those jars.
:param string archive: Create an archive of this type from the bundle.
"""
if archive and archive not in Archive.TYPE_NAMES:
raise self.InvalidArchiveType(
'Given archive type "{}" is invalid, choose from {}.'.format(archive, list(Archive.TYPE_NAMES)))
payload = payload or Payload()
payload.add_fields({
'basename': PrimitiveField(basename or name),
'binary': PrimitiveField(binary),
'bundles': BundleField(bundles or []),
'deployjar': PrimitiveField(deployjar),
'archive': PrimitiveField(archive),
})
super(JvmApp, self).__init__(name=name, payload=payload, **kwargs)
if name == basename:
raise TargetDefinitionException(self, 'basename must not equal name.')
def globs_relative_to_buildroot(self):
buildroot = get_buildroot()
globs = []
for bundle in self.bundles:
fileset = bundle.fileset
if fileset is None:
continue
elif hasattr(fileset, 'filespec'):
globs += bundle.fileset.filespec['globs']
else:
# NB(nh): filemap is an OrderedDict, so this ordering is stable.
globs += [fast_relpath(f, buildroot) for f in bundle.filemap.keys()]
super_globs = super(JvmApp, self).globs_relative_to_buildroot()
if super_globs:
globs += super_globs['globs']
return {'globs': globs}
@property
def traversable_dependency_specs(self):
for spec in super(JvmApp, self).traversable_dependency_specs:
yield spec
if self.payload.binary:
yield self.payload.binary
@property
def basename(self):
return self.payload.basename
@property
def bundles(self):
return self.payload.bundles
@property
def binary(self):
""":returns: The JvmBinary instance this JvmApp references.
:rtype: JvmBinary
"""
dependencies = self.dependencies
if len(dependencies) != 1:
raise TargetDefinitionException(self, 'A JvmApp must define exactly one JvmBinary '
'dependency, have: {}'.format(dependencies))
binary = dependencies[0]
if not isinstance(binary, JvmBinary):
raise TargetDefinitionException(self, 'Expected JvmApp binary dependency to be a JvmBinary '
'target, found {}'.format(binary))
return binary
@property
def jar_dependencies(self):
return self.binary.jar_dependencies
| apache-2.0 |
sricharanaz/iommu | tools/perf/scripts/python/sched-migration.py | 1910 | 11965 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
		Don't count the idle task."""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
		ret += " [last event: %s]" % self.event
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
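		# Binary search over the chronologically ordered slices for the one
		# whose [start, end] interval contains ts; returns its index, or -1
		# if no slice covers ts.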
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
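		# load_rate 0 -> (0xff, 0xff, 0xff) (white); load_rate 1 -> (0xff, 0, 0)
		# (pure red), so redder cells carry a larger share of the total load.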
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass
| gpl-2.0 |
runekaagaard/django-contrib-locking | django/contrib/gis/tests/geoapp/tests.py | 1 | 40591 | from __future__ import unicode_literals
import re
from tempfile import NamedTemporaryFile
from django.db import connection
from django.contrib.gis import gdal
from django.contrib.gis.db.models import Extent, MakeLine, Union
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import no_oracle, oracle, postgis, spatialite
from django.core.management import call_command
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
if HAS_GEOS:
from django.contrib.gis.geos import (fromstr, GEOSGeometry,
Point, LineString, LinearRing, Polygon, GeometryCollection)
from .models import Country, City, PennsylvaniaCity, State, Track, NonConcreteModel, Feature, MinusOneSRID
def postgis_bug_version():
spatial_version = getattr(connection.ops, "spatial_version", (0, 0, 0))
return spatial_version and (2, 0, 0) <= spatial_version <= (2, 0, 1)
@skipUnlessDBFeature("gis_enabled")
class GeoModelTest(TestCase):
fixtures = ['initial']
def test_fixtures(self):
"Testing geographic model initialization from fixtures."
# Ensuring that data was loaded from initial data fixtures.
self.assertEqual(2, Country.objects.count())
self.assertEqual(8, City.objects.count())
self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
## Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
try:
nullcity.point = bad
except TypeError:
pass
else:
self.fail('Should throw a TypeError')
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took, notice no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.delete()
## Testing on a Polygon
shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
if gdal.HAS_GDAL:
self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry)
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertIsInstance(ns.poly.srs, gdal.SpatialReference)
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@skipUnlessDBFeature("supports_transform")
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# Oracle doesn't have SRID 3084, using 41157.
if oracle:
# San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
# Used the following Oracle SQL to get this value:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(
# SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157))
# )
# FROM DUAL;
nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
nad_srid = 41157
else:
# San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
# Used ogr.py in gdal 1.4.1 for this transform
nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)'
nad_srid = 3084
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
nad_pnt = fromstr(nad_wkt, srid=nad_srid)
if oracle:
tx = Country.objects.get(mpoly__contains=nad_pnt)
else:
tx = Country.objects.get(mpoly__intersects=nad_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=nad_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
if spatialite and connection.ops.spatial_version < (3, 0, 0):
# SpatiaLite < 3 does not support missing SRID values.
return
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertEqual(c.point, None)
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertIsInstance(f_1.geom, Point)
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertIsInstance(f_2.geom, LineString)
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertIsInstance(f_3.geom, Polygon)
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertIsInstance(f_4.geom, GeometryCollection)
self.assertEqual(f_3.geom, f_4.geom[2])
@skipUnlessDBFeature("supports_transform")
def test_inherited_geofields(self):
"Test GeoQuerySet methods on inherited Geometry fields."
# Creating a Pennsylvanian city.
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.transform(32128)
self.assertEqual(1, qs.count())
for pc in qs:
self.assertEqual(32128, pc.point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
# Only PostGIS would support a 'select *' query because of its recognized
# HEXEWKB format for geometry fields
as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
cities2 = City.objects.raw(
'select id, name, %s from geoapp_city' % as_text % 'point'
)
self.assertEqual(len(cities1), len(list(cities2)))
self.assertIsInstance(cities2[0].point, Point)
def test_dumpdata_loaddata_cycle(self):
"""
Test a dumpdata/loaddata cycle with geographic data.
"""
out = six.StringIO()
original_data = list(City.objects.all().order_by('name'))
call_command('dumpdata', 'geoapp.City', stdout=out)
result = out.getvalue()
houston = City.objects.get(name='Houston')
self.assertIn('"point": "%s"' % houston.point.ewkt, result)
# Reload now dumped data
with NamedTemporaryFile(mode='w', suffix='.json') as tempfile:
tempfile.write(result)
tempfile.seek(0)
call_command('loaddata', tempfile.name, verbosity=0)
self.assertListEqual(original_data, list(City.objects.all().order_by('name')))
@skipUnlessDBFeature("gis_enabled")
class GeoLookupTest(TestCase):
fixtures = ['initial']
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
if connection.features.supports_real_shape_operations:
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry).
if not (spatialite and connection.ops.spatial_version < (3, 0, 0)):
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(len(Country.objects.filter(mpoly__contains=pueblo.point)), 0) # Query w/GEOSGeometry object
self.assertEqual(len(Country.objects.filter(mpoly__contains=okcity.point.wkt)),
0 if connection.features.supports_real_shape_operations else 1) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
@skipUnlessDBFeature("supports_left_right_lookups")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
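        # e.g. with A = POINT(-104 38) and B a box spanning x in [-102, -94],
        # A << B holds because xmax(A) = -104 < -102 = xmin(B).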
# The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
# http://trac.osgeo.org/postgis/ticket/2035
if postgis_bug_version():
self.skipTest("PostGIS 2.0/2.0.1 left and right lookups are known to be buggy.")
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual('Houston', c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name='Puerto Rico')
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
self.assertEqual(1, len(nullqs))
self.assertEqual('Puerto Rico', nullqs[0].name)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertIn('Colorado', state_names)
self.assertIn('Kansas', state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
self.assertEqual(nmi.poly, None)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
nmi.save()
State.objects.filter(name='Northern Mariana Islands').update(poly=None)
self.assertIsNone(State.objects.get(name='Northern Mariana Islands').poly)
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference point in
# different SRIDs.
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
        # Not passing in a geometry as the first param should
        # raise a ValueError when initializing the GeoQuerySet.
self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
self.assertRaises(e, qs.count)
# Relate works differently for the different backends.
if postgis or spatialite:
contains_mask = 'T*T***FF*'
within_mask = 'T*F**F***'
intersects_mask = 'T********'
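            # (These masks are DE-9IM intersection-matrix patterns: nine cells
            # read row by row, where T = must intersect, F = must not
            # intersect, and * = don't care.)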
elif oracle:
contains_mask = 'contains'
within_mask = 'inside'
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = 'overlapbdyintersect'
# Testing contains relation mask.
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
# Testing within relation mask.
ks = State.objects.get(name='Kansas')
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
# Testing intersection relation mask.
if not oracle:
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnlessDBFeature("gis_enabled")
class GeoQuerySetTest(TestCase):
fixtures = ['initial']
# Please keep the tests in GeoQuerySet method's alphabetic order
@skipUnlessDBFeature("has_centroid_method")
def test_centroid(self):
"Testing the `centroid` GeoQuerySet method."
qs = State.objects.exclude(poly__isnull=True).centroid()
if oracle:
tol = 0.1
elif spatialite:
tol = 0.000001
else:
tol = 0.000000001
for s in qs:
self.assertTrue(s.poly.centroid.equals_exact(s.centroid, tol))
@skipUnlessDBFeature(
"has_difference_method", "has_intersection_method",
"has_sym_difference_method", "has_union_method")
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23)
qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)
        # XXX For some reason SpatiaLite does something screwy with the Texas geometry here. Also,
        # XXX it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.intersection(geom)
for c in qs:
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
pass
else:
self.assertEqual(c.mpoly.difference(geom), c.difference)
if not spatialite:
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
set(g.wkt for g in c.sym_difference))
self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
set(g.wkt for g in c.union))
@skipUnlessDBFeature("has_envelope_method")
def test_envelope(self):
"Testing the `envelope` GeoQuerySet method."
countries = Country.objects.all().envelope()
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("supports_extent_aggr")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_extent(self):
"""
Testing the (deprecated) `extent` GeoQuerySet method and the Extent
aggregate.
"""
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent1 = qs.extent()
extent2 = qs.aggregate(Extent('point'))['point__extent']
for extent in (extent1, extent2):
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
self.assertIsNone(City.objects.filter(name=('Smalltown')).extent())
self.assertIsNone(City.objects.filter(name=('Smalltown')).aggregate(Extent('point'))['point__extent'])
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent_with_limit(self):
"""
Testing if extent supports limit.
"""
extent1 = City.objects.all().aggregate(Extent('point'))['point__extent']
extent2 = City.objects.all()[:3].aggregate(Extent('point'))['point__extent']
self.assertNotEqual(extent1, extent2)
@skipUnlessDBFeature("has_force_rhr_method")
def test_force_rhr(self):
"Testing GeoQuerySet.force_rhr()."
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
s = State.objects.force_rhr().get(name='Foo')
self.assertEqual(rhr_rings, s.force_rhr.coords)
@skipUnlessDBFeature("has_geohash_method")
def test_geohash(self):
"Testing GeoQuerySet.geohash()."
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.geohash().get(name='Houston')
h2 = City.objects.geohash(precision=5).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
def test_geojson(self):
"Testing GeoJSON output from the database using GeoQuerySet.geojson()."
# Only PostGIS and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = (
'{"type":"Point","crs":{"type":"name","properties":'
'{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
)
victoria_json = (
'{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
'"coordinates":[-123.305196,48.462611]}'
)
chicago_json = (
'{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
'"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
)
if spatialite:
victoria_json = (
'{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
'"coordinates":[-123.305196,48.462611]}'
)
# Precision argument should only be an integer
self.assertRaises(TypeError, City.objects.geojson, precision='foo')
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
# FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(
chicago_json,
City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson
)
@skipUnlessDBFeature("has_gml_method")
def test_gml(self):
"Testing GML output from the database using GeoQuerySet.gml()."
        # Should throw a TypeError when trying to obtain GML from a
        # non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.gml, field_name='name')
ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.gml(precision=9).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(
r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
r'</gml:coordinates></gml:Point>'
)
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(
r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." '
r'cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>'
)
else:
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
)
for ptown in [ptown1, ptown2]:
self.assertTrue(gml_regex.match(ptown.gml))
if postgis:
self.assertIn('<gml:pos srsDimension="2">', City.objects.gml(version=3).get(name='Pueblo').gml)
@skipUnlessDBFeature("has_kml_method")
def test_kml(self):
"Testing KML output from the database using GeoQuerySet.kml()."
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.kml, 'name')
# Ensuring the KML is as expected.
ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
for ptown in [ptown1, ptown2]:
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_make_line(self):
"""
Testing the (deprecated) `make_line` GeoQuerySet method and the MakeLine
aggregate.
"""
if not connection.features.supports_make_line_aggr:
# Only PostGIS has support for the MakeLine aggregate. For other
# backends, test that NotImplementedError is raised
self.assertRaises(
NotImplementedError,
City.objects.all().aggregate, MakeLine('point')
)
return
# Ensuring that a `TypeError` is raised on models without PointFields.
self.assertRaises(TypeError, State.objects.make_line)
self.assertRaises(TypeError, Country.objects.make_line)
# MakeLine on an inappropriate field returns simply None
self.assertIsNone(State.objects.aggregate(MakeLine('poly'))['poly__makeline'])
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry(
'LINESTRING(-95.363151 29.763374,-96.801611 32.782057,'
'-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,'
'-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)',
srid=4326
)
        # We check for equality with a tolerance of 10e-5, which is a lower
        # bound on the precision of the ref_line coordinates.
line1 = City.objects.make_line()
line2 = City.objects.aggregate(MakeLine('point'))['point__makeline']
for line in (line1, line2):
self.assertTrue(ref_line.equals_exact(line, tolerance=10e-5),
"%s != %s" % (ref_line, line))
@skipUnlessDBFeature("has_num_geom_method")
def test_num_geom(self):
"Testing the `num_geom` GeoQuerySet method."
# Both 'countries' only have two geometries.
for c in Country.objects.num_geom():
self.assertEqual(2, c.num_geom)
for c in City.objects.filter(point__isnull=False).num_geom():
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections, whereas PostGIS < 2.0.0
# will return None.
if postgis and connection.ops.spatial_version < (2, 0, 0):
self.assertIsNone(c.num_geom)
else:
self.assertEqual(1, c.num_geom)
@skipUnlessDBFeature("supports_num_points_poly")
def test_num_points(self):
"Testing the `num_points` GeoQuerySet method."
for c in Country.objects.num_points():
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.num_points():
self.assertEqual(1, c.num_points)
@skipUnlessDBFeature("has_point_on_surface_method")
def test_point_on_surface(self):
"Testing the `point_on_surface` GeoQuerySet method."
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
for c in Country.objects.point_on_surface():
if spatialite:
# XXX This seems to be a WKT-translation-related precision issue?
tol = 0.00001
else:
tol = 0.000000001
self.assertTrue(ref[c.name].equals_exact(c.point_on_surface, tol))
@skipUnlessDBFeature("has_reverse_method")
def test_reverse_geom(self):
"Testing GeoQuerySet.reverse_geom()."
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
t = Track.objects.reverse_geom().get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), t.reverse_geom.coords)
if oracle:
self.assertRaises(TypeError, State.objects.reverse_geom)
@skipUnlessDBFeature("has_scale_method")
def test_scale(self):
"Testing the `scale` GeoQuerySet method."
xfac, yfac = 2, 3
tol = 5 # XXX The low precision tolerance is for SpatiaLite
qs = Country.objects.scale(xfac, yfac, model_att='scaled')
for c in qs:
for p1, p2 in zip(c.mpoly, c.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
@skipUnlessDBFeature("has_snap_to_grid_method")
def test_snap_to_grid(self):
"Testing GeoQuerySet.snap_to_grid()."
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid,
tol
)
)
@skipUnlessDBFeature("has_svg_method")
def test_svg(self):
"Testing SVG output using GeoQuerySet.svg()."
self.assertRaises(TypeError, City.objects.svg, precision='foo')
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
        # Even though relative, there is only one point, so the output is
        # practically the same except for the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_transform_method")
def test_transform(self):
"Testing the transform() GeoQuerySet method."
# Pre-transformed points for Houston and Pueblo.
htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points. Oracle does not have the 3084 SRID.
if not oracle:
h = City.objects.transform(htown.srid).get(name='Houston')
self.assertEqual(3084, h.point.srid)
self.assertAlmostEqual(htown.x, h.point.x, prec)
self.assertAlmostEqual(htown.y, h.point.y, prec)
p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
for p in [p1, p2]:
self.assertEqual(2774, p.point.srid)
self.assertAlmostEqual(ptown.x, p.point.x, prec)
self.assertAlmostEqual(ptown.y, p.point.y, prec)
@skipUnlessDBFeature("has_translate_method")
def test_translate(self):
"Testing the `translate` GeoQuerySet method."
xfac, yfac = 5, -23
qs = Country.objects.translate(xfac, yfac, model_att='translated')
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# XXX The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# TODO: Oracle can be made to pass if
# union1 = union2 = fromstr('POINT (-97.5211570000000023 34.4646419999999978)')
# but this seems unexpected and should be investigated to determine the cause.
@skipUnlessDBFeature("has_unionagg_method")
@no_oracle
@ignore_warnings(category=RemovedInDjango20Warning)
def test_unionagg(self):
"""
Testing the (deprecated) `unionagg` (aggregate union) GeoQuerySet method
and the Union aggregate.
"""
tx = Country.objects.get(name='Texas').mpoly
# Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
union2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)')
qs = City.objects.filter(point__within=tx)
self.assertRaises(TypeError, qs.unionagg, 'name')
self.assertRaises(ValueError, qs.aggregate, Union('name'))
# Using `field_name` keyword argument in one query and specifying an
# order in the other (which should not be used because this is
# an aggregate method on a spatial column)
u1 = qs.unionagg(field_name='point')
u2 = qs.order_by('name').unionagg()
u3 = qs.aggregate(Union('point'))['point__union']
u4 = qs.order_by('name').aggregate(Union('point'))['point__union']
tol = 0.00001
self.assertTrue(union1.equals_exact(u1, tol) or union2.equals_exact(u1, tol))
self.assertTrue(union1.equals_exact(u2, tol) or union2.equals_exact(u2, tol))
self.assertTrue(union1.equals_exact(u3, tol) or union2.equals_exact(u3, tol))
self.assertTrue(union1.equals_exact(u4, tol) or union2.equals_exact(u4, tol))
qs = City.objects.filter(name='NotACity')
self.assertIsNone(qs.unionagg(field_name='point'))
self.assertIsNone(qs.aggregate(Union('point'))['point__union'])
def test_non_concrete_field(self):
NonConcreteModel.objects.create(point=Point(0, 0), name='name')
list(NonConcreteModel.objects.all())
| bsd-3-clause |
patrick-brian-mooney/UlyssesRedux | utility_scripts/get-chapter-7-stats.py | 1 | 1619 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Produces a quick .csv file summarizing number of sentences in each paragraph
in this particular chapter, encoding basic facts about structure that are
relevant to the script 7.py. Makes a lot of assumptions about the structure of
the text file it's processing, including the assumption of one paragraph per
line with no blank lines.
usage:
./get-chapter-7-stats.py
This program is licensed under the GPL v3 or, at your option, any later
version. See the file LICENSE.md for a copy of this licence.
"""
import sys, os, re
sys.path.append('/UlyssesRedux/scripts/')
from directory_structure import * # Gets us the listing of file and directory locations.
ch7_text = open(aeolus_base_text_path).readlines()
the_stats_file = open(aeolus_stats_path, 'w')
for the_line in [ which_line.strip() for which_line in ch7_text if len(which_line.strip()) > 0 ]:
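    # e.g. re.split("[!?.]+", "Yes. No! Maybe?") -> ['Yes', ' No', ' Maybe', ''];
    # filter(None, ...) drops the trailing empty string, giving 3 sentences.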
num_sents = len(list(filter(None, re.split("[!?.]+", the_line))))
num_tokens = len(the_line.split(' '))
if the_line.strip().upper() == the_line.strip(): # It's a headline
the_stats_file.write('H') # indicate a header by beginning with H
elif the_line.startswith("--") or the_line.startswith("—"): # It's a line of dialogue
the_stats_file.write('—') # indicates a quote beginning with an em dash
else:
the_stats_file.write(' ') # or just begin with a space for non-headline, non-quoted text.
    the_stats_file.write('%d,%d\n' % (num_sents, num_tokens)) # Line format: # of sentences, # of tokens + newline
the_stats_file.close() | gpl-3.0 |
rvalyi/OpenUpgrade | addons/report_webkit/__openerp__.py | 89 | 4029 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
{
'name': 'Webkit Report Engine',
'description': """
This module adds a new Report Engine based on WebKit library (wkhtmltopdf) to support reports designed in HTML + CSS.
=====================================================================================================================
The module structure and some code is inspired by the report_openoffice module.
The module allows:
------------------
- HTML report definition
- Multi header support
- Multi logo
- Multi company support
- HTML and CSS-3 support (In the limit of the actual WebKIT version)
- JavaScript support
- Raw HTML debugger
- Book printing capabilities
- Margins definition
- Paper size definition
Multiple headers and logos can be defined per company. CSS style, header and
footer body are defined per company.
For a sample report see also the webkit_report_sample module, and this video:
http://files.me.com/nbessi/06n92k.mov
Requirements and Installation:
------------------------------
This module requires the ``wkhtmltopdf`` library to render HTML documents as
PDF. Version 0.9.9 or later is necessary, and can be found at
http://code.google.com/p/wkhtmltopdf/ for Linux, Mac OS X (i386) and Windows (32bits).
After installing the library on the OpenERP Server machine, you may need to set
the path to the ``wkhtmltopdf`` executable file in a system parameter named
``webkit_path`` in Settings -> Customization -> Parameters -> System Parameters
If you are experiencing missing header/footer problems on Linux, be sure to
install a 'static' version of the library. The default ``wkhtmltopdf`` on
Ubuntu is known to have this issue.
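For example (assuming a typical Linux install location), ``webkit_path`` would
be set to a value such as ``/usr/local/bin/wkhtmltopdf``.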
TODO:
-----
* JavaScript support activation deactivation
* Collated and book format support
* Zip return for separated PDF
* Web client WYSIWYG
""",
'version': '0.9',
'depends': ['base','report'],
'author': 'Camptocamp',
    'category': 'Reporting', # i.e. a technical module, not shown in Application install menu
    'url': 'http://www.camptocamp.com/',
'data': [ 'security/ir.model.access.csv',
'data.xml',
'wizard/report_webkit_actions_view.xml',
'company_view.xml',
'header_view.xml',
'ir_report_view.xml',
],
'demo': [
"report/webkit_report_demo.xml",
],
'test': [
"test/print.yml",
],
'installable': True,
'auto_install': False,
'images': ['images/companies_webkit.jpeg','images/header_html.jpeg','images/header_img.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
olegpshenichniy/Booktype | lib/booki/messaging/migrations/0001_initial.py | 7 | 3743 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import booki.messaging.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Endpoint',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('syntax', models.CharField(unique=True, max_length=2500, verbose_name='syntax')),
],
options={
'verbose_name': 'Endpoint',
'verbose_name_plural': 'Endpoints',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EndpointConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('notification_filter', models.CharField(max_length=2500, verbose_name='notification filter', blank=True)),
],
options={
'verbose_name': 'Endpoint config',
'verbose_name_plural': 'Endpoint configs',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Following',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('follower', models.ForeignKey(related_name=b'follower', verbose_name='follower', to='messaging.Endpoint')),
('target', models.ForeignKey(related_name=b'target', verbose_name='target', to='messaging.Endpoint')),
],
options={
'verbose_name': 'Following',
'verbose_name_plural': 'Followings',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now=True, verbose_name='timestamp')),
('content', models.TextField(verbose_name='content')),
('attachment', models.FileField(upload_to=booki.messaging.models.uploadAttachmentTo, max_length=2500, verbose_name='attachment')),
('snippet', models.TextField(verbose_name='snippet')),
('context_url', models.TextField(verbose_name='context')),
('sender', models.ForeignKey(verbose_name='sender', to='messaging.Endpoint')),
],
options={
'verbose_name': 'Post',
'verbose_name_plural': 'Posts',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PostAppearance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(verbose_name='timestamp')),
('endpoint', models.ForeignKey(verbose_name='endpoint', to='messaging.Endpoint')),
('post', models.ForeignKey(verbose_name='post', to='messaging.Post')),
],
options={
'verbose_name': 'Post appearance',
'verbose_name_plural': 'Post appearances',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='endpoint',
name='config',
field=models.ForeignKey(null=True, blank=True, to='messaging.EndpointConfig', unique=True),
preserve_default=True,
),
]
| agpl-3.0 |
gowiden/shadowsocks | shadowsocks/asyncdns.py | 8 | 16614 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
def build_request(address, qtype, request_id):
header = struct.pack('!HBBHHHH', request_id, 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return header + addr + qtype_qclass
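# Illustrative sketch (not part of the original module): for an A query,
#     build_request(b'example.com', QTYPE_A, 1)
# returns the 12-byte header followed by the QNAME produced by
# build_address() -- b'\x07example\x03com\x00' -- and the packed
# QTYPE/QCLASS fields.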
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
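# Example of the compression-pointer branch above (packet bytes assumed for
# illustration): if b'\x07example\x03com\x00' appears at offset 12, a later
# occurrence of the same name can be encoded as just b'\xc0\x0c' (the two
# high bits set plus offset 12), which parse_name() dereferences recursively.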
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
import traceback
traceback.print_exc()
logging.error(e)
return None
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
socket.inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
    # bytes indexing yields an int on Python 3, so compare with endswith()
    if hostname.endswith(b'.'):
        hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
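# e.g. is_valid_hostname(b'www.example.com') -> True, while
# is_valid_hostname(b'-bad-.example.com') -> False, since the (?!-) and
# (?<!-) assertions reject labels that start or end with a hyphen.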
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
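# Per-hostname resolution state: an A query (STATUS_IPV4) is sent first;
# if its response carries no usable answer, _handle_data() flips the status
# to STATUS_IPV6 and retries with an AAAA query.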
class DNSResolver(object):
def __init__(self):
self._loop = None
self._request_id = 1
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._last_time = time.time()
self._sock = None
self._servers = None
self._parse_resolv()
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
        if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
raise Exception('already add to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN)
loop.add_handler(self.handle_events, ref=False)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
            if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
        if hostname in self._hostname_to_cb:
            del self._hostname_to_cb[hostname]
        if hostname in self._hostname_status:
            del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
def handle_events(self, events):
for sock, fd, event in events:
if sock != self._sock:
continue
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
                    logging.warn('received a packet from an address other than our dns servers')
break
self._handle_data(data)
break
now = time.time()
if now - self._last_time > CACHE_SWEEP_INTERVAL:
self._cache.sweep()
self._last_time = now
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if self._hostname_status.__contains__(hostname):
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
self._request_id += 1
if self._request_id > 32768:
self._request_id = 1
req = build_request(hostname, qtype, self._request_id)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if not hostname:
callback(None, Exception('empty hostname'))
elif is_ip(hostname):
callback((hostname, hostname), None)
elif self._hosts.__contains__(hostname):
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif self._cache.__contains__(hostname):
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
self._sock.close()
self._sock = None
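# Hedged usage sketch (added; not part of the original module), assuming an
# eventloop.EventLoop exposing the add/add_handler/run interface used above:
#
#   loop = eventloop.EventLoop()
#   dns_resolver = DNSResolver()
#   dns_resolver.add_to_loop(loop)
#
#   def on_resolved(result, error):
#       # result is a (hostname, ip) tuple; error explains any failure
#       print(result, error)
#
#   dns_resolver.resolve(b'www.example.com', on_resolved)
#   loop.run()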
| mit |
j0nathan33/CouchPotatoServer | couchpotato/core/plugins/quality/main.py | 9 | 23606 | from math import fabs, ceil
import traceback
import re
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, getExt, tryInt, splitString, tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.quality.index import QualityIndex
log = CPLog(__name__)
class QualityPlugin(Plugin):
_database = {
'quality': QualityIndex
}
qualities = [
{'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'median_size': 40000, 'label': 'BR-Disk', 'alternative': ['bd25', ('br', 'disk')], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']},
{'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'median_size': 10000, 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts', 'ts'], 'tags': ['m2ts', 'x264', 'h264']},
{'identifier': '720p', 'hd': True, 'allow_3d': True, 'size': (3000, 10000), 'median_size': 5500, 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']},
{'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'median_size': 2000, 'label': 'BR-Rip', 'alternative': ['bdrip', ('br', 'rip'), 'hdtv', 'hdrip'], 'allow': ['720p', '1080p'], 'ext':['mp4', 'avi'], 'tags': ['webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'median_size': 4500, 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'median_size': 1500, 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'median_size': 700, 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr', 'webrip', ('web', 'rip')], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': []},
{'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p', '1080p'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p', '1080p'], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p', '1080p'], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p', '1080p'], 'ext':[]}
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
threed_tags = {
'sbs': [('half', 'sbs'), 'hsbs', ('full', 'sbs'), 'fsbs'],
'ou': [('half', 'ou'), 'hou', ('full', 'ou'), 'fou'],
        '3d': ['2d3d', '3d2d', '3d', 'sbs', '3dbd', 'hsbs'],
}
cached_qualities = None
cached_order = None
def __init__(self):
addEvent('quality.all', self.all)
addEvent('quality.single', self.single)
addEvent('quality.guess', self.guess)
addEvent('quality.pre_releases', self.preReleases)
addEvent('quality.order', self.getOrder)
addEvent('quality.ishigher', self.isHigher)
addEvent('quality.isfinish', self.isFinish)
addEvent('quality.fill', self.fill)
addApiView('quality.size.save', self.saveSize)
addApiView('quality.list', self.allView, docs = {
'desc': 'List all available qualities',
'return': {'type': 'object', 'example': """{
'success': True,
'list': array, qualities
}"""}
})
addEvent('app.initialize', self.fill, priority = 10)
addEvent('app.test', self.doTest)
self.order = []
self.addOrder()
def addOrder(self):
self.order = []
for q in self.qualities:
self.order.append(q.get('identifier'))
def getOrder(self):
return self.order
def preReleases(self):
return self.pre_releases
def allView(self, **kwargs):
return {
'success': True,
'list': self.all()
}
def all(self):
if self.cached_qualities:
return self.cached_qualities
db = get_db()
temp = []
for quality in self.qualities:
quality_doc = db.get('quality', quality.get('identifier'), with_doc = True)['doc']
q = mergeDicts(quality, quality_doc)
temp.append(q)
if len(temp) == len(self.qualities):
self.cached_qualities = temp
return temp
def single(self, identifier = ''):
db = get_db()
quality_dict = {}
quality = db.get('quality', identifier, with_doc = True)['doc']
if quality:
quality_dict = mergeDicts(self.getQuality(quality['identifier']), quality)
return quality_dict
def getQuality(self, identifier):
for q in self.qualities:
if identifier == q.get('identifier'):
return q
def saveSize(self, **kwargs):
try:
db = get_db()
quality = db.get('quality', kwargs.get('identifier'), with_doc = True)
if quality:
quality['doc'][kwargs.get('value_type')] = tryInt(kwargs.get('value'))
db.update(quality['doc'])
self.cached_qualities = None
return {
'success': True
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def fill(self):
try:
db = get_db()
order = 0
for q in self.qualities:
existing = None
try:
existing = db.get('quality', q.get('identifier'))
except RecordNotFound:
pass
if not existing:
db.insert({
'_t': 'quality',
'order': order,
'identifier': q.get('identifier'),
'size_min': tryInt(q.get('size')[0]),
'size_max': tryInt(q.get('size')[1]),
})
log.info('Creating profile: %s', q.get('label'))
db.insert({
'_t': 'profile',
'order': order + 20, # Make sure it goes behind other profiles
'core': True,
'qualities': [q.get('identifier')],
'label': toUnicode(q.get('label')),
'finish': [True],
'wait_for': [0],
})
order += 1
return True
except:
log.error('Failed: %s', traceback.format_exc())
return False
def guess(self, files, extra = None, size = None, use_cache = True):
if not extra: extra = {}
# Create hash for cache
cache_key = str([f.replace('.' + getExt(f), '') if len(getExt(f)) < 4 else f for f in files])
if use_cache:
cached = self.getCache(cache_key)
if cached and len(extra) == 0:
return cached
qualities = self.all()
# Start with 0
score = {}
for quality in qualities:
score[quality.get('identifier')] = {
'score': 0,
'3d': {}
}
# Use metadata titles as extra check
if extra and extra.get('titles'):
files.extend(extra.get('titles'))
for cur_file in files:
            words = re.split(r'\W+', cur_file.lower())
name_year = fireEvent('scanner.name_year', cur_file, file_name = cur_file, single = True)
threed_words = words
if name_year and name_year.get('name'):
split_name = splitString(name_year.get('name'), ' ')
threed_words = [x for x in words if x not in split_name]
for quality in qualities:
contains_score = self.containsTagScore(quality, words, cur_file)
threedscore = self.contains3D(quality, threed_words, cur_file) if quality.get('allow_3d') else (0, None)
self.calcScore(score, quality, contains_score, threedscore, penalty = contains_score)
size_scores = []
for quality in qualities:
# Evaluate score based on size
size_score = self.guessSizeScore(quality, size = size)
loose_score = self.guessLooseScore(quality, extra = extra)
if size_score > 0:
size_scores.append(quality)
self.calcScore(score, quality, size_score + loose_score)
# Add additional size score if only 1 size validated
if len(size_scores) == 1:
self.calcScore(score, size_scores[0], 8)
del size_scores
# Return nothing if all scores are <= 0
has_non_zero = 0
for s in score:
if score[s]['score'] > 0:
has_non_zero += 1
if not has_non_zero:
return None
        highest_quality = max(score, key = lambda p: score[p]['score'])
        if highest_quality:
            for quality in qualities:
                if quality.get('identifier') == highest_quality:
                    quality['is_3d'] = False
                    if score[highest_quality].get('3d'):
                        quality['is_3d'] = True
                    return self.setCache(cache_key, quality)
return None
def containsTagScore(self, quality, words, cur_file = ''):
cur_file = ss(cur_file)
score = 0.0
extension = words[-1]
words = words[:-1]
points = {
'identifier': 20,
'label': 20,
'alternative': 20,
'tags': 11,
'ext': 5,
}
scored_on = []
# Check alt and tags
for tag_type in ['identifier', 'alternative', 'tags', 'label']:
qualities = quality.get(tag_type, [])
qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities
for alt in qualities:
if isinstance(alt, tuple):
if len(set(words) & set(alt)) == len(alt):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words and ss(alt.lower()) not in scored_on:
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
# Don't score twice on same tag
scored_on.append(ss(alt).lower())
        # Check extension
for ext in quality.get('ext', []):
if ext == extension:
log.debug('Found %s with .%s extension in %s', (quality['identifier'], ext, cur_file))
score += points['ext']
return score
def contains3D(self, quality, words, cur_file = ''):
cur_file = ss(cur_file)
for key in self.threed_tags:
tags = self.threed_tags.get(key, [])
for tag in tags:
if isinstance(tag, tuple):
if len(set(words) & set(tag)) == len(tag):
log.debug('Found %s in %s', (tag, cur_file))
return 1, key
elif tag in words:
log.debug('Found %s in %s', (tag, cur_file))
return 1, key
return 0, None
def guessLooseScore(self, quality, extra = None):
score = 0
if extra:
# Check width resolution, range 20
if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0)))
score += 10
# Check height resolution, range 20
if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20):
log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0)))
score += 5
if quality.get('identifier') == 'dvdrip' and 480 <= extra.get('resolution_width', 0) <= 720:
log.debug('Add point for correct dvdrip resolutions')
score += 1
return score
def guessSizeScore(self, quality, size = None):
score = 0
if size:
size = tryFloat(size)
size_min = tryFloat(quality['size_min'])
size_max = tryFloat(quality['size_max'])
if size_min <= size <= size_max:
log.debug('Found %s via release size: %s MB < %s MB < %s MB', (quality['identifier'], size_min, size, size_max))
proc_range = size_max - size_min
size_diff = size - size_min
size_proc = (size_diff / proc_range)
median_diff = quality['median_size'] - size_min
median_proc = (median_diff / proc_range)
max_points = 8
score += ceil(max_points - (fabs(size_proc - median_proc) * max_points))
else:
score -= 5
return score
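    # Added note (not in the original file): a worked example of the size
    # scoring above with the 720p defaults (size_min 3000, size_max 10000,
    # median_size 5500, so proc_range = 7000 and median_proc ~ 0.357):
    #   size 5500 -> size_proc ~ 0.357, score = ceil(8 - 0 * 8) = 8
    #   size 9500 -> size_proc ~ 0.929, score = ceil(8 - 0.571 * 8) = 4
    #   size outside [3000, 10000] -> score = -5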
def calcScore(self, score, quality, add_score, threedscore = (0, None), penalty = 0):
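        # add_score accumulates on this quality; 3D tag hits are tracked per
        # tag so the winner can be flagged is_3d later, and a non-zero
        # penalty is pushed onto the qualities this one 'allow's (e.g. a
        # 'brrip' match pushes down plain '720p' and '1080p').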
score[quality['identifier']]['score'] += add_score
threedscore, threedtag = threedscore
if threedscore and threedtag:
if threedscore not in score[quality['identifier']]['3d']:
score[quality['identifier']]['3d'][threedtag] = 0
score[quality['identifier']]['3d'][threedtag] += threedscore
# Set order for allow calculation (and cache)
if not self.cached_order:
self.cached_order = {}
for q in self.qualities:
self.cached_order[q.get('identifier')] = self.qualities.index(q)
if penalty and add_score != 0:
for allow in quality.get('allow', []):
score[allow]['score'] -= ((penalty * 2) if self.cached_order[allow] < self.cached_order[quality['identifier']] else penalty) * 2
        # Give a penalty to all other qualities
for q in self.qualities:
if quality.get('identifier') != q.get('identifier') and score.get(q.get('identifier')):
score[q.get('identifier')]['score'] -= 1
def isFinish(self, quality, profile, release_age = 0):
if not isinstance(profile, dict) or not profile.get('qualities'):
# No profile so anything (scanned) is good enough
return True
try:
index = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else False) == bool(quality.get('is_3d', False))][0]
if index == 0 or (profile['finish'][index] and int(release_age) >= int(profile.get('stop_after', [0])[0])):
return True
return False
except:
return False
def isHigher(self, quality, compare_with, profile = None):
if not isinstance(profile, dict) or not profile.get('qualities'):
profile = fireEvent('profile.default', single = True)
        # Try to find the quality in the profile; if it is not found, a quality we do not want ranks lower than anything else
try:
quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
except:
log.debug('Quality %s not found in profile identifiers %s', (quality['identifier'] + (' 3D' if quality.get('is_3d', 0) else ''), \
[identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
return 'lower'
        # Try to find the comparison quality in the profile; if it is not found, anything is higher than an unwanted quality
try:
compare_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == compare_with['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(compare_with.get('is_3d', 0))][0]
except:
log.debug('Compare quality %s not found in profile identifiers %s', (compare_with['identifier'] + (' 3D' if compare_with.get('is_3d', 0) else ''), \
[identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
return 'higher'
# Note to self: a lower number means higher quality
if quality_order > compare_order:
return 'lower'
elif quality_order == compare_order:
return 'equal'
else:
return 'higher'
def doTest(self):
tests = {
'Movie Name (1999)-DVD-Rip.avi': {'size': 700, 'quality': 'dvdrip'},
'Movie Name 1999 720p Bluray.mkv': {'size': 4200, 'quality': '720p'},
'Movie Name 1999 BR-Rip 720p.avi': {'size': 1000, 'quality': 'brrip'},
'Movie Name 1999 720p Web Rip.avi': {'size': 1200, 'quality': 'scr'},
'Movie Name 1999 Web DL.avi': {'size': 800, 'quality': 'brrip'},
'Movie.Name.1999.1080p.WEBRip.H264-Group': {'size': 1500, 'quality': 'scr'},
'Movie.Name.1999.DVDRip-Group': {'size': 750, 'quality': 'dvdrip'},
'Movie.Name.1999.DVD-Rip-Group': {'size': 700, 'quality': 'dvdrip'},
'Movie.Name.1999.DVD-R-Group': {'size': 4500, 'quality': 'dvdr'},
'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': {'size': 5500, 'quality': '720p'},
'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': {'size': 8500, 'extra': {'resolution_width': 1920, 'resolution_height': 1080} , 'quality': '1080p'},
'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': {'size': 8000, 'quality': '1080p'},
'Movie.Name.2013.BR-Disk-Group.iso': {'size': 48000, 'quality': 'bd50'},
'Movie.Name.2013.2D+3D.BR-Disk-Group.iso': {'size': 52000, 'quality': 'bd50', 'is_3d': True},
'Movie.Rising.Name.Girl.2011.NTSC.DVD9-GroupDVD': {'size': 7200, 'quality': 'dvdr'},
'Movie Name (2013) 2D + 3D': {'size': 49000, 'quality': 'bd50', 'is_3d': True},
'Movie Monuments 2013 BrRip 1080p': {'size': 1800, 'quality': 'brrip'},
'Movie Monuments 2013 BrRip 720p': {'size': 1300, 'quality': 'brrip'},
'The.Movie.2014.3D.1080p.BluRay.AVC.DTS-HD.MA.5.1-GroupName': {'size': 30000, 'quality': 'bd50', 'is_3d': True},
'/home/namehou/Movie Monuments (2012)/Movie Monuments.mkv': {'size': 5500, 'quality': '720p', 'is_3d': False},
'/home/namehou/Movie Monuments (2012)/Movie Monuments Full-OU.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True},
'/home/namehou/Movie Monuments (2013)/Movie Monuments.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': False},
'/home/namehou/Movie Monuments (2013)/Movie Monuments Full-OU.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': True},
'/volume1/Public/3D/Moviename/Moviename (2009).3D.SBS.ts': {'size': 7500, 'quality': '1080p', 'is_3d': True},
'/volume1/Public/Moviename/Moviename (2009).ts': {'size': 7500, 'quality': '1080p'},
'/movies/BluRay HDDVD H.264 MKV 720p EngSub/QuiQui le fou (criterion collection #123, 1915)/QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
        'C:\\movies\\QuiQui le fou (collection #123, 1915)\\QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
        'C:\\movies\\QuiQui le fou (collection #123, 1915)\\QuiQui le fou (1915) half-sbs 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True},
'Moviename 2014 720p HDCAM XviD DualAudio': {'size': 4000, 'quality': 'cam'},
'Moviename (2014) - 720p CAM x264': {'size': 2250, 'quality': 'cam'},
'Movie Name (2014).mp4': {'size': 750, 'quality': 'brrip'},
'Moviename.2014.720p.R6.WEB-DL.x264.AC3-xyz': {'size': 750, 'quality': 'r5'},
'Movie name 2014 New Source 720p HDCAM x264 AC3 xyz': {'size': 750, 'quality': 'cam'},
'Movie.Name.2014.720p.HD.TS.AC3.x264': {'size': 750, 'quality': 'ts'},
'Movie.Name.2014.1080p.HDrip.x264.aac-ReleaseGroup': {'size': 7000, 'quality': 'brrip'},
'Movie.Name.2014.HDCam.Chinese.Subs-ReleaseGroup': {'size': 15000, 'quality': 'cam'},
'Movie Name 2014 HQ DVDRip X264 AC3 (bla)': {'size': 0, 'quality': 'dvdrip'},
'Movie Name1 (2012).mkv': {'size': 4500, 'quality': '720p'},
'Movie Name (2013).mkv': {'size': 8500, 'quality': '1080p'},
'Movie Name (2014).mkv': {'size': 4500, 'quality': '720p', 'extra': {'titles': ['Movie Name 2014 720p Bluray']}},
'Movie Name (2015).mkv': {'size': 500, 'quality': '1080p', 'extra': {'resolution_width': 1920}},
'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'},
'Movie Name.2014.720p Web-Dl Aac2.0 h264-ReleaseGroup': {'size': 3800, 'quality': 'brrip'},
'Movie Name.2014.720p.WEBRip.x264.AC3-ReleaseGroup': {'size': 3000, 'quality': 'scr'},
'Movie.Name.2014.1080p.HDCAM.-.ReleaseGroup': {'size': 5300, 'quality': 'cam'},
}
correct = 0
for name in tests:
test_quality = self.guess(files = [name], extra = tests[name].get('extra', None), size = tests[name].get('size', None), use_cache = False) or {}
success = test_quality.get('identifier') == tests[name]['quality'] and test_quality.get('is_3d') == tests[name].get('is_3d', False)
if not success:
log.error('%s failed check, thinks it\'s "%s" expecting "%s"', (name,
test_quality.get('identifier') + (' 3D' if test_quality.get('is_3d') else ''),
tests[name]['quality'] + (' 3D' if tests[name].get('is_3d') else '')
))
correct += success
if correct == len(tests):
log.info('Quality test successful')
return True
else:
log.error('Quality test failed: %s out of %s succeeded', (correct, len(tests)))
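# Hedged usage sketch (added; not part of the original plugin): guessing a
# quality from a release name, assuming the plugin is registered so that
# quality.all() can reach the database, mirroring the calls in doTest():
#
#   plugin = QualityPlugin()
#   guessed = plugin.guess(files = ['Movie.Name.1999.1080p.BluRay.x264'],
#                          size = 8500, use_cache = False)
#   # guessed['identifier'] == '1080p'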
| gpl-3.0 |
xukunfeng/ardupilot | Tools/autotest/pysim/aircraft.py | 164 | 4001 | import math
import time
import random
import util
from rotmat import Vector3, Matrix3
class Aircraft(object):
'''a basic aircraft class'''
def __init__(self):
self.home_latitude = 0
self.home_longitude = 0
self.home_altitude = 0
self.ground_level = 0
self.frame_height = 0.0
self.latitude = self.home_latitude
self.longitude = self.home_longitude
self.altitude = self.home_altitude
self.dcm = Matrix3()
# rotation rate in body frame
self.gyro = Vector3(0,0,0) # rad/s
self.velocity = Vector3(0, 0, 0) # m/s, North, East, Down
self.position = Vector3(0, 0, 0) # m North, East, Down
self.mass = 0.0
self.update_frequency = 50 # in Hz
self.gravity = 9.80665 # m/s/s
self.accelerometer = Vector3(0, 0, -self.gravity)
self.wind = util.Wind('0,0,0')
self.time_base = time.time()
self.time_now = self.time_base + 100*1.0e-6
self.gyro_noise = math.radians(0.1)
self.accel_noise = 0.3
def on_ground(self, position=None):
'''return true if we are on the ground'''
if position is None:
position = self.position
return (-position.z) + self.home_altitude <= self.ground_level + self.frame_height
def update_position(self):
'''update lat/lon/alt from position'''
bearing = math.degrees(math.atan2(self.position.y, self.position.x))
distance = math.sqrt(self.position.x**2 + self.position.y**2)
(self.latitude, self.longitude) = util.gps_newpos(self.home_latitude, self.home_longitude,
bearing, distance)
self.altitude = self.home_altitude - self.position.z
velocity_body = self.dcm.transposed() * self.velocity
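        # accel_body is expected to be set each step by the subclass physics
        # model; it is mirrored here as the simulated accelerometer reading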
self.accelerometer = self.accel_body.copy()
def set_yaw_degrees(self, yaw_degrees):
'''rotate to the given yaw'''
(roll, pitch, yaw) = self.dcm.to_euler()
yaw = math.radians(yaw_degrees)
self.dcm.from_euler(roll, pitch, yaw)
def time_advance(self, deltat):
'''advance time by deltat in seconds'''
self.time_now += deltat
def setup_frame_time(self, rate, speedup):
'''setup frame_time calculation'''
self.rate = rate
self.speedup = speedup
self.frame_time = 1.0/rate
self.scaled_frame_time = self.frame_time/speedup
self.last_wall_time = time.time()
self.achieved_rate = rate
def adjust_frame_time(self, rate):
'''adjust frame_time calculation'''
self.rate = rate
self.frame_time = 1.0/rate
self.scaled_frame_time = self.frame_time/self.speedup
def sync_frame_time(self):
'''try to synchronise simulation time with wall clock time, taking
into account desired speedup'''
now = time.time()
if now < self.last_wall_time + self.scaled_frame_time:
                time.sleep(self.last_wall_time + self.scaled_frame_time - now)
now = time.time()
if now > self.last_wall_time and now - self.last_wall_time < 0.1:
rate = 1.0/(now - self.last_wall_time)
self.achieved_rate = (0.98*self.achieved_rate) + (0.02*rate)
if self.achieved_rate < self.rate*self.speedup:
self.scaled_frame_time *= 0.999
else:
self.scaled_frame_time *= 1.001
self.last_wall_time = now
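    # Added note: the 0.999/1.001 factors form a slow feedback loop - with
    # the default rate of 50 Hz and speedup 1, frame_time is 20 ms and
    # scaled_frame_time is nudged by 0.1% per frame until achieved_rate
    # settles near rate*speedup.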
def add_noise(self, throttle):
'''add noise based on throttle level (from 0..1)'''
self.gyro += Vector3(random.gauss(0, 1),
random.gauss(0, 1),
random.gauss(0, 1)) * throttle * self.gyro_noise
self.accel_body += Vector3(random.gauss(0, 1),
random.gauss(0, 1),
random.gauss(0, 1)) * throttle * self.accel_noise
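# Hedged sketch (added; not part of the original file): a minimal subclass is
# expected to supply the physics step, set accel_body and drive the helpers,
# roughly:
#
#   class HoverFrame(Aircraft):
#       def update(self, deltat=0.02):
#           self.accel_body = Vector3(0, 0, -self.gravity)  # steady hover
#           self.time_advance(deltat)
#           self.update_position()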
| gpl-3.0 |
BT-jmichaud/sale-workflow | __unported__/sale_multi_picking/sale.py | 37 | 2758 | # -*- coding: utf-8 -*-
#
#
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import fields, orm
class sale_order_line_group(orm.Model):
_name = 'sale.order.line.group'
_columns = {
'name': fields.char('Group', size=64, required=True),
'company_id': fields.many2one(
'res.company', 'Company', required=True, select=1),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get(
'res.company')._company_default_get(
cr, uid, 'sale.order.line.group', context=c),
}
class sale_order_line(orm.Model):
_inherit = 'sale.order.line'
_columns = {
'picking_group_id': fields.many2one(
'sale.order.line.group', 'Group',
help="This is used by 'multi-picking' to group order lines in one "
"picking"),
}
class sale_order(orm.Model):
_inherit = 'sale.order'
def action_ship_create(self, cr, uid, ids, context=None):
picking_pool = self.pool.get('stock.picking')
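        # Group order lines by picking_group_id: ungrouped lines (key 0) use
        # the standard flow (picking_id=None), while each named group gets
        # its own picking created up front.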
for order in self.browse(cr, uid, ids, context=context):
lines_by_group = {}
for line in order.order_line:
group_id = (
line.picking_group_id.id if line.picking_group_id else 0)
lines_by_group.setdefault(group_id, []).append(line)
for group in lines_by_group:
if not group:
picking_id = None
else:
picking_vals = super(
sale_order, self)._prepare_order_picking(
cr, uid, order, context=context)
picking_id = picking_pool.create(
cr, uid, picking_vals, context=context)
super(sale_order, self)._create_pickings_and_procurements(
cr, uid, order, lines_by_group[group], picking_id,
context=context)
return True
| agpl-3.0 |
NetApp/cinder | cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py | 6 | 26929 | # (c) Copyright 2016 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for brcd fc zone client http(s)."""
import time
import mock
from mock import patch
from cinder import exception
from cinder import test
from cinder.zonemanager.drivers.brocade import (brcd_http_fc_zone_client
as client)
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
cfgs = {'openstack_cfg': 'zone1;zone2'}
cfgs_to_delete = {
'openstack_cfg': 'zone1;zone2;openstack50060b0000c26604201900051ee8e329'}
zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}
zones_to_delete = {
'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
'openstack50060b0000c26604201900051ee8e329':
'50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'}
alias = {}
qlps = {}
ifas = {}
parsed_raw_zoneinfo = ""
random_no = ''
session = None
active_cfg = 'openstack_cfg'
activate = True
no_activate = False
vf_enable = True
ns_info = ['10:00:00:05:1e:7c:64:96']
nameserver_info = """
<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>NSInfo Page</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN NS INFO
2;8;020800;N ;10:00:00:05:1e:7c:64:96;20:00:00:05:1e:7c:64:96;[89]""" \
"""Brocade-825 | 3.0.4.09 | DCM-X3650-94 | Microsoft Windows Server 2003 R2"""\
"""| Service Pack 2";FCP ; 3;20:08:00:05:1e:89:54:a0;"""\
"""0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0;000000;port8"""\
"""
--END NS INFO
</PRE>
</BODY>
</HTML>
"""
mocked_zone_string = 'zonecfginfo=openstack_cfg zone1;zone2 '\
'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\
'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\
'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\
'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\
'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\
'openstack_cfg null &saveonly=false'
mocked_zone_string_no_activate = 'zonecfginfo=openstack_cfg zone1;zone2 '\
'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\
'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\
'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\
'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\
'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c &saveonly=true'
zone_string_to_post = "zonecfginfo=openstack_cfg "\
"openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\
"zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
"zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
"openstack50060b0000c26604201900051ee8e329 "\
"50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\
"openstack_cfg null &saveonly=false"
zone_string_to_post_no_activate = "zonecfginfo=openstack_cfg "\
"openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\
"zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
"zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
"openstack50060b0000c26604201900051ee8e329 "\
"50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 &saveonly=true"
zone_string_to_post_invalid_request = "zonecfginfo=openstack_cfg "\
"openstack50060b0000c26604201900051ee8e32900000000000000000000000000;"\
"zone1;zone2 openstack50060b0000c26604201900051ee8e329000000000000000000000"\
"00000 50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\
"zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
"zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 &saveonly=true"
zone_string_del_to_post = "zonecfginfo=openstack_cfg zone1;zone2"\
" zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
"zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
"openstack_cfg null &saveonly=false"
zone_string_del_to_post_no_active = "zonecfginfo=openstack_cfg zone1;zone2"\
" zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\
"zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 &saveonly=true"
zone_post_page = """
<BODY>
<PRE>
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=-1
errorMessage=Name too long
--END ZONE_TXN_INFO
</PRE>
</BODY>"""
zone_post_page_no_error = """
<BODY>
<PRE>
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=0
errorMessage=
--END ZONE_TXN_INFO
</PRE>
</BODY>"""
secinfo_resp = """
<BODY>
<PRE>
--BEGIN SECINFO
SECURITY = OFF
RANDOM = 6281590
DefaultPasswdBitmap = 0
primaryFCS = no
switchType = 66
resource = 10.24.48.210
REALM = FC Switch Administration
AUTHMETHOD = Custom_Basic
hasUpfrontLogin=yes
AUTHVERSION = 1
vfEnabled=false
vfSupported=true
--END SECINFO
</PRE>
</BODY>
"""
authenticate_resp = """<HTML>
<PRE>
--BEGIN AUTHENTICATE
authenticated = yes
username=admin
userrole=admin
adCapable=1
currentAD=AD0
trueADEnvironment=0
adId=0
adList=ALL
contextType=0
--END AUTHENTICATE
</PRE>
</BODY>
"""
un_authenticate_resp = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>Authentication</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN AUTHENTICATE
authenticated = no
errCode = -3
authType = Custom_Basic
realm = FC Switch Administration
--END AUTHENTICATE
</PRE>
</BODY>
</HTML>"""
switch_page_resp = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
</HEAD>
<BODY>
<PRE>
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v7.3.0b_rc1_bld06
swDomain=2
--END SWITCH INFORMATION
</PRE>
</BODY>
</HTML>
"""
switch_page_invalid_firm = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
</HEAD>
<BODY>
<PRE>
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v6.1.1
swDomain=2
--END SWITCH INFORMATION
</PRE>
</BODY>
</HTML>
"""
parsed_value = """
didOffset=96
swFWVersion=v7.3.0b_rc1_bld06
swDomain=2
"""
parsed_session_info_vf = """
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=2
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
"""
session_info_vf = """
<BODY>
<PRE>
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=2
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
</PRE>
</BODY>
"""
session_info_vf_not_changed = """
<BODY>
<PRE>
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
User-Agent=Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML,
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=128
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
</PRE>
</BODY>
"""
session_info_AD = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>Webtools Session Info</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN SESSION
sessionId=-2096740776
user=
userRole=root
isAdminRole=No
authSource=0
sessionIp=
User-Agent=
valid=no
adName=
adId=0
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=1
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=-2
err=Could not obtain session data from store
contextType=0
--END SESSION
</PRE>
</BODY>
</HTML>
"""
zone_info = """<HTML>
<HEAD>
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
<META HTTP-EQUIV="Expires" CONTENT="-1">
<TITLE>Zone Configuration Information</TITLE>
</HEAD>
<BODY>
<PRE>
--BEGIN ZONE CHANGE
LastZoneChangeTime=1421926251
--END ZONE CHANGE
isZoneTxnSupported=true
ZoneLicense=true
QuickLoopLicense=true
DefZoneStatus=noaccess
McDataDefaultZone=false
McDataSafeZone=false
AvailableZoneSize=1043890
--BEGIN ZONE INFO
openstack_cfg zone1;zone2 """\
"""zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 """\
"""zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 """\
"""alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 """\
"""qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
"""fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
"""openstack_cfg null 1045274"""\
"""--END ZONE INFO
</PRE>
</BODY>
</HTML>
"""
active_zone_set = {
'zones':
{'zone1':
['20:01:00:05:33:0e:96:15', '20:00:00:05:33:0e:93:11'],
'zone2':
['20:01:00:05:33:0e:96:14', '20:00:00:05:33:0e:93:11']},
'active_zone_config': 'openstack_cfg'}
updated_zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
'test_updated_zone':
'20:01:00:05:33:0e:96:10;20:00:00:05:33:0e:93:11'}
updated_cfgs = {'openstack_cfg': 'test_updated_zone;zone1;zone2'}
valid_zone_name = "openstack50060b0000c26604201900051ee8e329"
class TestBrcdHttpFCZoneClient(client.BrcdHTTPFCZoneClient, test.TestCase):
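    # Added note: this test case inherits from the zone client itself so the
    # client's methods can be exercised directly, while all switch HTTP
    # traffic is stubbed out by patching connect()/post_zone_data().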
def setUp(self):
self.auth_header = "YWRtaW46cGFzc3dvcmQ6NDM4ODEyNTIw"
self.switch_user = "admin"
self.switch_pwd = "password"
self.protocol = "HTTPS"
self.conn = None
self.alias = {}
self.qlps = {}
self.ifas = {}
self.parsed_raw_zoneinfo = ""
self.random_no = ''
self.session = None
super(TestBrcdHttpFCZoneClient, self).setUp()
# override some of the functions
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_create_auth_token(self, connect_mock):
connect_mock.return_value = secinfo_resp
self.assertEqual("Custom_Basic YWRtaW46cGFzc3dvcmQ6NjI4MTU5MA==",
self.create_auth_token())
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_authenticate(self, connect_mock):
connect_mock.return_value = authenticate_resp
self.assertEqual(
(True, "Custom_Basic YWRtaW46eHh4Og=="), self.authenticate())
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_authenticate_failed(self, connect_mock):
connect_mock.return_value = un_authenticate_resp
self.assertRaises(
exception.BrocadeZoningHttpException, self.authenticate)
def test_get_parsed_data(self):
valid_delimiter1 = zone_constant.SWITCHINFO_BEGIN
valid_delimiter2 = zone_constant.SWITCHINFO_END
invalid_delimiter = "--END SWITCH INFORMATION1"
self.assertEqual(parsed_value, self.get_parsed_data(
switch_page_resp, valid_delimiter1, valid_delimiter2))
self.assertRaises(exception.BrocadeZoningHttpException,
self.get_parsed_data,
switch_page_resp,
valid_delimiter1,
invalid_delimiter)
self.assertRaises(exception.BrocadeZoningHttpException,
self.get_parsed_data,
switch_page_resp,
invalid_delimiter,
valid_delimiter2)
def test_get_nvp_value(self):
valid_keyname = zone_constant.FIRMWARE_VERSION
invalid_keyname = "swFWVersion1"
self.assertEqual(
"v7.3.0b_rc1_bld06", self.get_nvp_value(parsed_value,
valid_keyname))
self.assertRaises(exception.BrocadeZoningHttpException,
self.get_nvp_value,
parsed_value,
invalid_keyname)
def test_get_managable_vf_list(self):
manageable_list = ['2', '128']
self.assertEqual(
manageable_list, self.get_managable_vf_list(session_info_vf))
self.assertRaises(exception.BrocadeZoningHttpException,
self.get_managable_vf_list, session_info_AD)
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled')
def test_check_change_vf_context_vf_enabled(self, is_vf_enabled_mock):
is_vf_enabled_mock.return_value = (True, session_info_vf)
self.vfid = None
self.assertRaises(
exception.BrocadeZoningHttpException,
self.check_change_vf_context)
self.vfid = "2"
with mock.patch.object(self, 'change_vf_context') \
as change_vf_context_mock:
self.check_change_vf_context()
change_vf_context_mock.assert_called_once_with(
self.vfid, session_info_vf)
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled')
def test_check_change_vf_context_vf_disabled(self, is_vf_enabled_mock):
is_vf_enabled_mock.return_value = (False, session_info_AD)
self.vfid = "128"
self.assertRaises(
exception.BrocadeZoningHttpException,
self.check_change_vf_context)
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list')
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_change_vf_context_valid(self, connect_mock,
get_managable_vf_list_mock):
get_managable_vf_list_mock.return_value = ['2', '128']
connect_mock.return_value = session_info_vf
self.assertIsNone(self.change_vf_context("2", session_info_vf))
data = zone_constant.CHANGE_VF.format(vfid="2")
headers = {zone_constant.AUTH_HEADER: self.auth_header}
connect_mock.assert_called_once_with(
zone_constant.POST_METHOD, zone_constant.SESSION_PAGE,
data, headers)
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list')
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_change_vf_context_vf_not_changed(self,
connect_mock,
get_managable_vf_list_mock):
get_managable_vf_list_mock.return_value = ['2', '128']
connect_mock.return_value = session_info_vf_not_changed
self.assertRaises(exception.BrocadeZoningHttpException,
self.change_vf_context, "2", session_info_vf)
data = zone_constant.CHANGE_VF.format(vfid="2")
headers = {zone_constant.AUTH_HEADER: self.auth_header}
connect_mock.assert_called_once_with(
zone_constant.POST_METHOD, zone_constant.SESSION_PAGE,
data, headers)
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list')
    def test_change_vf_context_vfid_not_managed(self,
                                                get_managable_vf_list_mock):
get_managable_vf_list_mock.return_value = ['2', '128']
self.assertRaises(exception.BrocadeZoningHttpException,
self.change_vf_context, "12", session_info_vf)
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_is_supported_firmware(self, connect_mock):
connect_mock.return_value = switch_page_resp
self.assertTrue(self.is_supported_firmware())
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_is_supported_firmware_invalid(self, connect_mock):
connect_mock.return_value = switch_page_invalid_firm
self.assertFalse(self.is_supported_firmware())
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_get_active_zone_set(self, connect_mock):
connect_mock.return_value = zone_info
returned_zone_map = self.get_active_zone_set()
self.assertDictMatch(active_zone_set, returned_zone_map)
def test_form_zone_string(self):
new_alias = {
'alia1': '10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'}
new_qlps = {'qlp': '10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}
new_ifas = {'fa1': '20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}
self.assertEqual(mocked_zone_string, self.form_zone_string(
cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True))
self.assertEqual(mocked_zone_string_no_activate, self.form_zone_string(
cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, False))
@patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
def test_add_zones_activate(self, post_zone_data_mock):
post_zone_data_mock.return_value = ("0", "")
self.cfgs = cfgs.copy()
self.zones = zones.copy()
self.alias = alias.copy()
self.qlps = qlps.copy()
self.ifas = ifas.copy()
self.active_cfg = active_cfg
add_zones_info = {valid_zone_name:
['50:06:0b:00:00:c2:66:04',
'20:19:00:05:1e:e8:e3:29']
}
self.add_zones(add_zones_info, True)
post_zone_data_mock.assert_called_once_with(zone_string_to_post)
@patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
def test_add_zones_invalid_zone_name(self, post_zone_data_mock):
post_zone_data_mock.return_value = ("-1", "Name Too Long")
self.cfgs = cfgs.copy()
self.zones = zones.copy()
self.alias = alias.copy()
self.qlps = qlps.copy()
self.ifas = ifas.copy()
self.active_cfg = active_cfg
invalid_zone_name = valid_zone_name + "00000000000000000000000000"
add_zones_info = {invalid_zone_name:
['50:06:0b:00:00:c2:66:04',
'20:19:00:05:1e:e8:e3:29']
}
self.assertRaises(
exception.BrocadeZoningHttpException,
self.add_zones, add_zones_info, False)
@patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
def test_add_zones_no_activate(self, post_zone_data_mock):
post_zone_data_mock.return_value = ("0", "")
self.cfgs = cfgs.copy()
self.zones = zones.copy()
self.alias = alias.copy()
self.qlps = qlps.copy()
self.ifas = ifas.copy()
self.active_cfg = active_cfg
add_zones_info = {valid_zone_name:
['50:06:0b:00:00:c2:66:04',
'20:19:00:05:1e:e8:e3:29']
}
self.add_zones(add_zones_info, False)
post_zone_data_mock.assert_called_once_with(
zone_string_to_post_no_activate)
@patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
def test_delete_zones_activate(self, post_zone_data_mock):
post_zone_data_mock.return_value = ("0", "")
self.cfgs = cfgs_to_delete.copy()
self.zones = zones_to_delete.copy()
self.alias = alias.copy()
self.qlps = qlps.copy()
self.ifas = ifas.copy()
self.active_cfg = active_cfg
delete_zones_info = valid_zone_name
self.delete_zones(delete_zones_info, True)
post_zone_data_mock.assert_called_once_with(zone_string_del_to_post)
@patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
def test_delete_zones_no_activate(self, post_zone_data_mock):
post_zone_data_mock.return_value = ("0", "")
self.cfgs = cfgs_to_delete.copy()
self.zones = zones_to_delete.copy()
self.alias = alias.copy()
self.qlps = qlps.copy()
self.ifas = ifas.copy()
self.active_cfg = active_cfg
delete_zones_info = valid_zone_name
self.delete_zones(delete_zones_info, False)
post_zone_data_mock.assert_called_once_with(
zone_string_del_to_post_no_active)
@patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data')
def test_delete_zones_invalid_zone_name(self, post_zone_data_mock):
post_zone_data_mock.return_value = ("0", "")
self.cfgs = cfgs_to_delete.copy()
self.zones = zones_to_delete.copy()
self.alias = alias.copy()
self.qlps = qlps.copy()
self.ifas = ifas.copy()
self.active_cfg = active_cfg
delete_zones_info = 'openstack50060b0000c26604201900051ee8e32'
self.assertRaises(exception.BrocadeZoningHttpException,
self.delete_zones, delete_zones_info, False)
@patch.object(time, 'sleep')
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_post_zone_data(self, connect_mock, sleep_mock):
connect_mock.return_value = zone_post_page
self.assertEqual(
("-1", "Name too long"), self.post_zone_data(zone_string_to_post))
connect_mock.return_value = zone_post_page_no_error
self.assertEqual(("0", ""), self.post_zone_data(zone_string_to_post))
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_get_nameserver_info(self, connect_mock):
connect_mock.return_value = nameserver_info
self.assertEqual(ns_info, self.get_nameserver_info())
@patch.object(client.BrcdHTTPFCZoneClient, 'get_session_info')
def test_is_vf_enabled(self, get_session_info_mock):
get_session_info_mock.return_value = session_info_vf
self.assertEqual((True, parsed_session_info_vf), self.is_vf_enabled())
def test_delete_zones_cfgs(self):
cfgs = {'openstack_cfg': 'zone1;zone2'}
zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}
delete_zones_info = valid_zone_name
self.assertEqual(
(zones, cfgs, active_cfg),
self.delete_zones_cfgs(
cfgs_to_delete.copy(),
zones_to_delete.copy(),
delete_zones_info,
active_cfg))
cfgs = {'openstack_cfg': 'zone2'}
zones = {'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}
delete_zones_info = valid_zone_name + ";zone1"
self.assertEqual(
(zones, cfgs, active_cfg),
self.delete_zones_cfgs(
cfgs_to_delete.copy(),
zones_to_delete.copy(),
delete_zones_info,
active_cfg))
def test_add_zones_cfgs(self):
add_zones_info = {valid_zone_name:
['50:06:0b:00:00:c2:66:04',
'20:19:00:05:1e:e8:e3:29']
}
updated_cfgs = {
'openstack_cfg':
valid_zone_name + ';zone1;zone2'}
updated_zones = {
'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
valid_zone_name:
'50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'}
self.assertEqual((updated_zones, updated_cfgs, active_cfg),
self.add_zones_cfgs(
cfgs.copy(),
zones.copy(),
add_zones_info,
active_cfg,
"openstack_cfg"))
add_zones_info = {valid_zone_name:
['50:06:0b:00:00:c2:66:04',
'20:19:00:05:1e:e8:e3:29'],
'test4':
['20:06:0b:00:00:b2:66:07',
'20:10:00:05:1e:b8:c3:19']
}
updated_cfgs = {
'openstack_cfg':
'test4;openstack50060b0000c26604201900051ee8e329;zone1;zone2'}
updated_zones = {
'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11',
valid_zone_name:
'50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29',
'test4': '20:06:0b:00:00:b2:66:07;20:10:00:05:1e:b8:c3:19'}
self.assertEqual(
(updated_zones, updated_cfgs, active_cfg),
self.add_zones_cfgs(
cfgs.copy(), zones.copy(), add_zones_info,
active_cfg, "openstack_cfg"))
@patch.object(client.BrcdHTTPFCZoneClient, 'connect')
def test_get_zone_info(self, connect_mock):
connect_mock.return_value = zone_info
self.get_zone_info()
self.assertEqual({'openstack_cfg': 'zone1;zone2'}, self.cfgs)
self.assertEqual(
{'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11',
'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'},
self.zones)
self.assertEqual('openstack_cfg', self.active_cfg)
self.assertEqual(
{'alia1': '10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'},
self.alias)
self.assertEqual(
{'fa1': '20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'},
self.ifas)
self.assertEqual(
{'qlp': '10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'},
self.qlps)
| apache-2.0 |
SnakeJenny/TensorFlow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 8 | 42354 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
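# Hedged usage sketch (added; not part of the original test file): the
# model_fns above plug into the contrib Estimator the way the tests below
# exercise them, e.g.:
#
#   est = estimator.Estimator(model_fn=linear_model_fn)
#   est.fit(input_fn=boston_input_fn, steps=1)
#   scores = est.evaluate(input_fn=boston_eval_fn, steps=1)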
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return const, const, control_flow_ops.group(train_op_1, training_op_2)
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, control_flow_ops.no_op()
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, name='weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, name='weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, name='weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
      variables_lib.Variable(1., name='weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator internals to copy the
    # array before adding any hooks.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/token.py | 178 | 2944 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
ERRORTOKEN = 52
N_TOKENS = 53
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
del _name, _value
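# Example (illustrative): tok_name maps each token value back to its name,
# e.g. tok_name[NAME] == 'NAME' and tok_name[OP] == 'OP'.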
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
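# Examples (illustrative): ISTERMINAL(NAME) and ISEOF(ENDMARKER) are true;
# ISNONTERMINAL(x) is true for grammar symbols numbered NT_OFFSET (256) and up.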
def main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
main()
| gpl-2.0 |
Thunderoar/android_kernel_samsung_goyave3g | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
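# Illustrative use (the event and field names below are hypothetical):
#   define_flag_field('irq:softirq_entry', 'vec', '|')
#   define_flag_value('irq:softirq_entry', 'vec', 1, 'TIMER')
#   flag_str('irq:softirq_entry', 'vec', 1)  -> 'TIMER'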
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
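# Example (illustrative): trace_flag_str(0x01 | 0x04) should yield
# "IRQS_OFF | NEED_RESCHED".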
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
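# Examples (illustrative): taskState(0) -> "R", taskState(64) -> "DEAD",
# and any unmapped value (e.g. 3) -> "Unknown".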
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
lahosken/pants | tests/python/pants_test/backend/python/tasks/python_task_test_base.py | 8 | 3057 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.python.register import build_file_aliases as register_python
from pants.build_graph.address import Address
from pants_test.backend.python.tasks.interpreter_cache_test_mixin import InterpreterCacheTestMixin
from pants_test.tasks.task_test_base import TaskTestBase
class PythonTaskTestBase(InterpreterCacheTestMixin, TaskTestBase):
"""
:API: public
"""
@property
def alias_groups(self):
"""
:API: public
"""
return register_python()
def create_python_library(self, relpath, name, source_contents_map=None,
dependencies=(), provides=None):
"""
:API: public
"""
sources = None if source_contents_map is None else ['__init__.py'] + source_contents_map.keys()
sources_strs = ["'{0}'".format(s) for s in sources] if sources else None
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
python_library(
name='{name}',
{sources_clause}
dependencies=[
{dependencies}
],
{provides_clause}
)
""").format(
name=name,
sources_clause='sources=[{0}],'.format(','.join(sources_strs)) if sources_strs else '',
dependencies=','.join(map(repr, dependencies)),
provides_clause='provides={0},'.format(provides) if provides else ''))
if source_contents_map:
self.create_file(relpath=os.path.join(relpath, '__init__.py'))
for source, contents in source_contents_map.items():
self.create_file(relpath=os.path.join(relpath, source), contents=contents)
return self.target(Address(relpath, name).spec)
def create_python_binary(self, relpath, name, entry_point, dependencies=(), provides=None):
"""
:API: public
"""
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
python_binary(
name='{name}',
entry_point='{entry_point}',
dependencies=[
{dependencies}
],
{provides_clause}
)
""").format(name=name, entry_point=entry_point, dependencies=','.join(map(repr, dependencies)),
provides_clause='provides={0},'.format(provides) if provides else ''))
return self.target(Address(relpath, name).spec)
def create_python_requirement_library(self, relpath, name, requirements):
"""
:API: public
"""
def make_requirement(req):
return 'python_requirement("{}")'.format(req)
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
python_requirement_library(
name='{name}',
requirements=[
{requirements}
]
)
""").format(name=name, requirements=','.join(map(make_requirement, requirements))))
return self.target(Address(relpath, name).spec)
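# Illustrative use in a subclass (target names and sources are hypothetical):
#   tgt = self.create_python_library('src/greet', 'greet',
#                                    {'greet.py': 'def hi(): return "hi"\n'})
#   req = self.create_python_requirement_library('3rdparty', 'six',
#                                                ['six==1.9.0'])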
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/uuid.py | 34 | 23209 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if [hex, bytes, bytes_le, fields, int].count(None) != 4:
raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = long(hex, 16)
if bytes_le is not None:
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
bytes_le[8:])
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1<<32L:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1<<16L:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1<<16L:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1<<8L:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1<<8L:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1<<48L:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
int = ((time_low << 96L) | (time_mid << 80L) |
(time_hi_version << 64L) | (clock_seq << 48L) | node)
if int is not None:
if not 0 <= int < 1<<128L:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48L)
int |= 0x8000 << 48L
# Set the version number.
int &= ~(0xf000 << 64L)
int |= version << 76L
self.__dict__['int'] = int
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.int, other.int)
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return 'UUID(%r)' % str(self)
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
bytes = ''
for shift in range(0, 128, 8):
bytes = chr((self.int >> shift) & 0xff) + bytes
return bytes
bytes = property(get_bytes)
def get_bytes_le(self):
bytes = self.bytes
return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
bytes_le = property(get_bytes_le)
def get_fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
fields = property(get_fields)
def get_time_low(self):
return self.int >> 96L
time_low = property(get_time_low)
def get_time_mid(self):
return (self.int >> 80L) & 0xffff
time_mid = property(get_time_mid)
def get_time_hi_version(self):
return (self.int >> 64L) & 0xffff
time_hi_version = property(get_time_hi_version)
def get_clock_seq_hi_variant(self):
return (self.int >> 56L) & 0xff
clock_seq_hi_variant = property(get_clock_seq_hi_variant)
def get_clock_seq_low(self):
return (self.int >> 48L) & 0xff
clock_seq_low = property(get_clock_seq_low)
def get_time(self):
return (((self.time_hi_version & 0x0fffL) << 48L) |
(self.time_mid << 32L) | self.time_low)
time = property(get_time)
def get_clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
self.clock_seq_low)
clock_seq = property(get_clock_seq)
def get_node(self):
return self.int & 0xffffffffffff
node = property(get_node)
def get_hex(self):
return '%032x' % self.int
hex = property(get_hex)
def get_urn(self):
return 'urn:uuid:' + str(self)
urn = property(get_urn)
def get_variant(self):
if not self.int & (0x8000 << 48L):
return RESERVED_NCS
elif not self.int & (0x4000 << 48L):
return RFC_4122
elif not self.int & (0x2000 << 48L):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
variant = property(get_variant)
def get_version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76L) & 0xf)
version = property(get_version)
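# Example (illustrative, per the constructor docstring): these are all equal,
#   UUID('12345678123456781234567812345678') ==
#   UUID(bytes='\x12\x34\x56\x78' * 4) ==
#   UUID(int=0x12345678123456781234567812345678)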
def _popen(command, args):
import os
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
path.extend(('/sbin', '/usr/sbin'))
for dir in path:
executable = os.path.join(dir, command)
if (os.path.exists(executable) and
os.access(executable, os.F_OK | os.X_OK) and
not os.path.isdir(executable)):
break
else:
return None
# LC_ALL to ensure English output, 2>/dev/null to prevent output on
# stderr (Note: we don't have an example where the words we search for
# are actually localized, but in theory some system could do so.)
cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
return os.popen(cmd)
def _find_mac(command, args, hw_identifiers, get_index):
try:
pipe = _popen(command, args)
if not pipe:
return
with pipe:
for line in pipe:
words = line.lower().rstrip().split()
for i in range(len(words)):
if words[i] in hw_identifiers:
try:
word = words[get_index(i)]
mac = int(word.replace(':', ''), 16)
if mac:
return mac
except (ValueError, IndexError):
# Virtual interfaces, such as those provided by
# VPNs, do not have a colon-delimited MAC address
# as expected, but a 16-byte HWAddr separated by
# dashes. These should be ignored in favor of a
# real MAC address
pass
except IOError:
pass
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
if mac:
return mac
def _arp_getnode():
"""Get the hardware address on Unix by running arp."""
import os, socket
try:
ip_addr = socket.gethostbyname(socket.gethostname())
except EnvironmentError:
return None
# Try getting the MAC addr from arp based on our IP address (Solaris).
return _find_mac('arp', '-an', [ip_addr], lambda i: -1)
def _lanscan_getnode():
"""Get the hardware address on Unix by running lanscan."""
# This might work on HP-UX.
return _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
def _netstat_getnode():
"""Get the hardware address on Unix by running netstat."""
# This might work on AIX, Tru64 UNIX and presumably on IRIX.
try:
pipe = _popen('netstat', '-ia')
if not pipe:
return
with pipe:
words = pipe.readline().rstrip().split()
try:
i = words.index('Address')
except ValueError:
return
for line in pipe:
try:
words = line.rstrip().split()
word = words[i]
if len(word) == 17 and word.count(':') == 5:
mac = int(word.replace(':', ''), 16)
if mac:
return mac
except (ValueError, IndexError):
pass
except OSError:
pass
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os, re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
with pipe:
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet, netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
(bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
import ctypes, ctypes.util
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
for libname in ['uuid', 'c']:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except:
continue
if hasattr(lib, 'uuid_generate_random'):
_uuid_generate_random = lib.uuid_generate_random
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
if _uuid_generate_random is not None:
break # found everything we were looking for
# The uuid_generate_* functions are broken on MacOS X 10.5, as noted
    # in issue #8621, the function generates the same sequence of values
# in the parent process and all children created using fork (unless
# those children use exec as well).
#
# Assume that the uuid_generate functions are broken from 10.5 onward,
# the test can be adjusted when a later version is fixed.
import sys
if sys.platform == 'darwin':
import os
if int(os.uname()[2].split('.')[0]) >= 9:
_uuid_generate_random = _uuid_generate_time = None
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
# NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
# 6 bytes returned by UuidCreateSequential are fixed, they don't appear
# to bear any relationship to the MAC address of any network device
# on the box.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
_buffer = ctypes.create_string_buffer(16)
if _UuidCreate(_buffer) == 0:
return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None
def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122.
"""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode, _arp_getnode,
_lanscan_getnode, _netstat_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
global _last_timestamp
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
timestamp = int(nanoseconds//100) + 0x01b21dd213814000L
if _last_timestamp is not None and timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.randrange(1<<14L) # instead of stable storage
time_low = timestamp & 0xffffffffL
time_mid = (timestamp >> 32L) & 0xffffL
time_hi_version = (timestamp >> 48L) & 0x0fffL
clock_seq_low = clock_seq & 0xffL
clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
from hashlib import md5
hash = md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
# When the system provides a version-4 UUID generator, use it.
if _uuid_generate_random:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
# Otherwise, get randomness from urandom or the 'random' module.
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
from hashlib import sha1
hash = sha1(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
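# A usage sketch for the name-based constructors above (expected values taken
# from the standard 'python.org' example in RFC 4122 / the Python docs):
#
#     >>> uuid3(NAMESPACE_DNS, 'python.org')
#     UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
#     >>> uuid5(NAMESPACE_DNS, 'python.org')
#     UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')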
| mit |
glennyonemitsu/MarkupHiveServer | src/form/admin.py | 1 | 1667 | '''
All forms for the admin control panel,
NOT for the customers' management panel.
'''
from flask.ext.wtf import Form
from wtforms import BooleanField, DateTimeField, DecimalField, IntegerField, \
TextField
from wtforms.validators import InputRequired as Required
class AccountForm(Form):
join_timestamp = DateTimeField()
plan_code_name = TextField('Plan Code Name')
plan_name = TextField('Customer Facing Plan Name')
price = DecimalField('Monthly Price', places=2)
transfer = IntegerField('Monthly Data Transfer Limit')
size_static = IntegerField('Max Static File Size Per File')
size_total = IntegerField('Max Upload App Size')
count_template = IntegerField('Max Templates')
count_static = IntegerField('Max Static File Count')
custom_domain = BooleanField('Allow Custom Domains')
cms = BooleanField('CMS Enabled')
class PlanForm(Form):
code_name = TextField(
'Plan Code Name', [Required()])
name = TextField(
'Customer Facing Plan Name', [Required()])
price = DecimalField(
'Monthly Price', [Required()], places=2)
transfer = IntegerField(
'Monthly Data Transfer Limit', [Required()])
size_static = IntegerField(
'Max Static File Size Per File', [Required()])
size_total = IntegerField(
'Max Upload App Size', [Required()])
count_template = IntegerField(
'Max Templates', [Required()])
count_static = IntegerField(
'Max Static File Count', [Required()])
custom_domain = BooleanField('Allow Custom Domains')
cms = BooleanField('CMS Enabled')
available = BooleanField('Available for Registration')
| mit |
franc3000/temp-app | temp_app/app.py | 1 | 2398 | # -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from temp_app import commands, public, user
from temp_app.extensions import bcrypt, cache, csrf_protect, db, debug_toolbar, login_manager, migrate, webpack
from temp_app.settings import ProdConfig
import logging
# logging.config().dictConfig(LOGGING)
logging.basicConfig()
logger = logging.getLogger('MYAPP')
logger.info("Just testing")
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
logger.warning(config_object)
logger.warning(app.config)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
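# A minimal usage sketch of the factory (assumes a DevConfig object exists in
# temp_app.settings, which is not shown in this module):
#
#     from temp_app.settings import DevConfig
#     app = create_app(DevConfig)
#     app.run()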
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
webpack.init_app(app)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
| bsd-3-clause |
campbe13/openhatch | vendor/packages/PyYaml/lib3/yaml/composer.py | 273 | 4881 |
__all__ = ['Composer', 'ComposerError']
from .error import MarkedYAMLError
from .events import *
from .nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer:
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# Are there more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor, event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor, self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == '!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
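# A usage sketch: Composer is normally mixed into a Loader class rather than
# used directly, and is what backs the module-level compose() helper:
#
#     import yaml
#     node = yaml.compose('a: [1, 2]')   # returns the root MappingNode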
| agpl-3.0 |
loop1024/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_pep292.py | 103 | 7682 | # Copyright (C) 2004 Python Software Foundation
# Author: barry@python.org (Barry Warsaw)
# License: http://www.opensource.org/licenses/PythonSoftFoundation.php
import unittest
from string import Template
class Bag:
pass
class Mapping:
def __getitem__(self, name):
obj = self
for part in name.split('.'):
try:
obj = getattr(obj, part)
except AttributeError:
raise KeyError(name)
return obj
class TestTemplate(unittest.TestCase):
def test_regular_templates(self):
s = Template('$who likes to eat a bag of $what worth $$100')
self.assertEqual(s.substitute(dict(who='tim', what='ham')),
'tim likes to eat a bag of ham worth $100')
self.assertRaises(KeyError, s.substitute, dict(who='tim'))
def test_regular_templates_with_braces(self):
s = Template('$who likes ${what} for ${meal}')
d = dict(who='tim', what='ham', meal='dinner')
self.assertEqual(s.substitute(d), 'tim likes ham for dinner')
self.assertRaises(KeyError, s.substitute,
dict(who='tim', what='ham'))
def test_escapes(self):
eq = self.assertEqual
s = Template('$who likes to eat a bag of $$what worth $$100')
eq(s.substitute(dict(who='tim', what='ham')),
'tim likes to eat a bag of $what worth $100')
s = Template('$who likes $$')
eq(s.substitute(dict(who='tim', what='ham')), 'tim likes $')
def test_percents(self):
eq = self.assertEqual
s = Template('%(foo)s $foo ${foo}')
d = dict(foo='baz')
eq(s.substitute(d), '%(foo)s baz baz')
eq(s.safe_substitute(d), '%(foo)s baz baz')
def test_stringification(self):
eq = self.assertEqual
s = Template('tim has eaten $count bags of ham today')
d = dict(count=7)
eq(s.substitute(d), 'tim has eaten 7 bags of ham today')
eq(s.safe_substitute(d), 'tim has eaten 7 bags of ham today')
s = Template('tim has eaten ${count} bags of ham today')
eq(s.substitute(d), 'tim has eaten 7 bags of ham today')
def test_tupleargs(self):
eq = self.assertEqual
s = Template('$who ate ${meal}')
d = dict(who=('tim', 'fred'), meal=('ham', 'kung pao'))
eq(s.substitute(d), "('tim', 'fred') ate ('ham', 'kung pao')")
eq(s.safe_substitute(d), "('tim', 'fred') ate ('ham', 'kung pao')")
def test_SafeTemplate(self):
eq = self.assertEqual
s = Template('$who likes ${what} for ${meal}')
eq(s.safe_substitute(dict(who='tim')), 'tim likes ${what} for ${meal}')
eq(s.safe_substitute(dict(what='ham')), '$who likes ham for ${meal}')
eq(s.safe_substitute(dict(what='ham', meal='dinner')),
'$who likes ham for dinner')
eq(s.safe_substitute(dict(who='tim', what='ham')),
'tim likes ham for ${meal}')
eq(s.safe_substitute(dict(who='tim', what='ham', meal='dinner')),
'tim likes ham for dinner')
def test_invalid_placeholders(self):
raises = self.assertRaises
s = Template('$who likes $')
raises(ValueError, s.substitute, dict(who='tim'))
s = Template('$who likes ${what)')
raises(ValueError, s.substitute, dict(who='tim'))
s = Template('$who likes $100')
raises(ValueError, s.substitute, dict(who='tim'))
def test_idpattern_override(self):
class PathPattern(Template):
idpattern = r'[_a-z][._a-z0-9]*'
m = Mapping()
m.bag = Bag()
m.bag.foo = Bag()
m.bag.foo.who = 'tim'
m.bag.what = 'ham'
s = PathPattern('$bag.foo.who likes to eat a bag of $bag.what')
self.assertEqual(s.substitute(m), 'tim likes to eat a bag of ham')
def test_pattern_override(self):
class MyPattern(Template):
pattern = r"""
(?P<escaped>@{2}) |
@(?P<named>[_a-z][._a-z0-9]*) |
@{(?P<braced>[_a-z][._a-z0-9]*)} |
(?P<invalid>@)
"""
m = Mapping()
m.bag = Bag()
m.bag.foo = Bag()
m.bag.foo.who = 'tim'
m.bag.what = 'ham'
s = MyPattern('@bag.foo.who likes to eat a bag of @bag.what')
self.assertEqual(s.substitute(m), 'tim likes to eat a bag of ham')
class BadPattern(Template):
pattern = r"""
(?P<badname>.*) |
(?P<escaped>@{2}) |
@(?P<named>[_a-z][._a-z0-9]*) |
@{(?P<braced>[_a-z][._a-z0-9]*)} |
(?P<invalid>@) |
"""
s = BadPattern('@bag.foo.who likes to eat a bag of @bag.what')
self.assertRaises(ValueError, s.substitute, {})
self.assertRaises(ValueError, s.safe_substitute, {})
def test_unicode_values(self):
s = Template('$who likes $what')
d = dict(who=u't\xffm', what=u'f\xfe\fed')
self.assertEqual(s.substitute(d), u't\xffm likes f\xfe\x0ced')
def test_keyword_arguments(self):
eq = self.assertEqual
s = Template('$who likes $what')
eq(s.substitute(who='tim', what='ham'), 'tim likes ham')
eq(s.substitute(dict(who='tim'), what='ham'), 'tim likes ham')
eq(s.substitute(dict(who='fred', what='kung pao'),
who='tim', what='ham'),
'tim likes ham')
s = Template('the mapping is $mapping')
eq(s.substitute(dict(foo='none'), mapping='bozo'),
'the mapping is bozo')
eq(s.substitute(dict(mapping='one'), mapping='two'),
'the mapping is two')
def test_keyword_arguments_safe(self):
eq = self.assertEqual
raises = self.assertRaises
s = Template('$who likes $what')
eq(s.safe_substitute(who='tim', what='ham'), 'tim likes ham')
eq(s.safe_substitute(dict(who='tim'), what='ham'), 'tim likes ham')
eq(s.safe_substitute(dict(who='fred', what='kung pao'),
who='tim', what='ham'),
'tim likes ham')
s = Template('the mapping is $mapping')
eq(s.safe_substitute(dict(foo='none'), mapping='bozo'),
'the mapping is bozo')
eq(s.safe_substitute(dict(mapping='one'), mapping='two'),
'the mapping is two')
d = dict(mapping='one')
raises(TypeError, s.substitute, d, {})
raises(TypeError, s.safe_substitute, d, {})
def test_delimiter_override(self):
eq = self.assertEqual
raises = self.assertRaises
class AmpersandTemplate(Template):
delimiter = '&'
s = AmpersandTemplate('this &gift is for &{who} &&')
eq(s.substitute(gift='bud', who='you'), 'this bud is for you &')
raises(KeyError, s.substitute)
eq(s.safe_substitute(gift='bud', who='you'), 'this bud is for you &')
eq(s.safe_substitute(), 'this &gift is for &{who} &')
s = AmpersandTemplate('this &gift is for &{who} &')
raises(ValueError, s.substitute, dict(gift='bud', who='you'))
eq(s.safe_substitute(), 'this &gift is for &{who} &')
class PieDelims(Template):
delimiter = '@'
s = PieDelims('@who likes to eat a bag of @{what} worth $100')
self.assertEqual(s.substitute(dict(who='tim', what='ham')),
'tim likes to eat a bag of ham worth $100')
def test_main():
from test import test_support
test_classes = [TestTemplate,]
test_support.run_unittest(*test_classes)
if __name__ == '__main__':
test_main()
| mit |
megraf/asuswrt-merlin | release/src/router/samba36/lib/dnspython/setup.py | 20 | 2509 | #!/usr/bin/env python
#
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
from distutils.core import setup
version = '1.9.3'
kwargs = {
'name' : 'dnspython',
'version' : version,
'description' : 'DNS toolkit',
'long_description' : \
"""dnspython is a DNS toolkit for Python. It supports almost all
record types. It can be used for queries, zone transfers, and dynamic
updates. It supports TSIG authenticated messages and EDNS0.
dnspython provides both high and low level access to DNS. The high
level classes perform queries for data of a given name, type, and
class, and return an answer set. The low level classes allow
direct manipulation of DNS zones, messages, names, and records.""",
'author' : 'Bob Halley',
'author_email' : 'halley@dnspython.org',
'license' : 'BSD-like',
'url' : 'http://www.dnspython.org',
'packages' : ['dns', 'dns.rdtypes', 'dns.rdtypes.IN', 'dns.rdtypes.ANY'],
}
if sys.hexversion >= 0x02020300:
kwargs['download_url'] = \
'http://www.dnspython.org/kits/%s/dnspython-%s.tar.gz' % (version,
version)
kwargs['classifiers'] = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: Freeware",
"Operating System :: Microsoft :: Windows :: Windows 95/98/2000",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Internet :: Name Service (DNS)",
"Topic :: Software Development :: Libraries :: Python Modules",
]
if sys.hexversion >= 0x02050000:
kwargs['requires'] = []
kwargs['provides'] = ['dns']
setup(**kwargs)
| gpl-2.0 |
bixbydev/Bixby | google/gdata-2.0.18/build/lib.linux-x86_64-2.7/gdata/contacts/service.py | 120 | 17345 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContactsService extends the GDataService for Google Contacts operations.
ContactsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'dbrattli (Dag Brattli)'
import gdata
import gdata.calendar
import gdata.service
DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full'
'/batch')
DEFAULT_PROFILES_BATCH_URL = ('http://www.google.com'
'/m8/feeds/profiles/default/full/batch')
GDATA_VER_HEADER = 'GData-Version'
class Error(Exception):
pass
class RequestError(Error):
pass
class ContactsService(gdata.service.GDataService):
"""Client for the Google Contacts service."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com', additional_headers=None,
contact_list='default', **kwargs):
"""Creates a client for the Contacts service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'www.google.com'.
contact_list: string (optional) The name of the default contact list to
use when no URI is specified to the methods of the service.
Default value: 'default' (the logged in user's contact list).
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
self.contact_list = contact_list
gdata.service.GDataService.__init__(
self, email=email, password=password, service='cp', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def GetFeedUri(self, kind='contacts', contact_list=None, projection='full',
scheme=None):
"""Builds a feed URI.
Args:
kind: The type of feed to return, typically 'groups' or 'contacts'.
Default value: 'contacts'.
contact_list: The contact list to return a feed for.
Default value: self.contact_list.
projection: The projection to apply to the feed contents, for example
'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'.
scheme: The URL scheme such as 'http' or 'https', None to return a
relative URI without hostname.
Returns:
A feed URI using the given kind, contact list, and projection.
Example: '/m8/feeds/contacts/default/full'.
"""
contact_list = contact_list or self.contact_list
if kind == 'profiles':
contact_list = 'domain/%s' % contact_list
prefix = scheme and '%s://%s' % (scheme, self.server) or ''
return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection)
def GetContactsFeed(self, uri=None):
uri = uri or self.GetFeedUri()
return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString)
def GetContact(self, uri):
return self.Get(uri, converter=gdata.contacts.ContactEntryFromString)
def CreateContact(self, new_contact, insert_uri=None, url_params=None,
escape_params=True):
"""Adds an new contact to Google Contacts.
Args:
new_contact: atom.Entry or subclass A new contact which is to be added to
Google Contacts.
insert_uri: the URL to post new contacts to the feed
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful insert, an entry containing the contact created
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
insert_uri = insert_uri or self.GetFeedUri()
return self.Post(new_contact, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def UpdateContact(self, edit_uri, updated_contact, url_params=None,
escape_params=True):
"""Updates an existing contact.
Args:
edit_uri: string The edit link URI for the element being updated
updated_contact: string, atom.Entry or subclass containing
the Atom Entry which will replace the contact which is
stored at the edit_url
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Put(updated_contact, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.ContactEntryFromString)
def DeleteContact(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
"""Removes an contact with the specified ID from Google Contacts.
Args:
edit_uri: string The edit URL of the entry to be deleted. Example:
'/m8/feeds/contacts/default/full/xxx/yyy'
url_params: dict (optional) Additional URL parameters to be included
in the deletion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
On successful delete, a httplib.HTTPResponse containing the server's
response to the DELETE request.
On failure, a RequestError is raised of the form:
{'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response}
"""
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def GetGroupsFeed(self, uri=None):
uri = uri or self.GetFeedUri('groups')
return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString)
def CreateGroup(self, new_group, insert_uri=None, url_params=None,
escape_params=True):
insert_uri = insert_uri or self.GetFeedUri('groups')
return self.Post(new_group, insert_uri, url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def UpdateGroup(self, edit_uri, updated_group, url_params=None,
escape_params=True):
return self.Put(updated_group, self._CleanUri(edit_uri),
url_params=url_params,
escape_params=escape_params,
converter=gdata.contacts.GroupEntryFromString)
def DeleteGroup(self, edit_uri, extra_headers=None,
url_params=None, escape_params=True):
return self.Delete(self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params)
def ChangePhoto(self, media, contact_entry_or_url, content_type=None,
content_length=None):
"""Change the photo for the contact by uploading a new photo.
Performs a PUT against the photo edit URL to send the binary data for the
photo.
Args:
media: filename, file-like-object, or a gdata.MediaSource object to send.
contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
method will search for an edit photo link URL and
perform a PUT to the URL.
content_type: str (optional) the mime type for the photo data. This is
necessary if media is a file or file name, but if media
is a MediaSource object then the media object can contain
the mime type. If media_type is set, it will override the
mime type in the media object.
content_length: int or str (optional) Specifying the content length is
only required if media is a file-like object. If media
is a filename, the length is determined using
os.path.getsize. If media is a MediaSource object, it is
assumed that it already contains the content length.
"""
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if isinstance(media, gdata.MediaSource):
payload = media
# If the media object is a file-like object, then use it as the file
# handle in the MediaSource.
elif hasattr(media, 'read'):
payload = gdata.MediaSource(file_handle=media,
content_type=content_type, content_length=content_length)
# Assume that the media object is a file name.
else:
payload = gdata.MediaSource(content_type=content_type,
content_length=content_length, file_path=media)
return self.Put(payload, url)
def GetPhoto(self, contact_entry_or_url):
"""Retrives the binary data for the contact's profile photo as a string.
Args:
contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string
containing the photo link's URL. If the contact entry does not
contain a photo link, the image will not be fetched and this method
will return None.
"""
# TODO: add the ability to write out the binary image data to a file,
# reading and writing a chunk at a time to avoid potentially using up
# large amounts of memory.
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
photo_link = contact_entry_or_url.GetPhotoLink()
if photo_link:
url = photo_link.href
else:
url = contact_entry_or_url
if url:
return self.Get(url, converter=str)
else:
return None
def DeletePhoto(self, contact_entry_or_url):
url = None
if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry):
url = contact_entry_or_url.GetPhotoEditLink().href
else:
url = contact_entry_or_url
if url:
self.Delete(url)
def GetProfilesFeed(self, uri=None):
"""Retrieves a feed containing all domain's profiles.
Args:
uri: string (optional) the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full
Returns:
On success, a ProfilesFeed containing the profiles.
On failure, raises a RequestError.
"""
uri = uri or self.GetFeedUri('profiles')
return self.Get(uri,
converter=gdata.contacts.ProfilesFeedFromString)
def GetProfile(self, uri):
"""Retrieves a domain's profile for the user.
Args:
uri: string the URL to retrieve the profiles feed,
for example /m8/feeds/profiles/default/full/username
Returns:
On success, a ProfileEntry containing the profile for the user.
On failure, raises a RequestError
"""
return self.Get(uri,
converter=gdata.contacts.ProfileEntryFromString)
def UpdateProfile(self, edit_uri, updated_profile, url_params=None,
escape_params=True):
"""Updates an existing profile.
Args:
edit_uri: string The edit link URI for the element being updated
updated_profile: string atom.Entry or subclass containing
the Atom Entry which will replace the profile which is
stored at the edit_url.
url_params: dict (optional) Additional URL parameters to be included
in the update request.
escape_params: boolean (optional) If true, the url_params will be
escaped before they are included in the request.
Returns:
On successful update, a httplib.HTTPResponse containing the server's
response to the PUT request.
On failure, raises a RequestError.
"""
return self.Put(updated_profile, self._CleanUri(edit_uri),
url_params=url_params, escape_params=escape_params,
converter=gdata.contacts.ProfileEntryFromString)
def ExecuteBatch(self, batch_feed, url,
converter=gdata.contacts.ContactsFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.contacts.ContactFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: str The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is ContactsFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ContactsFeed.
"""
return self.Post(batch_feed, url, converter=converter)
def ExecuteBatchProfiles(self, batch_feed, url,
converter=gdata.contacts.ProfilesFeedFromString):
"""Sends a batch request feed to the server.
Args:
batch_feed: gdata.profiles.ProfilesFeed A feed containing batch
request entries. Each entry contains the operation to be performed
on the data contained in the entry. For example an entry with an
operation type of insert will be used as if the individual entry
had been inserted.
url: string The batch URL to which these operations should be applied.
converter: Function (optional) The function used to convert the server's
response to an object. The default value is
gdata.profiles.ProfilesFeedFromString.
Returns:
The results of the batch request's execution on the server. If the
default converter is used, this is stored in a ProfilesFeed.
"""
return self.Post(batch_feed, url, converter=converter)
def _CleanUri(self, uri):
"""Sanitizes a feed URI.
Args:
uri: The URI to sanitize, can be relative or absolute.
Returns:
The given URI without its http://server prefix, if any.
Keeps the leading slash of the URI.
"""
url_prefix = 'http://%s' % self.server
if uri.startswith(url_prefix):
uri = uri[len(url_prefix):]
return uri
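# A minimal usage sketch (illustrative only; assumes valid account credentials):
#
#     client = ContactsService(email='user@example.com', password='secret',
#                              source='example-app')
#     client.ProgrammaticLogin()
#     feed = client.GetContactsFeed()
#     for entry in feed.entry:
#         print entry.title.text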
class ContactsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None, group=None):
self.feed = feed or '/m8/feeds/contacts/default/full'
if group:
self._SetGroup(group)
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
def _GetGroup(self):
if 'group' in self:
return self['group']
else:
return None
def _SetGroup(self, group_id):
self['group'] = group_id
group = property(_GetGroup, _SetGroup,
doc='The group query parameter to find only contacts in this group')
class GroupsQuery(gdata.service.Query):
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
self.feed = feed or '/m8/feeds/groups/default/full'
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
class ProfilesQuery(gdata.service.Query):
"""Constructs a query object for the profiles feed."""
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
self.feed = feed or '/m8/feeds/profiles/default/full'
gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query,
params=params, categories=categories)
| gpl-3.0 |
ganeshnalawade/ansible | test/integration/targets/ansible-doc/library/test_docs_suboptions.py | 38 | 1590 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: test_docs_suboptions
short_description: Test module
description:
- Test module
author:
- Ansible Core Team
options:
with_suboptions:
description:
- An option with suboptions.
- Use with care.
type: dict
suboptions:
z_last:
description: The last suboption.
type: str
m_middle:
description:
- The suboption in the middle.
- Has its own suboptions.
suboptions:
a_suboption:
description: A sub-suboption.
type: str
a_first:
description: The first suboption.
type: str
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
test_docs_suboptions=dict(
type='dict',
options=dict(
a_first=dict(type='str'),
m_middle=dict(
type='dict',
options=dict(
a_suboption=dict(type='str')
),
),
z_last=dict(type='str'),
),
),
),
)
module.exit_json()
if __name__ == '__main__':
main()
| gpl-3.0 |
Migal/opt_ctrl_lab_1 | lab_1/utils.py | 1 | 13364 | import math
import numpy as np
from scipy.optimize import minimize
def array(f, numval, numdh):
"""Создать N-мерный массив.
param: f - функция, которая приминает N аргументов.
param: numval - диапазоны значений параметров функции. Список
param: numdh - шаги для параметров. Список
"""
def rec_for(f, numdim, numdh, current_l, l_i, arr):
"""Рекурсивный цикл.
param: f - функция, которая приминает N аргументов.
param: numdim - размерность выходной матрицы. Список
param: numdh - шаги для параметров. Список
param: current_l - текущая глубина рекурсии.
param: l_i - промежуточный список индексов. Список
param: arr - матрица, с которой мы работаем. np.array
"""
for i in range(numdim[current_l]):
l_i.append(i)
if current_l < len(numdim) - 1:
rec_for(f, numdim, numdh, current_l + 1, l_i, arr)
else:
args = (np.array(l_i) * np.array(numdh))
arr[tuple(l_i)] = f(*args)
l_i.pop()
return arr
numdim = [int(numval[i] / numdh[i]) + 1 for i in range(len(numdh))]
arr = np.zeros(numdim)
arr = rec_for(f, numdim, numdh, 0, [], arr)
# We need the mapping x -> j, y -> i (for plotting), so we use transpose
arr = np.transpose(arr)
return arr
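# A worked example of the convention above (a sketch, not part of the lab code):
# array(lambda s, t: s + t, [1.0, 1.0], [0.5, 0.5]) evaluates f on a 3x3 grid;
# after the transpose, element [j, i] holds f(i * 0.5, j * 0.5), i.e. rows
# index the second argument (time) and columns the first (space).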
def TDMA(a, b, c, f):
"""Метод прогонки.
param: a - левая поддиагональ.
param: b - правая поддиагональ.
param: c - центр.
param: f - правая часть.
"""
#a, b, c, f = map(lambda k_list: map(float, k_list), (a, b, c, f))
alpha = [0]
beta = [0]
n = len(f)
x = [0] * n
for i in range(n - 1):
alpha.append(-b[i] / (a[i] * alpha[i] + c[i]))
beta.append((f[i] - a[i] * beta[i]) / (a[i] * alpha[i] + c[i]))
x[n - 1] = (f[n - 1] - a[n - 1] * beta[n - 1]) / (c[n - 1] + a[n - 1] * alpha[n - 1])
for i in reversed(range(n - 1)):
x[i] = alpha[i + 1] * x[i + 1] + beta[i + 1]
return x
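# A worked check (a sketch, not part of the lab code): for the system
#     4*x0 + 1*x1        = 5
#     1*x0 + 4*x1 + 1*x2 = 6
#            1*x1 + 4*x2 = 5
# TDMA(a=[0., 1., 1.], b=[1., 1., 0.], c=[4., 4., 4.], f=[5., 6., 5.])
# returns [1.0, 1.0, 1.0], the exact solution (a[0] and b[-1] are unused).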
def integral(arr, dh):
val = 0.
for i in range(0, len(arr) - 1):
val += arr[i] + arr[i + 1]
return val * dh / 2.
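# Quick sanity check (illustrative): integral([0., 0.5, 1.], 0.5) == 0.5,
# the trapezoidal approximation of the integral of f(s) = s over [0, 1].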
#----------------------------------------------------------------------------------------------------------------------
def criterion_1(model):
val = 0.
dt = model.dt
# Compute the norm of the difference between successive controls
if len(model.p_arr) == 1:
val = 10000000.
else:
arr = (model.p_arr[-1] - model.p_arr[-2]) ** 2
val = integral(arr, dt)
return val
def criterion_2(model):
val_1, val_2 = 0., 0.
val = 0.
dh = model.dh
# Compute the difference of the functional values between iterations
if len(model.x_arr) == 1:
val = 10000000.
else:
arr_1 = (model.x_arr[-1][-1,:] - model.y_arr) ** 2
arr_2 = (model.x_arr[-2][-1,:] - model.y_arr) ** 2
val_1 = integral(arr_1, dh)
val_2 = integral(arr_2, dh)
val = abs(val_1 - val_2)
return val
def criterion_3(model):
val = 0.
dt = model.dt
# Compute the norm of the derivative
if len(model.psi_arr) == 1:
val = 10000000.
else:
arr = (model.psi_arr[-1][:, -1] * model.a ** 2 * model.v) ** 2
val = integral(arr, dt)
return val
#----------------------------------------------------------------------------------------------------------------------
def f_alpha(alpha, model, ind):
val = .0
p_min, p_max = model.p_min, model.p_max
p_arr = model.p_arr[-1]
psi_l_arr = model.psi_arr[-1][:,-1]
p_cond = p_arr - alpha * psi_l_arr
p_cond[p_cond < p_min] = p_min
p_cond[p_cond > p_max] = p_max
matr = array(model.f, [model.l, model.T], [model.dh, model.dt])
matr[0,:] = array(model.fi, [model.l], [model.dh])
buf = 1. / (3. + 2. * model.dh * model.v)
# Number of equations
eq_l = model.N - 1
f = [0. for i in range(eq_l)]
a2_dt_dh2 = model.a ** 2 * model.dt / model.dh ** 2
# Solve the first (direct) problem
for j in range(0, model.M):
# f
f[0:-1] = [-matr[j, i] - model.dt * model.f_arr[j, i] for i in range(1, eq_l)]
# This part depends on the approximation in use, so it is worth extracting into a function
f[-1] = -matr[j, -2] - model.dt * model.f_arr[j, -2]
f[-1] += -a2_dt_dh2 * 2. * model.dh * model.v * buf * p_cond[j + 1]
matr[j + 1,1:eq_l + 1] = TDMA(model.a_arr, model.b, model.c, f)
# Compute the first and last elements
# This part depends on the approximation in use, so it is worth extracting into a function
matr[j + 1, 0] = 4. / 3. * matr[j + 1, 1] - 1. / 3. * matr[j + 1, 2]
matr[j + 1, -1] = 4. * buf * matr[j + 1, -2]
matr[j + 1, -1] -= buf * matr[j + 1, -3]
matr[j + 1, -1] += 2. * model.dh * model.v * buf * p_cond[j + 1]
arr = (matr[-1,:] - model.y_arr) ** 2
val = integral(arr, model.dh)
return val
def get_alpha_1(model, ind):
val = 0.
bnds = ((0, None),)
res = minimize(f_alpha, 1., args=(model, ind), bounds=bnds, tol=10**-5)
val = res.x
return val
def get_alpha_5(model, ind):
val = 0.
c, alpha = 1., 3./4.
# Compute the step-size coefficient
val = c * (float(ind) + 1.) ** -alpha
return val
def get_alpha_5_1(model, ind):
val = 0.
c, alpha = 10., 3./4.
# Compute the step-size coefficient
val = c * (float(ind) + 1.) ** -alpha
return val
#-----------------------------------------------------------------------------
# Model class for Lab 1
class Lab1OptCtrlModel():
def __init__(self, p_d):
self.a, self.l, self.v, self.T = p_d['a'], p_d['l'], p_d['v'], p_d['T']
self.p, self.f = p_d['p(t)'], p_d['f(s, t)']
self.p_min, self.p_max, self.R = p_d['p_min'], p_d['p_max'], p_d['R']
self.fi, self.y = p_d['fi(s)'], p_d['y(s)']
self.dh, self.dt = p_d['dh'], p_d['dt']
self.N, self.M = p_d['N'], p_d['M']
self.p_arr = []
self.p_arr.append(array(self.p, [self.T], [self.dt]))
self.f_arr = array(self.f, [self.l, self.T], [self.dh, self.dt])
self.x_arr = []
self.x_arr.append(array(self.f, [self.l, self.T], [self.dh, self.dt]))
self.x_arr[-1][0,:] = array(self.fi, [self.l], [self.dh])
self.psi_arr = []
self.psi_arr.append(array(self.f, [self.l, self.T], [self.dh, self.dt]))
self.y_arr = array(self.y, [self.l], [self.dh])
self.alpha = []
self.final_step = 0
self.err = []
def solve(self, criterion, get_alpha, eps=10**-2, max_steps=None):
self.eps = eps
# Number of equations
eq_l = self.N - 1
# Initialize the elements of the tridiagonal solver that stay constant
self.a_arr, self.b, self.c = [0. for i in range(eq_l)], [0. for i in range(eq_l)], [0. for i in range(eq_l)]
f = [0. for i in range(eq_l)]
a2_dt_dh2 = self.a ** 2 * self.dt / self.dh ** 2
buf = 1. / (3. + 2. * self.dh * self.v)
# a
self.a_arr[1:-1] = [a2_dt_dh2 for i in range(1, eq_l - 1)]
# This part depends on the approximation in use, so it is worth extracting into a function
self.a_arr[-1] = a2_dt_dh2 * (1. - buf)
# b
# This part depends on the approximation in use, so it is worth extracting into a function
self.b[0] = 2. / 3. * a2_dt_dh2
self.b[1:-1] = [a2_dt_dh2 for i in range(1, eq_l - 1)]
# c
# This part depends on the approximation in use, so it is worth extracting into a function
self.c[0] = -2. / 3. * a2_dt_dh2 - 1.
self.c[1:-1] = [-1. - 2. * a2_dt_dh2 for i in range(1, eq_l - 1)]
# This part depends on the approximation in use, so it is worth extracting into a function
self.c[-1] = -1. + a2_dt_dh2 * (4. * buf - 2.)
# c for the second (adjoint) problem
c_psi = [0. for i in range(eq_l)]
# This part depends on the approximation in use, so it is worth extracting into a function
c_psi[0] = -1. - 2. / 3. * a2_dt_dh2
c_psi[1:-1] = [-1. - 2. * a2_dt_dh2 for i in range(1, eq_l - 1)]
c_psi[-1] = -1. + a2_dt_dh2 * (4. * buf - 2.)
# f for the second (adjoint) problem
f_psi = [0. for i in range(eq_l)]
ind = 0
apr_max_steps = True
self.err.append(criterion(self))
while self.err[-1] > self.eps and apr_max_steps:
# Solve the first (direct) problem
for j in range(0, self.M):
# f
f[0:-1] = [-self.x_arr[-1][j, i] - self.dt * self.f_arr[j, i] for i in range(1, eq_l)]
# This part depends on the approximation in use, so it is worth extracting into a function
f[-1] = -self.x_arr[-1][j, -2] - self.dt * self.f_arr[j, -2]
f[-1] += -a2_dt_dh2 * 2. * self.dh * self.v * buf * self.p_arr[-1][j + 1]
# Solve the system
self.x_arr[-1][j + 1,1:eq_l + 1] = TDMA(self.a_arr, self.b, self.c, f)
# Compute the first and last elements
# This part depends on the approximation in use, so it is worth extracting into a function
self.x_arr[-1][j + 1, 0] = 4. / 3. * self.x_arr[-1][j + 1, 1] - 1. / 3. * self.x_arr[-1][j + 1, 2]
self.x_arr[-1][j + 1, -1] = 4. * buf * self.x_arr[-1][j + 1, -2]
self.x_arr[-1][j + 1, -1] -= buf * self.x_arr[-1][j + 1, -3]
self.x_arr[-1][j + 1, -1] += 2. * self.dh * self.v * buf * self.p_arr[-1][j + 1]
# Set the terminal-time condition for psi
self.psi_arr[-1][-1,:] = 2. * (self.x_arr[-1][-1,:] - self.y_arr)
# Solve the second (adjoint) problem
for j in range(self.M - 1, -1, -1):
# f
f_psi = [-self.psi_arr[-1][j + 1, i] for i in range(1, eq_l + 1)]
# Solve the system
self.psi_arr[-1][j,1:eq_l + 1] = TDMA(self.a_arr, self.b, c_psi, f_psi)
# Compute the first and last elements
# This part depends on the approximation in use, so it is worth extracting into a function
self.psi_arr[-1][j, 0] = 4. / 3. * self.psi_arr[-1][j, 1] - 1. / 3. * self.psi_arr[-1][j, 2]
self.psi_arr[-1][j, -1] = 4. * buf * self.psi_arr[-1][j, -2]
self.psi_arr[-1][j, -1] -= buf * self.psi_arr[-1][j, -3]
# Compute the new p via the gradient projection method
self.alpha.append(get_alpha(self, ind))
self.p_arr.append(self.p_arr[-1] - self.alpha[-1] * self.a ** 2 * self.v * self.psi_arr[-1][:,-1])
self.p_arr[-1][self.p_arr[-1] < self.p_min] = self.p_min
self.p_arr[-1][self.p_arr[-1] > self.p_max] = self.p_max
self.final_step = ind
ind += 1
err = criterion(self)
print(err)
self.err.append(err)
if max_steps is None:
apr_max_steps = True
else:
apr_max_steps = ind < max_steps
# Prepare arrays for the next step
self.x_arr.append(array(self.f, [self.l, self.T], [self.dh, self.dt]))
self.x_arr[-1][0,:] = array(self.fi, [self.l], [self.dh])
self.psi_arr.append(array(self.f, [self.l, self.T], [self.dh, self.dt]))
self.x_arr.pop()
self.psi_arr.pop()
return self
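# A usage sketch (illustrative; every parameter value below is an assumption,
# chosen only so that N = l/dh and M = T/dt stay consistent):
#
#     params = {'a': 1., 'l': 1., 'v': 1., 'T': 1.,
#               'p(t)': lambda t: 0., 'f(s, t)': lambda s, t: 0.,
#               'p_min': 0., 'p_max': 1., 'R': 1.,
#               'fi(s)': lambda s: 0., 'y(s)': lambda s: s,
#               'dh': 0.1, 'dt': 0.01, 'N': 10, 'M': 100}
#     model = Lab1OptCtrlModel(params)
#     model.solve(criterion_1, get_alpha_5, eps=10**-2, max_steps=50)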
| bsd-3-clause |
takahirox/three.js | utils/converters/obj/convert_obj_three_for_python3.py | 68 | 48066 | """Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", createScene );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", createScene );
function createScene( geometry, materials ) {
var mesh = new THREE.Mesh( geometry, new THREE.MultiMaterial( materials ) );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model now consists of all the files in this folder (OBJ, MTL, and a number of images)
- this converter assumes all files stay in the same folder
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in range(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath.replace("\\", "/"))
return texture_file
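# Illustrative: texture_relative_path('C:\\textures\\wood.jpg') returns
# 'wood.jpg'; backslashes are normalized so Windows-style MTL paths resolve
# on any OS.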
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "newmtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Material start
# newmtl identifier
if chunks[0] == "newmtl":
if len(chunks) > 1:
identifier = chunks[1]
else:
identifier = ""
if not identifier in materials:
materials[identifier] = {}
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["transparent"] = True
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Dissolves
# d 0.9
if chunks[0] == "d" and len(chunks) == 2:
materials[identifier]["opacity"] = float(chunks[1])
if materials[identifier]["opacity"] < 1.0:
materials[identifier]["transparent"] = True
# Transparency
# Tr 0.1
if chunks[0] == "Tr" and len(chunks) == 2:
materials[identifier]["opacity"] = 1.0 - float(chunks[1])
if materials[identifier]["opacity"] < 1.0:
materials[identifier]["transparent"] = True
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
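# Illustrative examples of the formats listed above (not part of the converter):
#     parse_vertex("5")     -> {'v': 5, 't': 0, 'n': 0}
#     parse_vertex("5/7")   -> {'v': 5, 't': 7, 'n': 0}
#     parse_vertex("5/7/3") -> {'v': 5, 't': 7, 'n': 3}
#     parse_vertex("5//3")  -> {'v': 5, 't': 0, 'n': 3}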
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
material = ""
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "usemtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl":
if len(chunks) > 1:
material = chunks[1]
else:
material = ""
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
# Precompute vert / normal / uv lists
# for negative index lookup
vertlen = len(vertices) + 1
normlen = len(normals) + 1
uvlen = len(uvs) + 1
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
if vertex['v'] < 0:
vertex['v'] += vertlen
vertex_index.append(vertex['v'])
if vertex['t']:
if vertex['t'] < 0:
vertex['t'] += uvlen
uv_index.append(vertex['t'])
if vertex['n']:
if vertex['n'] < 0:
vertex['n'] += normlen
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
# must clamp in case of polygons bigger than quads
for i in range(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in range(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in range(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
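# Example: packs normalized RGB floats into a single integer, e.g.
#   hexcolor([1.0, 0.5, 0.0]) -> 0xff7f00 (16744192)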
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print("WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices))
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print("adding [%s] with %d vertices" % (name, n_morph_vertices))
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print("WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices))
elif n_faces != n_morph_faces:
print("WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces))
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print("adding [%s] with %d face colors" % (name, len(morphFaceColors)))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
# this index is identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
Potential edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# there is no specified MTL / MTL loading failed,
# or if there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print("Couldn't find [%s]" % fname)
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
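# Example (assuming SHADING == "smooth"): a face such as
#   f = {'vertex': [1, 2, 3], 'normal': [1, 2, 3], 'uv': []}
# ends up in sort_faces([f])['triangles_smooth'], so the binary writer
# can emit fixed-size records per bucket.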
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as Python module and call this method.
"""
if not file_exists(infile):
print("Couldn't find [%s]" % infile)
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print("%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials)))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
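# Example: aligns a section to a 4-byte boundary, e.g. after writing
# n = 6 bytes, add_padding(buffer, 6) appends 2 zero bytes (4 - 6 % 4).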
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print("Couldn't find [%s]" % infile)
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
# signature
signature = struct.pack(b'<12s', b'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write(b"".join(buffer))
out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print("Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0]))
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print("Converting [%s] into [%s] ..." % (infile, outfile))
if morphfiles:
print("Morphs [%s]" % morphfiles)
if colorfiles:
print("Colors [%s]" % colorfiles)
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
| mit |
Grirrane/odoo | addons/product/partner.py | 8 | 1627 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
_name = 'res.partner'
_inherit = 'res.partner'
_columns = {
'property_product_pricelist': fields.property(
type='many2one',
relation='product.pricelist',
domain=[('type','=','sale')],
string="Sale Pricelist",
help="This pricelist will be used, instead of the default one, for sales to the current partner"),
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['property_product_pricelist']
| agpl-3.0 |
Lucterios2/documents | lucterios/documents/__init__.py | 1 | 1251 | # -*- coding: utf-8 -*-
'''
lucterios.contacts package
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from os.path import dirname, join, isfile
def get_build():
file_name = join(dirname(__file__), 'build')
if isfile(file_name):
with open(file_name) as flb:
return flb.read()
return "0"
__version__ = "2.5.2." + get_build()
def __title__():
from django.utils.translation import ugettext_lazy as _
return _("Lucterios documents")
def link():
return []
| gpl-3.0 |
ubic135/odoo-design | addons/analytic/report/analytic_inverted_balance.py | 23 | 5761 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_inverted_analytic_balance(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_inverted_analytic_balance, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'lines_g': self._lines_g,
'lines_a': self._lines_a,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'sum_balance': self._sum_balance,
'sum_quantity': self._sum_quantity,
})
def _lines_g(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT aa.name AS name, aa.code AS code, "
"sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, aa.id AS id \
FROM account_analytic_line AS aal, account_account AS aa \
WHERE (aal.general_account_id=aa.id) "
"AND (aal.account_id IN %s) "
"AND (date>=%s) AND (date<=%s) AND aa.active \
GROUP BY aal.general_account_id, aa.name, aa.code, aal.code, aa.id "
"ORDER BY aal.code",
(tuple(ids), date1, date2))
res = self.cr.dictfetchall()
for r in res:
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _lines_a(self, accounts, general_account_id, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, "
"aaa.code AS code, aaa.name AS name, account_id \
FROM account_analytic_line AS aal, "
"account_analytic_account AS aaa \
WHERE aal.account_id=aaa.id AND aal.account_id IN %s "
"AND aal.general_account_id=%s AND aal.date>=%s "
"AND aal.date<=%s \
GROUP BY aal.account_id, general_account_id, aaa.code, aaa.name "
"ORDER BY aal.account_id",
(tuple(ids), general_account_id, date1, date2))
res = self.cr.dictfetchall()
aaa_obj = self.pool.get('account.analytic.account')
res2 = aaa_obj.read(self.cr, self.uid, ids, ['complete_name'])
complete_name = {}
for r in res2:
complete_name[r['id']] = r['complete_name']
for r in res:
r['complete_name'] = complete_name[r['account_id']]
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _sum_debit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT -sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_debit(accounts, date1, date2)
credit = self._sum_credit(accounts, date1, date2)
return (debit-credit)
def _sum_quantity(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
class report_invertedanalyticbalance(osv.AbstractModel):
_name = 'report.account.report_invertedanalyticbalance'
_inherit = 'report.abstract_report'
_template = 'analytic.report_invertedanalyticbalance'
_wrapped_report_class = account_inverted_analytic_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jatinmistry13/pattern | pattern/web/pdf/pdfparser.py | 53 | 27593 | #!/usr/bin/env python2
import sys
import re
import struct
try:
import hashlib as md5
except ImportError:
import md5
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from psparser import PSStackParser
from psparser import PSSyntaxError, PSEOF
from psparser import literal_name
from psparser import LIT, KWD, STRICT
from pdftypes import PDFException, PDFTypeError, PDFNotImplementedError
from pdftypes import PDFStream, PDFObjRef
from pdftypes import resolve1, decipher_all
from pdftypes import int_value, float_value, num_value
from pdftypes import str_value, list_value, dict_value, stream_value
from arcfour import Arcfour
from utils import choplist, nunpack
from utils import decode_text, ObjIdRange
## Exceptions
##
class PDFSyntaxError(PDFException): pass
class PDFNoValidXRef(PDFSyntaxError): pass
class PDFNoOutlines(PDFException): pass
class PDFDestinationNotFound(PDFException): pass
class PDFEncryptionError(PDFException): pass
class PDFPasswordIncorrect(PDFEncryptionError): pass
# some predefined literals and keywords.
LITERAL_OBJSTM = LIT('ObjStm')
LITERAL_XREF = LIT('XRef')
LITERAL_PAGE = LIT('Page')
LITERAL_PAGES = LIT('Pages')
LITERAL_CATALOG = LIT('Catalog')
## XRefs
##
class PDFBaseXRef(object):
def get_trailer(self):
raise NotImplementedError
def get_objids(self):
return []
def get_pos(self, objid):
raise KeyError(objid)
## PDFXRef
##
class PDFXRef(PDFBaseXRef):
def __init__(self):
self.offsets = {}
self.trailer = {}
return
def load(self, parser, debug=0):
while 1:
try:
(pos, line) = parser.nextline()
if not line.strip(): continue
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
if not line:
raise PDFNoValidXRef('Premature eof: %r' % parser)
if line.startswith('trailer'):
parser.seek(pos)
break
f = line.strip().split(' ')
if len(f) != 2:
raise PDFNoValidXRef('Trailer not found: %r: line=%r' % (parser, line))
try:
(start, nobjs) = map(long, f)
except ValueError:
raise PDFNoValidXRef('Invalid line: %r: line=%r' % (parser, line))
for objid in xrange(start, start+nobjs):
try:
(_, line) = parser.nextline()
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
f = line.strip().split(' ')
if len(f) != 3:
raise PDFNoValidXRef('Invalid XRef format: %r, line=%r' % (parser, line))
(pos, genno, use) = f
if use != 'n': continue
self.offsets[objid] = (int(genno), long(pos))
if 1 <= debug:
print >>sys.stderr, 'xref objects:', self.offsets
self.load_trailer(parser)
return
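# Example of the classic xref section consumed above (offsets are
# 10 digits, generation numbers 5 digits, 'n' = in use, 'f' = free):
#   0 3
#   0000000000 65535 f
#   0000000017 00000 n
#   0000000081 00000 n
#   trailer
#   << /Size 3 /Root 1 0 R >>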
KEYWORD_TRAILER = KWD('trailer')
def load_trailer(self, parser):
try:
(_,kwd) = parser.nexttoken()
assert kwd is self.KEYWORD_TRAILER
(_,dic) = parser.nextobject()
except PSEOF:
x = parser.pop(1)
if not x:
raise PDFNoValidXRef('Unexpected EOF - file corrupted')
(_,dic) = x[0]
self.trailer.update(dict_value(dic))
return
PDFOBJ_CUE = re.compile(r'^(\d+)\s+(\d+)\s+obj\b')
def load_fallback(self, parser, debug=0):
parser.seek(0)
while 1:
try:
(pos, line) = parser.nextline()
except PSEOF:
break
if line.startswith('trailer'):
parser.seek(pos)
self.load_trailer(parser)
if 1 <= debug:
print >>sys.stderr, 'trailer: %r' % self.get_trailer()
break
m = self.PDFOBJ_CUE.match(line)
if not m: continue
(objid, genno) = m.groups()
self.offsets[int(objid)] = (0, pos)
return
def get_trailer(self):
return self.trailer
def get_objids(self):
return self.offsets.iterkeys()
def get_pos(self, objid):
try:
(genno, pos) = self.offsets[objid]
except KeyError:
raise
return (None, pos)
## PDFXRefStream
##
class PDFXRefStream(PDFBaseXRef):
def __init__(self):
self.data = None
self.entlen = None
self.fl1 = self.fl2 = self.fl3 = None
self.objid_ranges = []
return
def __repr__(self):
return '<PDFXRefStream: fields=%d,%d,%d>' % (self.fl1, self.fl2, self.fl3)
def load(self, parser, debug=0):
(_,objid) = parser.nexttoken() # ignored
(_,genno) = parser.nexttoken() # ignored
(_,kwd) = parser.nexttoken()
(_,stream) = parser.nextobject()
if not isinstance(stream, PDFStream) or stream['Type'] is not LITERAL_XREF:
raise PDFNoValidXRef('Invalid PDF stream spec.')
size = stream['Size']
index_array = stream.get('Index', (0,size))
if len(index_array) % 2 != 0:
raise PDFSyntaxError('Invalid index number')
self.objid_ranges.extend( ObjIdRange(start, nobjs)
for (start,nobjs) in choplist(2, index_array) )
(self.fl1, self.fl2, self.fl3) = stream['W']
self.data = stream.get_data()
self.entlen = self.fl1+self.fl2+self.fl3
self.trailer = stream.attrs
if 1 <= debug:
print >>sys.stderr, ('xref stream: objid=%s, fields=%d,%d,%d' %
(', '.join(map(repr, self.objid_ranges)),
self.fl1, self.fl2, self.fl3))
return
def get_trailer(self):
return self.trailer
def get_objids(self):
for objid_range in self.objid_ranges:
for x in xrange(objid_range.get_start_id(), objid_range.get_end_id()+1):
yield x
return
def get_pos(self, objid):
offset = 0
found = False
for objid_range in self.objid_ranges:
if objid >= objid_range.get_start_id() and objid <= objid_range.get_end_id():
offset += objid - objid_range.get_start_id()
found = True
break
else:
offset += objid_range.get_nobjs()
if not found: raise KeyError(objid)
i = self.entlen * offset
ent = self.data[i:i+self.entlen]
f1 = nunpack(ent[:self.fl1], 1)
if f1 == 1:
pos = nunpack(ent[self.fl1:self.fl1+self.fl2])
genno = nunpack(ent[self.fl1+self.fl2:])
return (None, pos)
elif f1 == 2:
objid = nunpack(ent[self.fl1:self.fl1+self.fl2])
index = nunpack(ent[self.fl1+self.fl2:])
return (objid, index)
# this is a free object
raise KeyError(objid)
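# Example: with W = [1, 2, 1] each entry is 4 bytes; the bytes
# '\x01\x02\x9a\x00' decode as a type-1 (regular) object at byte
# offset 0x029a, while a type-2 entry points into an object stream.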
## PDFPage
##
class PDFPage(object):
"""An object that holds the information about a page.
A PDFPage object is merely a convenience class that has a set
of keys and values, which describe the properties of a page
and point to its contents.
Attributes:
doc: a PDFDocument object.
pageid: any Python object that can uniquely identify the page.
attrs: a dictionary of page attributes.
contents: a list of PDFStream objects that represents the page content.
lastmod: the last modified time of the page.
resources: a list of resources used by the page.
mediabox: the physical size of the page.
cropbox: the crop rectangle of the page.
rotate: the page rotation (in degree).
annots: the page annotations.
beads: a chain that represents natural reading order.
"""
def __init__(self, doc, pageid, attrs):
"""Initialize a page object.
doc: a PDFDocument object.
pageid: any Python object that can uniquely identify the page.
attrs: a dictionary of page attributes.
"""
self.doc = doc
self.pageid = pageid
self.attrs = dict_value(attrs)
self.lastmod = resolve1(self.attrs.get('LastModified'))
self.resources = resolve1(self.attrs['Resources'])
self.mediabox = resolve1(self.attrs['MediaBox'])
if 'CropBox' in self.attrs:
self.cropbox = resolve1(self.attrs['CropBox'])
else:
self.cropbox = self.mediabox
self.rotate = (self.attrs.get('Rotate', 0)+360) % 360
self.annots = self.attrs.get('Annots')
self.beads = self.attrs.get('B')
if 'Contents' in self.attrs:
contents = resolve1(self.attrs['Contents'])
else:
contents = []
if not isinstance(contents, list):
contents = [ contents ]
self.contents = contents
return
def __repr__(self):
return '<PDFPage: Resources=%r, MediaBox=%r>' % (self.resources, self.mediabox)
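# Example usage (hypothetical document), iterating pages through the
# owning PDFDocument:
#   for page in doc.get_pages():
#       print page.pageid, page.mediabox, len(page.contents)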
## PDFDocument
##
class PDFDocument(object):
"""PDFDocument object represents a PDF document.
Since a PDF file can be very big, normally it is not loaded all at
once. So a PDF document has to cooperate with a PDF parser in order
to dynamically import the data as processing goes.
Typical usage:
doc = PDFDocument()
doc.set_parser(parser)
doc.initialize(password)
obj = doc.getobj(objid)
"""
debug = 0
def __init__(self, caching=True):
self.caching = caching
self.xrefs = []
self.info = []
self.catalog = None
self.encryption = None
self.decipher = None
self._parser = None
self._cached_objs = {}
self._parsed_objs = {}
return
def set_parser(self, parser):
"Set the document to use a given PDFParser object."
if self._parser: return
self._parser = parser
# Retrieve the information of each header that was appended
# (maybe multiple times) at the end of the document.
self.xrefs = parser.read_xref()
for xref in self.xrefs:
trailer = xref.get_trailer()
if not trailer: continue
# If there's an encryption info, remember it.
if 'Encrypt' in trailer:
#assert not self.encryption
self.encryption = (list_value(trailer['ID']),
dict_value(trailer['Encrypt']))
if 'Info' in trailer:
self.info.append(dict_value(trailer['Info']))
if 'Root' in trailer:
# Every PDF file must have exactly one /Root dictionary.
self.catalog = dict_value(trailer['Root'])
break
else:
raise PDFSyntaxError('No /Root object! - Is this really a PDF?')
if self.catalog.get('Type') is not LITERAL_CATALOG:
if STRICT:
raise PDFSyntaxError('Catalog not found!')
return
# initialize(password='')
# Perform the initialization with a given password.
# This step is mandatory even if there's no password associated
# with the document.
PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz'
def initialize(self, password=''):
if not self.encryption:
self.is_printable = self.is_modifiable = self.is_extractable = True
return
(docid, param) = self.encryption
if literal_name(param.get('Filter')) != 'Standard':
raise PDFEncryptionError('Unknown filter: param=%r' % param)
V = int_value(param.get('V', 0))
if not (V == 1 or V == 2):
raise PDFEncryptionError('Unknown algorithm: param=%r' % param)
length = int_value(param.get('Length', 40)) # Key length (bits)
O = str_value(param['O'])
R = int_value(param['R']) # Revision
if 5 <= R:
raise PDFEncryptionError('Unknown revision: %r' % R)
U = str_value(param['U'])
P = int_value(param['P'])
self.is_printable = bool(P & 4)
self.is_modifiable = bool(P & 8)
self.is_extractable = bool(P & 16)
# Algorithm 3.2
password = (password+self.PASSWORD_PADDING)[:32] # 1
hash = md5.md5(password) # 2
hash.update(O) # 3
hash.update(struct.pack('<l', P)) # 4
hash.update(docid[0]) # 5
if 4 <= R:
# 6
raise PDFNotImplementedError('Revision 4 encryption is currently unsupported')
if 3 <= R:
# 8
for _ in xrange(50):
hash = md5.md5(hash.digest()[:length/8])
key = hash.digest()[:length/8]
if R == 2:
# Algorithm 3.4
u1 = Arcfour(key).process(self.PASSWORD_PADDING)
elif R == 3:
# Algorithm 3.5
hash = md5.md5(self.PASSWORD_PADDING) # 2
hash.update(docid[0]) # 3
x = Arcfour(key).process(hash.digest()[:16]) # 4
for i in xrange(1,19+1):
k = ''.join( chr(ord(c) ^ i) for c in key )
x = Arcfour(k).process(x)
u1 = x+x # 32bytes total
if R == 2:
is_authenticated = (u1 == U)
else:
is_authenticated = (u1[:16] == U[:16])
if not is_authenticated:
raise PDFPasswordIncorrect
self.decrypt_key = key
self.decipher = self.decrypt_rc4 # XXX may be AES
return
def decrypt_rc4(self, objid, genno, data):
key = self.decrypt_key + struct.pack('<L',objid)[:3]+struct.pack('<L',genno)[:2]
hash = md5.md5(key)
key = hash.digest()[:min(len(key),16)]
return Arcfour(key).process(data)
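# Example: per the standard security handler, the per-object RC4 key
# computed above is md5(file_key + objid[:3] + genno[:2]) truncated to
# at most 16 bytes, with objid/genno packed little-endian.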
KEYWORD_OBJ = KWD('obj')
def getobj(self, objid):
if not self.xrefs:
raise PDFException('PDFDocument is not initialized')
if 2 <= self.debug:
print >>sys.stderr, 'getobj: objid=%r' % (objid)
if objid in self._cached_objs:
genno = 0
obj = self._cached_objs[objid]
else:
for xref in self.xrefs:
try:
(strmid, index) = xref.get_pos(objid)
break
except KeyError:
pass
else:
if STRICT:
raise PDFSyntaxError('Cannot locate objid=%r' % objid)
# return null for a nonexistent reference.
return None
if strmid:
stream = stream_value(self.getobj(strmid))
if stream.get('Type') is not LITERAL_OBJSTM:
if STRICT:
raise PDFSyntaxError('Not a stream object: %r' % stream)
try:
n = stream['N']
except KeyError:
if STRICT:
raise PDFSyntaxError('N is not defined: %r' % stream)
n = 0
if strmid in self._parsed_objs:
objs = self._parsed_objs[strmid]
else:
parser = PDFStreamParser(stream.get_data())
parser.set_document(self)
objs = []
try:
while 1:
(_,obj) = parser.nextobject()
objs.append(obj)
except PSEOF:
pass
if self.caching:
self._parsed_objs[strmid] = objs
genno = 0
i = n*2+index
try:
obj = objs[i]
except IndexError:
raise PDFSyntaxError('Invalid object number: objid=%r' % (objid))
if isinstance(obj, PDFStream):
obj.set_objid(objid, 0)
else:
self._parser.seek(index)
(_,objid1) = self._parser.nexttoken() # objid
(_,genno) = self._parser.nexttoken() # genno
(_,kwd) = self._parser.nexttoken()
# #### hack around malformed pdf files
#assert objid1 == objid, (objid, objid1)
if objid1 != objid:
x = []
while kwd is not self.KEYWORD_OBJ:
(_,kwd) = self._parser.nexttoken()
x.append(kwd)
if x:
objid1 = x[-2]
genno = x[-1]
# #### end hack around malformed pdf files
if kwd is not self.KEYWORD_OBJ:
raise PDFSyntaxError('Invalid object spec: offset=%r' % index)
try:
(_,obj) = self._parser.nextobject()
if isinstance(obj, PDFStream):
obj.set_objid(objid, genno)
except PSEOF:
return None
if 2 <= self.debug:
print >>sys.stderr, 'register: objid=%r: %r' % (objid, obj)
if self.caching:
self._cached_objs[objid] = obj
if self.decipher:
obj = decipher_all(self.decipher, objid, genno, obj)
return obj
INHERITABLE_ATTRS = set(['Resources', 'MediaBox', 'CropBox', 'Rotate'])
def get_pages(self):
if not self.xrefs:
raise PDFException('PDFDocument is not initialized')
def search(obj, parent):
if isinstance(obj, int):
objid = obj
tree = dict_value(self.getobj(objid)).copy()
else:
objid = obj.objid
tree = dict_value(obj).copy()
for (k,v) in parent.iteritems():
if k in self.INHERITABLE_ATTRS and k not in tree:
tree[k] = v
if tree.get('Type') is LITERAL_PAGES and 'Kids' in tree:
if 1 <= self.debug:
print >>sys.stderr, 'Pages: Kids=%r' % tree['Kids']
for c in list_value(tree['Kids']):
for x in search(c, tree):
yield x
elif tree.get('Type') is LITERAL_PAGE:
if 1 <= self.debug:
print >>sys.stderr, 'Page: %r' % tree
yield (objid, tree)
if 'Pages' not in self.catalog: return
for (pageid,tree) in search(self.catalog['Pages'], self.catalog):
yield PDFPage(self, pageid, tree)
return
def get_outlines(self):
if 'Outlines' not in self.catalog:
raise PDFNoOutlines
def search(entry, level):
entry = dict_value(entry)
if 'Title' in entry:
if 'A' in entry or 'Dest' in entry:
title = decode_text(str_value(entry['Title']))
dest = entry.get('Dest')
action = entry.get('A')
se = entry.get('SE')
yield (level, title, dest, action, se)
if 'First' in entry and 'Last' in entry:
for x in search(entry['First'], level+1):
yield x
if 'Next' in entry:
for x in search(entry['Next'], level):
yield x
return
return search(self.catalog['Outlines'], 0)
def lookup_name(self, cat, key):
try:
names = dict_value(self.catalog['Names'])
except (PDFTypeError, KeyError):
raise KeyError((cat,key))
# may raise KeyError
d0 = dict_value(names[cat])
def lookup(d):
if 'Limits' in d:
(k1,k2) = list_value(d['Limits'])
if key < k1 or k2 < key: return None
if 'Names' in d:
objs = list_value(d['Names'])
names = dict(choplist(2, objs))
return names[key]
if 'Kids' in d:
for c in list_value(d['Kids']):
v = lookup(dict_value(c))
if v: return v
raise KeyError((cat,key))
return lookup(d0)
def get_dest(self, name):
try:
# PDF-1.2 or later
obj = self.lookup_name('Dests', name)
except KeyError:
# PDF-1.1 or prior
if 'Dests' not in self.catalog:
raise PDFDestinationNotFound(name)
d0 = dict_value(self.catalog['Dests'])
if name not in d0:
raise PDFDestinationNotFound(name)
obj = d0[name]
return obj
## PDFParser
##
class PDFParser(PSStackParser):
"""
PDFParser fetches PDF objects from a file stream.
It can handle indirect references by referring to
a PDF document set by set_document method.
It also reads XRefs at the end of every PDF file.
Typical usage:
parser = PDFParser(fp)
parser.read_xref()
parser.set_document(doc)
parser.seek(offset)
parser.nextobject()
"""
def __init__(self, fp):
PSStackParser.__init__(self, fp)
self.doc = None
self.fallback = False
return
def set_document(self, doc):
"""Associates the parser with a PDFDocument object."""
self.doc = doc
return
KEYWORD_R = KWD('R')
KEYWORD_NULL = KWD('null')
KEYWORD_ENDOBJ = KWD('endobj')
KEYWORD_STREAM = KWD('stream')
KEYWORD_XREF = KWD('xref')
KEYWORD_STARTXREF = KWD('startxref')
def do_keyword(self, pos, token):
"""Handles PDF-related keywords."""
if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF):
self.add_results(*self.pop(1))
elif token is self.KEYWORD_ENDOBJ:
self.add_results(*self.pop(4))
elif token is self.KEYWORD_NULL:
# null object
self.push((pos, None))
elif token is self.KEYWORD_R:
# reference to indirect object
try:
((_,objid), (_,genno)) = self.pop(2)
(objid, genno) = (int(objid), int(genno))
obj = PDFObjRef(self.doc, objid, genno)
self.push((pos, obj))
except PSSyntaxError:
pass
elif token is self.KEYWORD_STREAM:
# stream object
((_,dic),) = self.pop(1)
dic = dict_value(dic)
objlen = 0
if not self.fallback:
try:
objlen = int_value(dic['Length'])
except KeyError:
if STRICT:
raise PDFSyntaxError('/Length is undefined: %r' % dic)
self.seek(pos)
try:
(_, line) = self.nextline() # 'stream'
except PSEOF:
if STRICT:
raise PDFSyntaxError('Unexpected EOF')
return
pos += len(line)
self.fp.seek(pos)
data = self.fp.read(objlen)
self.seek(pos+objlen)
while 1:
try:
(linepos, line) = self.nextline()
except PSEOF:
if STRICT:
raise PDFSyntaxError('Unexpected EOF')
break
if 'endstream' in line:
i = line.index('endstream')
objlen += i
data += line[:i]
break
objlen += len(line)
data += line
self.seek(pos+objlen)
# XXX limit objlen not to exceed object boundary
if 2 <= self.debug:
print >>sys.stderr, 'Stream: pos=%d, objlen=%d, dic=%r, data=%r...' % \
(pos, objlen, dic, data[:10])
obj = PDFStream(dic, data, self.doc.decipher)
self.push((pos, obj))
else:
# others
self.push((pos, token))
return
def find_xref(self):
"""Internal function used to locate the first XRef."""
# search the last xref table by scanning the file backwards.
prev = None
for line in self.revreadlines():
line = line.strip()
if 2 <= self.debug:
print >>sys.stderr, 'find_xref: %r' % line
if line == 'startxref': break
if line:
prev = line
else:
raise PDFNoValidXRef('Unexpected EOF')
if 1 <= self.debug:
print >>sys.stderr, 'xref found: pos=%r' % prev
return long(prev)
# read xref table
def read_xref_from(self, start, xrefs):
"""Reads XRefs from the given location."""
self.seek(start)
self.reset()
try:
(pos, token) = self.nexttoken()
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF')
if 2 <= self.debug:
print >>sys.stderr, 'read_xref_from: start=%d, token=%r' % (start, token)
if isinstance(token, int):
# XRefStream: PDF-1.5
self.seek(pos)
self.reset()
xref = PDFXRefStream()
xref.load(self, debug=self.debug)
else:
if token is self.KEYWORD_XREF:
self.nextline()
xref = PDFXRef()
xref.load(self, debug=self.debug)
xrefs.append(xref)
trailer = xref.get_trailer()
if 1 <= self.debug:
print >>sys.stderr, 'trailer: %r' % trailer
if 'XRefStm' in trailer:
pos = int_value(trailer['XRefStm'])
self.read_xref_from(pos, xrefs)
if 'Prev' in trailer:
# find previous xref
pos = int_value(trailer['Prev'])
self.read_xref_from(pos, xrefs)
return
# read xref tables and trailers
def read_xref(self):
"""Reads all the XRefs in the PDF file and returns them."""
xrefs = []
try:
pos = self.find_xref()
self.read_xref_from(pos, xrefs)
except PDFNoValidXRef:
# fallback
if 1 <= self.debug:
print >>sys.stderr, 'no xref, fallback'
self.fallback = True
xref = PDFXRef()
xref.load_fallback(self)
xrefs.append(xref)
return xrefs
## PDFStreamParser
##
class PDFStreamParser(PDFParser):
"""
PDFStreamParser is used to parse PDF content streams
that are contained in each page and have instructions
for rendering the page. A reference to a PDF document is
needed because a PDF content stream can also have
indirect references to other objects in the same document.
"""
def __init__(self, data):
PDFParser.__init__(self, StringIO(data))
return
def flush(self):
self.add_results(*self.popall())
return
def do_keyword(self, pos, token):
if token is self.KEYWORD_R:
# reference to indirect object
try:
((_,objid), (_,genno)) = self.pop(2)
(objid, genno) = (int(objid), int(genno))
obj = PDFObjRef(self.doc, objid, genno)
self.push((pos, obj))
except PSSyntaxError:
pass
return
# others
self.push((pos, token))
return
| bsd-3-clause |
nicolas-f/I-Simpa | currentRelease/SystemScript/graphy/backends/google_chart_api/pie_chart_test.py | 33 | 5892 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for Graphy and Google Chart API backend."""
import warnings
from graphy import graphy_test
from graphy import pie_chart
from graphy.backends import google_chart_api
from graphy.backends.google_chart_api import base_encoder_test
# Extend BaseChartTest so that we pick up & repeat all the line tests which
# Pie Charts should continue to satisfy
class PieChartTest(base_encoder_test.BaseChartTest):
def tearDown(self):
warnings.resetwarnings()
super(PieChartTest, self).tearDown()
def GetChart(self, *args, **kwargs):
return google_chart_api.PieChart(*args, **kwargs)
def AddToChart(self, chart, points, color=None, label=None):
return chart.AddSegment(points[0], color=color, label=label)
def testCanRemoveDefaultFormatters(self):
# Override this test, as pie charts don't have default formatters.
pass
def testChartType(self):
self.chart.display.is3d = False
self.assertEqual(self.Param('cht'), 'p')
self.chart.display.is3d = True
self.assertEqual(self.Param('cht'), 'p3')
def testEmptyChart(self):
self.assertEqual(self.Param('chd'), 's:')
self.assertEqual(self.Param('chco'), '')
self.assertEqual(self.Param('chl'), '')
def testChartCreation(self):
self.chart = self.GetChart([1,2,3], ['Mouse', 'Cat', 'Dog'])
self.assertEqual(self.Param('chd'), 's:Up9')
self.assertEqual(self.Param('chl'), 'Mouse|Cat|Dog')
self.assertEqual(self.Param('cht'), 'p')
# TODO: Get 'None' labels to work and test them
def testAddSegment(self):
self.chart = self.GetChart([1,2,3], ['Mouse', 'Cat', 'Dog'])
self.chart.AddSegment(4, label='Horse')
self.assertEqual(self.Param('chd'), 's:Pfu9')
self.assertEqual(self.Param('chl'), 'Mouse|Cat|Dog|Horse')
# TODO: Remove this when AddSegments is removed
def testAddMultipleSegments(self):
warnings.filterwarnings('ignore')
self.chart.AddSegments([1,2,3],
['Mouse', 'Cat', 'Dog'],
['ff0000', '00ff00', '0000ff'])
self.assertEqual(self.Param('chd'), 's:Up9')
self.assertEqual(self.Param('chl'), 'Mouse|Cat|Dog')
self.assertEqual(self.Param('chco'), 'ff0000,00ff00,0000ff')
# skip two colors
self.chart.AddSegments([4,5,6], ['Horse', 'Moose', 'Elephant'], ['cccccc'])
self.assertEqual(self.Param('chd'), 's:KUfpz9')
self.assertEqual(self.Param('chl'), 'Mouse|Cat|Dog|Horse|Moose|Elephant')
self.assertEqual(self.Param('chco'), 'ff0000,00ff00,0000ff,cccccc')
def testMultiplePies(self):
self.chart.AddPie([1,2,3],
['Mouse', 'Cat', 'Dog'],
['ff0000', '00ff00', '0000ff'])
self.assertEqual(self.Param('chd'), 's:Up9')
self.assertEqual(self.Param('chl'), 'Mouse|Cat|Dog')
self.assertEqual(self.Param('chco'), 'ff0000,00ff00,0000ff')
self.assertEqual(self.Param('cht'), 'p')
# skip two colors
self.chart.AddPie([4,5,6], ['Horse', 'Moose', 'Elephant'], ['cccccc'])
self.assertEqual(self.Param('chd'), 's:KUf,pz9')
self.assertEqual(self.Param('chl'), 'Mouse|Cat|Dog|Horse|Moose|Elephant')
self.assertEqual(self.Param('chco'), 'ff0000,00ff00,0000ff,cccccc')
self.assertEqual(self.Param('cht'), 'pc')
def testMultiplePiesNo3d(self):
chart = self.GetChart([1,2,3], ['Mouse', 'Cat', 'Dog'])
chart.AddPie([4,5,6], ['Horse', 'Moose', 'Elephant'])
chart.display.is3d = True
warnings.filterwarnings('error')
self.assertRaises(RuntimeWarning, chart.display.Url, 320, 240)
def testAddSegmentByIndex(self):
self.chart = self.GetChart([1,2,3], ['Mouse', 'Cat', 'Dog'])
self.chart.AddSegment(4, 'Horse', pie_index=0)
self.assertEqual(self.Param('chd'), 's:Pfu9')
self.assertEqual(self.Param('chl'), 'Mouse|Cat|Dog|Horse')
self.chart.AddPie([4,5], ['Apple', 'Orange'], [])
self.chart.AddSegment(6, 'Watermelon', pie_index=1)
self.assertEqual(self.Param('chd'), 's:KUfp,pz9')
def testSetColors(self):
self.assertEqual(self.Param('chco'), '')
self.chart.AddSegment(1, label='Mouse')
self.chart.AddSegment(5, label='Moose')
self.chart.SetColors('000033', '0000ff')
self.assertEqual(self.Param('chco'), '000033,0000ff')
self.chart.AddSegment(6, label='Elephant')
self.assertEqual(self.Param('chco'), '000033,0000ff')
def testHugeSegmentSizes(self):
self.chart = self.GetChart([1000000000000000L,3000000000000000L],
['Big', 'Uber'])
self.assertEqual(self.Param('chd'), 's:U9')
self.chart.display.enhanced_encoding = True
self.assertEqual(self.Param('chd'), 'e:VV..')
def testSetSegmentSize(self):
segment1 = self.chart.AddSegment(1)
segment2 = self.chart.AddSegment(2)
self.assertEqual(self.Param('chd'), 's:f9')
segment2.size = 3
self.assertEquals(segment1.size, 1)
self.assertEquals(segment2.size, 3)
self.assertEqual(self.Param('chd'), 's:U9')
def testChartAngle(self):
self.assertTrue('chp' not in self.chart.display._Params(self.chart))
self.chart.display.angle = 3.1415
self.assertEqual(self.Param('chp'), '3.1415')
self.chart.display.angle = 0
self.assertTrue('chp' not in self.chart.display._Params(self.chart))
if __name__ == '__main__':
graphy_test.main()
| gpl-3.0 |
hivesolutions/netius | src/netius/extra/file.py | 1 | 31736 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import re
import datetime
import mimetypes
import netius.common
import netius.servers
BUFFER_SIZE = 32768
""" The size of the buffer that is going to be used when
sending the file to the client, this should be neither
too big nor too small (as both situations would create problems) """
FOLDER_SVG = "<svg aria-hidden=\"true\" class=\"octicon octicon-file-directory\" height=\"16\" version=\"1.1\" viewBox=\"0 0 14 16\" width=\"14\"><path d=\"M13 4H7V3c0-.66-.31-1-1-1H1c-.55 0-1 .45-1 1v10c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V5c0-.55-.45-1-1-1zM6 4H1V3h5v1z\"></path></svg>"
""" The vector code to be used for the icon that represents
a folder under the directory listing """
FILE_SVG = "<svg aria-hidden=\"true\" class=\"octicon octicon-file-text\" height=\"16\" version=\"1.1\" viewBox=\"0 0 12 16\" width=\"12\"><path d=\"M6 5H2V4h4v1zM2 8h7V7H2v1zm0 2h7V9H2v1zm0 2h7v-1H2v1zm10-7.5V14c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V2c0-.55.45-1 1-1h7.5L12 4.5zM11 5L8 2H1v12h10V5z\"></path></svg>"
""" The vector code to be used for the icon that represents
a plain file under the directory listing """
EMPTY_GIF = "data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=="
""" Simple base 64 encoded empty gif to avoid possible image
corruption while rendering empty images on browser """
class FileServer(netius.servers.HTTP2Server):
"""
Simple implementation of a file server that is able to list files
for directories taking into account the base path values.
This is a synchronous implementation meaning that the server loop
will block for the various I/O operations to be performed.
Current implementation supports byte ranges so that partial retrieval
of a file is possible.
"""
def __init__(
self,
base_path = "",
style_urls = [],
index_files = [],
path_regex = [],
list_dirs = True,
list_engine = "base",
cors = False,
cache = 0,
*args,
**kwargs
):
netius.servers.HTTP2Server.__init__(self, *args, **kwargs)
self.base_path = base_path
self.style_urls = style_urls
self.index_files = index_files
self.path_regex = path_regex
self.list_dirs = list_dirs
self.list_engine = list_engine
self.cors = cors
self.cache = cache
@classmethod
def _sorter_build(cls, name = None):
def sorter(item):
is_dir = item["is_dir"]
is_top = item["name"] == ".."
is_dir_v = 0 if is_dir else 1
is_dir_v = -1 if is_top else is_dir_v
if name == "name": return (item["name"], is_dir_v)
if name == "modified": return (item["modified"], is_dir_v)
if name == "size": return (item["size"], is_dir_v)
if name == "type": return (item["type"], is_dir_v)
return (is_dir_v, item["name"])
return sorter
@classmethod
def _items_normalize(
cls,
items,
path,
pad = False,
space = True,
simplified = False
):
_items = []
for item in items:
if netius.legacy.PYTHON_3: item_s = item
else: item_s = item.encode("utf-8")
path_f = os.path.join(path, item)
if not os.path.exists(path_f): continue
is_dir = os.path.isdir(path_f)
item_s = item_s + "/" if is_dir and pad else item_s
item_q = netius.legacy.quote(item_s)
_time = os.path.getmtime(path_f)
date_time = datetime.datetime.utcfromtimestamp(_time)
time_s = date_time.strftime("%Y-%m-%d %H:%M")
size = 0 if is_dir else os.path.getsize(path_f)
size_s = netius.common.size_round_unit(
size,
space = space,
simplified = simplified
)
size_s = "-" if is_dir else size_s
type_s, _encoding = mimetypes.guess_type(path_f, strict = True)
type_s = type_s or "-"
type_s = "Directory" if is_dir else type_s
icon = FOLDER_SVG if is_dir else FILE_SVG
_item = dict(
name = item,
name_s = item_s,
name_q = item_q,
is_dir = is_dir,
path = path_f,
modified = time_s,
size = size,
size_s = size_s,
type = type_s,
type_s = type_s,
icon = icon
)
_items.append(_item)
return _items
@classmethod
def _gen_dir(cls, engine, path, path_v, query_m, style = True, style_urls = [], **kwargs):
gen_dir_method = getattr(cls, "_gen_dir_" + engine)
return gen_dir_method(
path,
path_v,
query_m,
style = style,
style_urls = style_urls,
**kwargs
)
@classmethod
def _gen_dir_base(cls, path, path_v, query_m, style = True, style_urls = [], **kwargs):
sort = query_m.get("sort", [])
direction = query_m.get("direction", [])
sort = sort[0] if sort else None
direction = direction[0] if direction else "asc"
reverse = direction == "desc"
_direction = "desc" if direction == "asc" else "asc"
items = os.listdir(path)
is_root = path_v == "" or path_v == "/"
if not is_root: items.insert(0, "..")
items = cls._items_normalize(items, path, pad = not style)
items.sort(key = lambda v: v["name"])
items.sort(
key = cls._sorter_build(name = sort),
reverse = reverse
)
path_n = path_v.rstrip("/")
path_b = []
current = str()
paths = path_n.split("/")
for item in paths[:-1]:
current += item + "/"
path_b.append(" <a href=\"%s\">%s</a> " % (current, item or "/"))
if not item: continue
path_b.append("<span>/</span>")
path_b.append(" <span>%s</span>" % (paths[-1] or "/"))
path_s = "".join(path_b)
path_s = path_s.strip()
for value in cls._gen_header(
"Index of %s" % (path_n or "/"),
style = style,
style_urls = style_urls
):
yield value
yield "<body>"
yield "<h1 class=\"path\">Index of %s</h1>" % path_s
yield "<hr/>"
yield "<table>"
yield "<thead>"
yield "<tr>"
yield "<th align=\"left\" width=\"350\">"
yield "<a href=\"?sort=name&direction=%s\" class=\"%s\">Name</a>" %\
(_direction, "selected" if sort == "name" else "")
yield "</th>"
yield "<th align=\"left\" width=\"130\">"
yield "<a href=\"?sort=modified&direction=%s\" class=\"%s\">Last Modified</a>" %\
(_direction, "selected" if sort == "modified" else "")
yield "</th>"
yield "<th align=\"left\" width=\"80\">"
yield "<a href=\"?sort=size&direction=%s\" class=\"%s\">Size</a></th>" %\
(_direction, "selected" if sort == "size" else "")
yield "</th>"
yield "<th align=\"left\" width=\"200\">"
yield "<a href=\"?sort=type&direction=%s\" class=\"%s\">Type</a></th>" %\
(_direction, "selected" if sort == "type" else "")
yield "</th>"
yield "</tr>"
yield "</thead>"
yield "<tbody>"
for item in items:
yield "<tr>"
yield "<td>"
if style: yield item["icon"]
yield "<a href=\"%s\">%s</a>" % (item["name_q"], item["name_s"])
yield "</td>"
yield "<td>%s</td>" % item["modified"]
yield "<td>%s</td>" % item["size_s"]
yield "<td>%s</td>" % item["type_s"]
yield "</tr>"
yield "</tbody>"
yield "</table>"
yield "<hr/>"
yield "<span>"
yield netius.IDENTIFIER
yield "</span>"
yield "</body>"
for value in cls._gen_footer(): yield value
@classmethod
def _gen_dir_apache(cls, path, path_v, query_m, **kwargs):
sort = query_m.get("sort", [])
direction = query_m.get("direction", [])
sort = sort[0] if sort else None
direction = direction[0] if direction else "asc"
reverse = direction == "desc"
_direction = "desc" if direction == "asc" else "asc"
items = os.listdir(path)
items.insert(0, "..")
items = cls._items_normalize(
items,
path,
pad = True,
space = False,
simplified = True
)
items.sort(key = lambda v: v["name"])
items.sort(
key = cls._sorter_build(name = sort),
reverse = reverse
)
path_n = path_v.rstrip("/")
for value in cls._gen_header("Index of %s" % (path_n or "/"), style = False, meta = False):
yield value
yield "<body>"
yield "<h1 class=\"path\">Index of %s</h1>" % (path_n or "/")
yield "<table>"
yield "<tr>"
yield "<th valign=\"top\"><img src=\"%s\" alt=\"[ICO]\"></th>" % EMPTY_GIF
yield "<th>"
yield "<a href=\"?sort=name&direction=%s\" class=\"%s\">Name</a>" %\
(_direction, "selected" if sort == "name" else "")
yield "</th>"
yield "<th>"
yield "<a href=\"?sort=modified&direction=%s\" class=\"%s\">Last modified</a>" %\
(_direction, "selected" if sort == "modified" else "")
yield "</th>"
yield "<th>"
yield "<a href=\"?sort=size&direction=%s\" class=\"%s\">Size</a></th>" %\
(_direction, "selected" if sort == "size" else "")
yield "</th>"
yield "<th>"
yield "<a href=\"?sort=description&direction=%s\" class=\"%s\">Description</a></th>" %\
(_direction, "selected" if sort == "description" else "")
yield "</th>"
yield "</tr>"
yield "<tr><th colspan=\"5\"><hr></th></tr>"
for item in items:
if item["name_s"] == "..": type_s = "PARENTDIR"
elif item["is_dir"]: type_s = "DIR"
else: type_s = "ARC"
if item["name_s"] == "..": name_s = "Parent Directory"
elif item["is_dir"]: name_s = item["name_s"] + "/"
else: name_s = item["name_s"]
if item["is_dir"]: name_q = item["name_q"] + "/"
else: name_q = item["name_q"]
yield "<tr>"
yield "<td valign=\"top\"><img src=\"%s\" alt=\"[%s]\"></td>" % (EMPTY_GIF, type_s)
yield "<td><a href=\"%s\">%s</a></td>" % (name_q, name_s)
yield "<td>%s</td>" % item["modified"]
yield "<td align=\"right\">%s</td>" % item["size_s"]
yield "<td>%s</td>" % item["type_s"]
yield "</tr>"
yield "\n"
yield "<tr><th colspan=\"5\"><hr></th></tr>"
yield "</table>"
yield "<address>%s</address>" % netius.IDENTIFIER
yield "</body>"
for value in cls._gen_footer(): yield value
@classmethod
def _gen_dir_legacy(cls, path, path_v, query_m, **kwargs):
max_length = kwargs.get("max_length", 24)
spacing = kwargs.get("spacing", 2)
sort = query_m.get("sort", [])
direction = query_m.get("direction", [])
sort = sort[0] if sort else None
direction = direction[0] if direction else "asc"
reverse = direction == "desc"
_direction = "desc" if direction == "asc" else "asc"
items = os.listdir(path)
items.insert(0, "..")
items = cls._items_normalize(
items,
path,
pad = True,
space = False,
simplified = True
)
items.sort(key = lambda v: v["name"])
items.sort(
key = cls._sorter_build(name = sort),
reverse = reverse
)
max_length = max([len(item["name_s"]) for item in items] + [max_length])
padding_s = (max_length + spacing - 4) * " "
spacing_s = spacing * " "
path_n = path_v.rstrip("/")
for value in cls._gen_header("Index of %s" % (path_n or "/"), style = False, meta = False):
yield value
yield "<body>"
yield "<h1 class=\"path\">Index of %s</h1>" % (path_n or "/")
yield "<hr/>"
yield "<pre>"
yield "<img src=\"%s\" alt=\"Icon \">" % EMPTY_GIF
yield "<a href=\"?sort=name&direction=%s\" class=\"%s\">Name</a>" %\
(_direction, "selected" if sort == "name" else "")
yield padding_s
yield "<a href=\"?sort=modified&direction=%s\" class=\"%s\">Last modified</a>" %\
(_direction, "selected" if sort == "modified" else "")
yield " "
yield spacing_s
yield "<a href=\"?sort=size&direction=%s\" class=\"%s\">Size</a></th>" %\
(_direction, "selected" if sort == "size" else "")
yield "<hr/>"
for item in items:
if item["name_s"] == "..": type_s = "PARENTDIR"
elif item["is_dir"]: type_s = "DIR"
else: type_s = "ARC"
if item["name_s"] == "..": name_s = "Parent Directory"
elif item["is_dir"]: name_s = item["name_s"] + "/"
else: name_s = item["name_s"]
if item["is_dir"]: name_q = item["name_q"] + "/"
else: name_q = item["name_q"]
name_s = name_s[:max_length]
padding_r = max_length - len(name_s)
yield "<img src=\"%s\" alt=\"[%s]\" />" % (EMPTY_GIF, type_s)
yield "<a href=\"%s\">%s</a>" % (name_q, name_s)
yield " " * padding_r
yield spacing_s
yield "%s%s%s" % (item["modified"], spacing_s, item["size_s"].ljust(5))
yield spacing_s
yield "\n"
yield "<hr/>"
yield "</pre>"
yield "<address>%s</address>" % netius.IDENTIFIER
yield "</body>"
for value in cls._gen_footer(): yield value
def on_connection_d(self, connection):
netius.servers.HTTP2Server.on_connection_d(self, connection)
file = hasattr(connection, "file") and connection.file
if file: file.close()
setattr(connection, "file", None)
setattr(connection, "range", None)
setattr(connection, "bytes_p", None)
setattr(connection, "queue", None)
def on_stream_d(self, stream):
file = hasattr(stream, "file") and stream.file
if file: file.close()
setattr(stream, "file", None)
setattr(stream, "range", None)
setattr(stream, "bytes_p", None)
setattr(stream, "queue", None)
def on_serve(self):
netius.servers.HTTP2Server.on_serve(self)
if self.env: self.base_path = self.get_env("BASE_PATH", self.base_path)
if self.env: self.style_urls = self.get_env("STYLE_URLS", self.style_urls, cast = list)
if self.env: self.index_files = self.get_env("INDEX_FILES", self.index_files, cast = list)
if self.env: self.path_regex = self.get_env(
"PATH_REGEX",
self.path_regex,
cast = lambda v: [i.split(":") for i in v.split(";")]
)
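        # note: given the cast above, PATH_REGEX is expected in the form
        # "pattern:target;pattern:target", each pattern being compiled in
        # _build_regex and matched against request paths in _resolve_regex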
if self.env: self.list_dirs = self.get_env("LIST_DIRS", self.list_dirs, cast = bool)
if self.env: self.list_engine = self.get_env("LIST_ENGINE", self.list_engine)
if self.env: self.cors = self.get_env("CORS", self.cors, cast = bool)
if self.env: self.cache = self.get_env("CACHE", self.cache, cast = int)
self._build_regex()
self.base_path = os.path.abspath(self.base_path)
self.cache_d = datetime.timedelta(seconds = self.cache)
self.base_path = netius.legacy.u(self.base_path, force = True)
self.info("Defining '%s' as the root of the file server ..." % (self.base_path or "."))
if self.list_dirs: self.info("Listing directories with '%s' engine ..." % self.list_engine)
if self.cors: self.info("Cross origin resource sharing is enabled")
if self.cache: self.info("Resource cache set with %d seconds" % self.cache)
def on_data_http(self, connection, parser):
netius.servers.HTTP2Server.on_data_http(self, connection, parser)
# verifies if the current connection contains a reference to the
# file object, in case it exists there's a file currently being
# handled by the connection and so the current data processing
# must be delayed until the file is processed (inserted in queue)
if hasattr(connection, "file") and connection.file:
if not hasattr(connection, "queue"): connection.queue = []
state = parser.get_state()
connection.queue.append(state)
return
try:
# retrieves the requested path from the parser and the constructs
# the correct file name/path to be used in the reading from the
# current file system, so that it's possible to handle the data
path = parser.get_path(normalize = True)
path = netius.legacy.unquote(path)
path = path.lstrip("/")
path = netius.legacy.u(path, force = True)
path = self._resolve(path)
path_f = os.path.join(self.base_path, path)
path_f = os.path.abspath(path_f)
path_f = os.path.normpath(path_f)
            # retrieves the current file system encoding and determines if it's
            # required to decode the path into a unicode string, in which case
            # the normal decoding process is used with the currently defined
            # file system encoding as defined in the specification
path_f = netius.legacy.u(path_f, encoding = "utf-8", force = True)
            # verifies that the provided path starts with the contents of the
            # base path, if it does not this is a security issue and a proper
            # exception must be raised indicating the problem
is_sub = path_f.startswith(self.base_path)
if not is_sub: raise netius.SecurityError("Invalid path")
# verifies if the requested file exists in case it does not
# raises an error indicating the problem so that the user is
# notified about the failure to find the appropriate file
if not os.path.exists(path_f): self.on_no_file(connection); return
            # verifies if the currently resolved path refers to a directory or
            # instead to a normal file and handles each of the cases properly by
# redirecting the request to the proper handlers
is_dir = os.path.isdir(path_f)
if is_dir: self.on_dir_file(connection, parser, path_f)
else: self.on_normal_file(connection, parser, path_f)
except BaseException as exception:
# handles the exception gracefully by sending the contents of
# it to the client and identifying the problem correctly
self.on_exception_file(connection, exception)
def on_dir_file(self, connection, parser, path, style = True):
cls = self.__class__
path_v = parser.get_path()
path_v = netius.legacy.unquote(path_v)
query_v = parser.get_query()
query_m = parser._parse_query(query_v)
is_valid = path_v.endswith("/")
if not is_valid:
path_q = netius.legacy.quote(path_v)
connection.send_response(
data = "Permanent redirect",
headers = dict(
location = path_q + "/"
),
code = 301,
apply = True
)
return
for index_file in self.index_files:
index_path = os.path.join(path, index_file)
if not os.path.exists(index_path): continue
return self.on_normal_file(connection, parser, index_path)
if not self.list_dirs:
self.on_no_file(connection)
return
data = "".join(cls._gen_dir(
self.list_engine,
path,
path_v,
query_m,
style = style,
style_urls = self.style_urls
))
data = netius.legacy.bytes(data, encoding = "utf-8", force = True)
headers = dict()
headers["content-type"] = "text/html"
connection.send_response(
data = data,
headers = headers,
code = 200,
apply = True,
callback = self._file_check_close
)
def on_normal_file(self, connection, parser, path):
# encodes the current path in case it's currently represented by
# a string, this is going to avoid problems in the logging of the
# path that is being requested (unicode encoding problems)
path_s = path if netius.legacy.is_str(path) else path.encode("utf-8")
# prints a debug message about the file that is going to be read
# from the current file system to be sent to the connection
self.debug("Reading file '%s' from file system" % path_s)
# uses the parser from the connection to be able to gather the
        # range as a string to be used later for conversion
range_s = parser.headers.get("range", None)
is_partial = True if range_s else False
# retrieves the last modified timestamp for the resource path and
# uses it to create the ETag for the resource to be served
modified = os.path.getmtime(path)
etag = "netius-%.2f" % modified
# retrieves the header that describes the previous version in the
# client side (client side ETag) and compares both of the ETags to
# verify if the file changed meanwhile or not
_etag = parser.headers.get("if-none-match", None)
not_modified = etag == _etag
        # in case the file did not change in the meantime the not modified
        # callback must be called to correctly handle the unchanged file
if not_modified: self.on_not_modified(connection, path); return
# tries to guess the mime type of the file present in the target
# file path that is going to be returned, this may fail as it's not
        # always possible to determine the correct mime type for a file,
        # for such situations the default mime type is used
type, _encoding = mimetypes.guess_type(path, strict = True)
type = type or "application/octet-stream"
        # retrieves the size of the file that has just been resolved using
# the currently provided path value and then associates the file
# with the current connection
file_size = os.path.getsize(path)
file = open(path, "rb")
connection.file = file
        # converts the current string based representation of the range
        # into a tuple based representation, otherwise creates the default
        # tuple containing the initial position and the final one
if is_partial:
range_s = range_s[6:]
start_s, end_s = range_s.split("-", 1)
start = int(start_s) if start_s else 0
end = int(end_s) if end_s else file_size - 1
range = (start, end)
else: range = (0, file_size - 1)
# calculates the real data size of the chunk that is going to be
# sent to the client this must use the normal range approach
data_size = range[1] - range[0] + 1
# associates the range tuple with the current connection as it's
        # going to be used later for additional computation
connection.range = range
connection.bytes_p = data_size
# seeks the current file to the initial position where it's going
        # to start its reading process, according to the range
file.seek(range[0])
# creates the string that will represent the content range that is
# going to be returned to the client in the current request
content_range_s = "bytes %d-%d/%d" % (range[0], range[1], file_size)
# creates the map that will hold the various header values for the
        # current message to be sent, it may contain both the length
# of the file that is going to be returned and the type of it
headers = dict()
headers["etag"] = etag
headers["content-length"] = "%d" % data_size
if self.cors: headers["access-control-allow-origin"] = "*"
if type: headers["content-type"] = type
if is_partial: headers["content-range"] = content_range_s
if not is_partial: headers["accept-ranges"] = "bytes"
# in case there's a valid cache defined must populate the proper header
# fields so that cache is applied to the request
if self.cache:
current = datetime.datetime.utcnow()
target = current + self.cache_d
target_s = target.strftime("%a, %d %b %Y %H:%M:%S GMT")
cache_s = "public, max-age=%d" % self.cache
headers["expires"] = target_s
headers["cache-control"] = cache_s
# "calculates" the proper returning code taking into account if the
# current data to be sent is partial or not
code = 206 if is_partial else 200
# sends the initial part of the file response containing the headers
# and the description of the file (includes size) the callback to this
# operation is the initial sending of the file contents so that the
# sending of the proper file contents starts with success
connection.send_response(
headers = headers,
code = code,
apply = True,
final = False,
flush = False,
callback = self._file_send
)
def on_no_file(self, connection):
cls = self.__class__
connection.send_response(
data = cls.build_text(
"File not found",
style_urls = self.style_urls
),
headers = dict(
connection = "close"
),
code = 404,
apply = True,
callback = self._file_close
)
def on_exception_file(self, connection, exception):
cls = self.__class__
connection.send_response(
data = cls.build_text(
"Problem handling request - %s" % str(exception),
trace = self.is_devel(),
style_urls = self.style_urls
),
headers = dict(
connection = "close"
),
code = 500,
apply = True,
callback = self._file_close
)
def on_not_modified(self, connection, path):
connection.set_encoding(netius.common.PLAIN_ENCODING)
connection.send_response(
data = "",
code = 304,
apply = True
)
def _next_queue(self, connection):
# verifies if the current connection already contains a reference to
# the queue structure that handles the queuing/pipelining of requests
# if it does not or the queue is empty returns immediately, as there's
# nothing currently pending to be done/processed
if not hasattr(connection, "queue"): return
if not connection.queue: return
# retrieves the state (of the parser) as the payload of the next element
# in the queue and then uses it to construct a mock parser object that is
# going to be used to simulate an on data call to the file server
state = connection.queue.pop(0)
parser = netius.common.HTTPParser.mock(connection.parser.owner, state)
try: self.on_data_http(connection, parser)
finally: parser.destroy()
    def _file_send(self, connection):
        file = connection.file
        range = connection.range
        # reads at most BUFFER_SIZE bytes from the file, clamping the read
        # to the number of bytes still pending for the requested range
        is_larger = BUFFER_SIZE > connection.bytes_p
        buffer_s = connection.bytes_p if is_larger else BUFFER_SIZE
        data = file.read(buffer_s)
        data_l = len(data) if data else 0
        connection.bytes_p -= data_l
        # once the file is exhausted or the range is fully sent, chains
        # the finish callback, otherwise schedules this same method to
        # send the next chunk (asynchronous send loop)
        is_final = not data or connection.bytes_p == 0
        callback = self._file_finish if is_final else self._file_send
        connection.send_part(
            data,
            final = False,
            callback = callback
        )
def _file_finish(self, connection):
connection.file.close()
connection.file = None
connection.range = None
connection.bytes_p = None
is_keep_alive = connection.parser.keep_alive
callback = None if is_keep_alive else self._file_close
connection.flush_s(callback = callback)
self._next_queue(connection)
def _file_close(self, connection):
connection.close(flush = True)
def _file_check_close(self, connection):
if connection.parser.keep_alive: return
connection.close(flush = True)
    def _resolve(self, path):
        # resolves the path against the configured regex rules, falling
        # back to the original path when no rule matches
        path, _result = self._resolve_regex(path)
        return path
def _build_regex(self):
self.path_regex = [(re.compile(regex), value) for regex, value in self.path_regex]
def _resolve_regex(self, path):
for regex, value in self.path_regex:
if not regex.match(path): continue
return (value, True)
return (path, False)
if __name__ == "__main__":
import logging
server = FileServer(level = logging.INFO)
server.serve(env = True)
else:
__path__ = []
| apache-2.0 |
neumerance/deploy | openstack_dashboard/dashboards/project/access_and_security/keypairs/views.py | 6 | 3083 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing keypairs.
"""
import logging
from django.core.urlresolvers import reverse # noqa
from django.core.urlresolvers import reverse_lazy # noqa
from django import http
from django.template.defaultfilters import slugify # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from django.views.generic import TemplateView # noqa
from django.views.generic import View # noqa
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
import forms as project_forms
LOG = logging.getLogger(__name__)
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateKeypair
template_name = 'project/access_and_security/keypairs/create.html'
success_url = 'horizon:project:access_and_security:keypairs:download'
def get_success_url(self):
return reverse(self.success_url,
kwargs={"keypair_name": self.request.POST['name']})
class ImportView(forms.ModalFormView):
form_class = project_forms.ImportKeypair
template_name = 'project/access_and_security/keypairs/import.html'
success_url = reverse_lazy('horizon:project:access_and_security:index')
def get_object_id(self, keypair):
return keypair.name
class DownloadView(TemplateView):
    template_name = 'project/access_and_security/keypairs/download.html'
    def get_context_data(self, keypair_name=None):
        return {'keypair_name': keypair_name}
class GenerateView(View):
def get(self, request, keypair_name=None):
try:
keypair = api.nova.keypair_create(request, keypair_name)
except Exception:
redirect = reverse('horizon:project:access_and_security:index')
exceptions.handle(self.request,
_('Unable to create keypair: %(exc)s'),
redirect=redirect)
response = http.HttpResponse(mimetype='application/binary')
response['Content-Disposition'] = \
'attachment; filename=%s.pem' % slugify(keypair.name)
response.write(keypair.private_key)
response['Content-Length'] = str(len(response.content))
return response
| apache-2.0 |
keflavich/pvextractor | pvextractor/utils/wcs_utils.py | 2 | 2457 | import numpy as np
from astropy import units as u
from astropy.wcs import WCSSUB_CELESTIAL, WCSSUB_SPECTRAL
def get_spatial_scale(wcs, assert_square=True):
# Code adapted from APLpy
wcs = wcs.sub([WCSSUB_CELESTIAL])
cdelt = np.matrix(wcs.wcs.get_cdelt())
pc = np.matrix(wcs.wcs.get_pc())
scale = np.array(cdelt * pc)
if assert_square:
try:
np.testing.assert_almost_equal(abs(cdelt[0,0]), abs(cdelt[0,1]))
np.testing.assert_almost_equal(abs(pc[0,0]), abs(pc[1,1]))
np.testing.assert_almost_equal(abs(scale[0,0]), abs(scale[0,1]))
except AssertionError:
raise ValueError("Non-square pixels. Please resample data.")
return abs(scale[0,0]) * u.Unit(wcs.wcs.cunit[0])
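# Illustrative sketch (an assumption, not part of the original module):
# computing the spatial pixel scale from a FITS header.
#
#     from astropy.io import fits
#     from astropy.wcs import WCS
#     header = fits.getheader("cube.fits")  # hypothetical file
#     scale = get_spatial_scale(WCS(header))  # e.g. <Quantity 0.0002 deg>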
def get_spectral_scale(wcs):
# Code adapted from APLpy
wcs = wcs.sub([WCSSUB_SPECTRAL])
cdelt = np.matrix(wcs.wcs.get_cdelt())
pc = np.matrix(wcs.wcs.get_pc())
scale = np.array(cdelt * pc)
return abs(scale[0,0]) * u.Unit(wcs.wcs.cunit[0])
def sanitize_wcs(mywcs):
pc = np.matrix(mywcs.wcs.get_pc())
if (pc[:,2].sum() != pc[2,2] or pc[2,:].sum() != pc[2,2]):
raise ValueError("Non-independent 3rd axis.")
axtypes = mywcs.get_axis_types()
if ((axtypes[0]['coordinate_type'] != 'celestial' or
axtypes[1]['coordinate_type'] != 'celestial' or
axtypes[2]['coordinate_type'] != 'spectral')):
cunit3 = mywcs.wcs.cunit[2]
ctype3 = mywcs.wcs.ctype[2]
if cunit3 != '':
cunit3 = u.Unit(cunit3)
if cunit3.is_equivalent(u.m/u.s):
mywcs.wcs.ctype[2] = 'VELO'
elif cunit3.is_equivalent(u.Hz):
mywcs.wcs.ctype[2] = 'FREQ'
elif cunit3.is_equivalent(u.m):
mywcs.wcs.ctype[2] = 'WAVE'
else:
raise ValueError("Could not determine type of 3rd axis.")
elif ctype3 != '':
if 'VELO' in ctype3:
mywcs.wcs.ctype[2] = 'VELO'
elif 'FELO' in ctype3:
mywcs.wcs.ctype[2] = 'VELO-F2V'
elif 'FREQ' in ctype3:
mywcs.wcs.ctype[2] = 'FREQ'
elif 'WAVE' in ctype3:
mywcs.wcs.ctype[2] = 'WAVE'
else:
raise ValueError("Could not determine type of 3rd axis.")
else:
raise ValueError("Cube axes not in expected orientation: PPV")
return mywcs
| bsd-3-clause |
OpenMDM/OpenMDM | OpenMDM/settings.py | 1 | 3172 | """
Django settings for OpenMDM project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import ldap
from common.local.settings import *
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType, PosixGroupType
from mongoengine import connect
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7d0q=o@j-qmq=u09$p%6cq03))+%&qfvz+gbw^^y2y_5pa7&v&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrapform',
'public_gate',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django_auth_ldap.backend.LDAPBackend',
# 'django.contrib.auth.backends.ModelBackend',
)
ROOT_URLCONF = 'OpenMDM.urls'
WSGI_APPLICATION = 'OpenMDM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': CONFIG['local']['database']
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'src'),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# plist group
RETRIEVE_PLIST_FROM_GROUPS = "all"
# RETRIEVE_PLIST_FROM_GROUPS = "first"
# LDAP
AUTH_LDAP_SERVER_URI = CONFIG['local']['ldap']['SERVER_URI']
# Direct bind
# AUTH_LDAP_USER_DN_TEMPLATE = "cn=%(user)s,ou=users,dc=ldap,dc=hackndo,dc=com"
# Search / Bind
AUTH_LDAP_BIND_DN = CONFIG['local']['ldap']['BIND_DN']
AUTH_LDAP_BIND_PASSWORD = CONFIG['local']['ldap']['BIND_PASSWORD']
AUTH_LDAP_USER_SEARCH = CONFIG['local']['ldap']['USER_SEARCH']
# Finding groups
AUTH_LDAP_GROUP_SEARCH = CONFIG['local']['ldap']['GROUP_SEARCH']
AUTH_LDAP_GROUP_TYPE = CONFIG['local']['ldap']['GROUP_TYPE']
AUTH_LDAP_REQUIRE_GROUP = CONFIG['local']['ldap']['REQUIRE_GROUP']
connect(CONFIG['local']['mongo']['DB']) | apache-2.0 |
talishte/ctigre | env/lib/python2.7/site-packages/pip/_vendor/requests/api.py | 206 | 4935 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
| bsd-2-clause |
natbraun/biggraphite | biggraphite/cli/command.py | 1 | 2819 | #!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command Skeleton."""
import argparse
import datetime
import parsedatetime
class BaseCommand(object):
"""Base command."""
NAME = ""
HELP = ""
def add_arguments(self, parser):
"""Add custom arguments.
Args:
parser: argparse.Parser
"""
pass
def run(self, accessor, opts):
"""Run the command.
Args:
accessor: biggraphite.accessor.Accessor
opts: argparse.Namespace
"""
pass
# Following code comes from https://github.com/Clemson-DPA/dpa-pipe.
class ParseDateTimeArg(argparse.Action):
"""argparse.Action subclass. parses natural language cl datetime strings.
Use this class as an argument to the 'action' argument when calling
add_argument on an argparse parser. When the command line arguments are
parsed, the resulting namespace will have a datetime.datetime object
assigned to the argument's destination.
If a datetime could not be parsed from the string, a ValueError will be
raised.
Examples of parsable human readable datetime strings:
"now", "yesterday", "2 weeks from now", "3 days ago", etc.
Note: When the datetime string is more than one word, you should include
the argument in quotes on the command line.
"""
def __call__(self, parser, namespace, datetime_str, option_string=None):
"""Do the thing."""
parsed_datetime = date_time_from_str(datetime_str)
setattr(namespace, self.dest, parsed_datetime)
def date_time_from_str(datetime_str):
"""Parse a humanly readable datetime.
Args:
datetime_str: str, humanly readable date time.
Returns:
datetime, correctponding datetime object.
"""
cal = parsedatetime.Calendar()
parsed_result, date_type = cal.parse(datetime_str)
parsed_datetime = None
if date_type == 3:
# parsed_result is a datetime
parsed_datetime = parsed_result
elif date_type in (1, 2):
# parsed_result is struct_time
parsed_datetime = datetime.datetime(*parsed_result[:6])
else:
# Failed to parse
raise ValueError("Could not parse date/time string: " + datetime_str)
return parsed_datetime
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/oracle_sink.py | 2 | 2726 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .copy_sink import CopySink
class OracleSink(CopySink):
"""A copy activity Oracle sink.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param write_batch_size: Write batch size. Type: integer (or Expression
with resultType integer), minimum: 0.
:type write_batch_size: object
:param write_batch_timeout: Write batch timeout. Type: string (or
Expression with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type write_batch_timeout: object
:param sink_retry_count: Sink retry count. Type: integer (or Expression
with resultType integer).
:type sink_retry_count: object
:param sink_retry_wait: Sink retry wait. Type: string (or Expression with
resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type sink_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param pre_copy_script: SQL pre-copy script. Type: string (or Expression
with resultType string).
:type pre_copy_script: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'write_batch_size': {'key': 'writeBatchSize', 'type': 'object'},
'write_batch_timeout': {'key': 'writeBatchTimeout', 'type': 'object'},
'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'},
'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'},
}
def __init__(self, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, pre_copy_script=None):
super(OracleSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait)
self.pre_copy_script = pre_copy_script
self.type = 'OracleSink'
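# Illustrative sketch (an assumption, not part of the generated SDK file):
#
#     sink = OracleSink(
#         write_batch_size = 10000,
#         pre_copy_script = "TRUNCATE TABLE staging")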
| mit |
nightjean/Deep-Learning | tensorflow/contrib/specs/python/summaries.py | 164 | 8831 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for summarizing and describing TensorFlow graphs.
This contains functions that generate string descriptions from
TensorFlow graphs, for debugging, testing, and model size
estimation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.specs.python import specs
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# These are short abbreviations for common TensorFlow operations used
# in test cases with tf_structure to verify that specs_lib generates a
# graph structure with the right operations. Operations outside the
# scope of specs (e.g., Const and Placeholder) are just assigned "_"
# since they are not relevant to testing.
SHORT_NAMES_SRC = """
BiasAdd biasadd
Const _
Conv2D conv
MatMul dot
Placeholder _
Sigmoid sig
Variable var
""".split()
SHORT_NAMES = {
x: y
for x, y in zip(SHORT_NAMES_SRC[::2], SHORT_NAMES_SRC[1::2])
}
def _truncate_structure(x):
"""A helper function that disables recursion in tf_structure.
Some constructs (e.g., HorizontalLstm) are complex unrolled
structures and don't need to be represented in the output
of tf_structure or tf_print. This helper function defines
which tree branches should be pruned. This is a very imperfect
way of dealing with unrolled LSTM's (since it truncates
useful information as well), but it's not worth doing something
better until the new fused and unrolled ops are ready.
Args:
x: a Tensor or Op
Returns:
A bool indicating whether the subtree should be pruned.
"""
if "/HorizontalLstm/" in x.name:
return True
return False
def tf_structure(x, include_shapes=False, finished=None):
"""A postfix expression summarizing the TF graph.
This is intended to be used as part of test cases to
check for gross differences in the structure of the graph.
  The resulting string is not invertible or unambiguous
and cannot be used to reconstruct the graph accurately.
Args:
x: a tf.Tensor or tf.Operation
include_shapes: include shapes in the output string
finished: a set of ops that have already been output
Returns:
A string representing the structure as a string of
postfix operations.
"""
if finished is None:
finished = set()
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = []
if x in finished:
return " <>"
finished |= {x}
result = ""
if not _truncate_structure(x):
for y in x.inputs:
result += tf_structure(y, include_shapes, finished)
if include_shapes:
result += " %s" % (shape,)
if x.type != "Identity":
name = SHORT_NAMES.get(x.type, x.type.lower())
result += " " + name
return result
def tf_print(x, depth=0, finished=None, printer=print):
"""A simple print function for a TensorFlow graph.
Args:
x: a tf.Tensor or tf.Operation
depth: current printing depth
finished: set of nodes already output
printer: print function to use
Returns:
Total number of parameters found in the
subtree.
"""
if finished is None:
finished = set()
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = ""
if x.type == "Identity":
x = x.inputs[0].op
if x in finished:
printer("%s<%s> %s %s" % (" " * depth, x.name, x.type, shape))
return
finished |= {x}
printer("%s%s %s %s" % (" " * depth, x.name, x.type, shape))
if not _truncate_structure(x):
for y in x.inputs:
tf_print(y, depth + 1, finished, printer=printer)
def tf_num_params(x):
"""Number of parameters in a TensorFlow subgraph.
Args:
x: root of the subgraph (Tensor, Operation)
Returns:
Total number of elements found in all Variables
in the subgraph.
"""
if isinstance(x, ops.Tensor):
shape = x.get_shape()
x = x.op
if x.type in ["Variable", "VariableV2"]:
return shape.num_elements()
totals = [tf_num_params(y) for y in x.inputs]
return sum(totals)
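# Illustrative sketch (an assumption, not part of the original module):
# counting the parameters reachable from a matmul output.
#
#     import tensorflow as tf
#     x = tf.placeholder(tf.float32, [None, 10])
#     w = tf.Variable(tf.zeros([10, 5]))
#     tf_num_params(tf.matmul(x, w))  # -> 50 (10 * 5 variable elements)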
def tf_left_split(op):
"""Split the parameters of op for left recursion.
Args:
op: tf.Operation
Returns:
A tuple of the leftmost input tensor and a list of the
remaining arguments.
"""
if len(op.inputs) < 1:
return None, []
if op.type == "Concat":
return op.inputs[1], op.inputs[2:]
return op.inputs[0], op.inputs[1:]
def tf_parameter_iter(x):
"""Iterate over the left branches of a graph and yield sizes.
Args:
x: root of the subgraph (Tensor, Operation)
Yields:
A triple of name, number of params, and shape.
"""
while 1:
if isinstance(x, ops.Tensor):
shape = x.get_shape().as_list()
x = x.op
else:
shape = ""
left, right = tf_left_split(x)
totals = [tf_num_params(y) for y in right]
total = sum(totals)
yield x.name, total, shape
if left is None:
break
x = left
def _combine_filter(x):
"""A filter for combining successive layers with similar names."""
last_name = None
last_total = 0
last_shape = None
for name, total, shape in x:
name = re.sub("/.*", "", name)
if name == last_name:
last_total += total
continue
if last_name is not None:
yield last_name, last_total, last_shape
last_name = name
last_total = total
last_shape = shape
if last_name is not None:
yield last_name, last_total, last_shape
def tf_parameter_summary(x, printer=print, combine=True):
"""Summarize parameters by depth.
Args:
x: root of the subgraph (Tensor, Operation)
printer: print function for output
combine: combine layers by top-level scope
"""
seq = tf_parameter_iter(x)
if combine:
seq = _combine_filter(seq)
seq = reversed(list(seq))
for name, total, shape in seq:
printer("%10d %-20s %s" % (total, name, shape))
def tf_spec_structure(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Return a postfix representation of the specification.
This is intended to be used as part of test cases to
check for gross differences in the structure of the graph.
  The resulting string is not invertible or unambiguous
and cannot be used to reconstruct the graph accurately.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: tensor shape (in lieu of inputs)
input_type: type of the input tensor
Returns:
A string with a postfix representation of the
specification.
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
return str(tf_structure(outputs).strip())
def tf_spec_summary(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Output a summary of the specification.
This prints a list of left-most tensor operations and summarized the
variables found in the right branches. This kind of representation
is particularly useful for networks that are generally structured
like pipelines.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
tf_parameter_summary(outputs)
def tf_spec_print(spec,
inputs=None,
input_shape=None,
input_type=dtypes.float32):
"""Print a tree representing the spec.
Args:
spec: specification
inputs: input to the spec construction (usually a Tensor)
input_shape: optional shape of input
input_type: type of the input tensor
"""
if inputs is None:
inputs = array_ops.placeholder(input_type, input_shape)
outputs = specs.create_net(spec, inputs)
tf_print(outputs)
| apache-2.0 |
zhouzhenghui/python-for-android | python3-alpha/extra_modules/gdata/tlslite/utils/dateFuncs.py | 407 | 2181 |
import os
#Functions for manipulating datetime objects
#CCYY-MM-DDThh:mm:ssZ
def parseDateClass(s):
year, month, day = s.split("-")
day, tail = day[:2], day[2:]
hour, minute, second = tail[1:].split(":")
second = second[:2]
year, month, day = int(year), int(month), int(day)
hour, minute, second = int(hour), int(minute), int(second)
return createDateClass(year, month, day, hour, minute, second)
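# Illustrative sketch (an assumption, not part of the original module):
# parseDateClass and printDateClass round-trip a UTC timestamp string.
#
#     d = parseDateClass("2016-01-02T03:04:05Z")
#     printDateClass(d)  # -> "2016-01-02T03:04:05Z"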
if os.name != "java":
from datetime import datetime, timedelta
#Helper functions for working with a date/time class
def createDateClass(year, month, day, hour, minute, second):
return datetime(year, month, day, hour, minute, second)
def printDateClass(d):
#Split off fractional seconds, append 'Z'
return d.isoformat().split(".")[0]+"Z"
def getNow():
return datetime.utcnow()
def getHoursFromNow(hours):
return datetime.utcnow() + timedelta(hours=hours)
def getMinutesFromNow(minutes):
return datetime.utcnow() + timedelta(minutes=minutes)
def isDateClassExpired(d):
return d < datetime.utcnow()
def isDateClassBefore(d1, d2):
return d1 < d2
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
import java
import jarray
def createDateClass(year, month, day, hour, minute, second):
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.set(year, month-1, day, hour, minute, second)
return c
def printDateClass(d):
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \
(d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \
d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND))
def getNow():
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.get(c.HOUR) #force refresh?
return c
def getHoursFromNow(hours):
d = getNow()
d.add(d.HOUR, hours)
return d
def isDateClassExpired(d):
n = getNow()
return d.before(n)
def isDateClassBefore(d1, d2):
return d1.before(d2)
| apache-2.0 |
obulpathi/datascience | neural-networks/src/network2.py | 7 | 14296 | """network2.py
~~~~~~~~~~~~~~
An improved version of network.py, implementing the stochastic
gradient descent learning algorithm for a feedforward neural network.
Improvements include the addition of the cross-entropy cost function,
regularization, and better initialization of network weights. Note
that I have focused on making the code simple, easily readable, and
easily modifiable. It is not optimized, and omits many desirable
features.
"""
#### Libraries
# Standard library
import json
import random
import sys
# Third-party libraries
import numpy as np
#### Define the quadratic and cross-entropy cost functions
class QuadraticCost(object):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``.
"""
return 0.5*np.linalg.norm(a-y)**2
@staticmethod
def delta(z, a, y):
"""Return the error delta from the output layer."""
return (a-y) * sigmoid_prime(z)
class CrossEntropyCost(object):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``. Note that np.nan_to_num is used to ensure numerical
stability. In particular, if both ``a`` and ``y`` have a 1.0
in the same slot, then the expression (1-y)*np.log(1-a)
returns nan. The np.nan_to_num ensures that that is converted
to the correct value (0.0).
"""
return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))
@staticmethod
def delta(z, a, y):
"""Return the error delta from the output layer. Note that the
parameter ``z`` is not used by the method. It is included in
the method's parameters in order to make the interface
consistent with the delta method for other cost classes.
"""
return (a-y)
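# Illustrative sketch (an assumption, not part of the original file):
# comparing the two cost functions on a single output/target pair.
#
#     a = np.array([[0.8]])  # network output
#     y = np.array([[1.0]])  # desired output
#     QuadraticCost.fn(a, y)     # 0.5 * 0.2**2 = 0.02
#     CrossEntropyCost.fn(a, y)  # -ln(0.8) ~= 0.223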
#### Main Network class
class Network(object):
def __init__(self, sizes, cost=CrossEntropyCost):
"""The list ``sizes`` contains the number of neurons in the respective
layers of the network. For example, if the list was [2, 3, 1]
then it would be a three-layer network, with the first layer
containing 2 neurons, the second layer 3 neurons, and the
third layer 1 neuron. The biases and weights for the network
are initialized randomly, using
``self.default_weight_initializer`` (see docstring for that
method).
"""
self.num_layers = len(sizes)
self.sizes = sizes
self.default_weight_initializer()
self.cost=cost
def default_weight_initializer(self):
"""Initialize each weight using a Gaussian distribution with mean 0
and standard deviation 1 over the square root of the number of
weights connecting to the same neuron. Initialize the biases
using a Gaussian distribution with mean 0 and standard
deviation 1.
Note that the first layer is assumed to be an input layer, and
by convention we won't set any biases for those neurons, since
biases are only ever used in computing the outputs from later
layers.
"""
self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
self.weights = [np.random.randn(y, x)/np.sqrt(x)
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def large_weight_initializer(self):
"""Initialize the weights using a Gaussian distribution with mean 0
and standard deviation 1. Initialize the biases using a
Gaussian distribution with mean 0 and standard deviation 1.
Note that the first layer is assumed to be an input layer, and
by convention we won't set any biases for those neurons, since
biases are only ever used in computing the outputs from later
layers.
This weight and bias initializer uses the same approach as in
Chapter 1, and is included for purposes of comparison. It
will usually be better to use the default weight initializer
instead.
"""
self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(self.sizes[:-1], self.sizes[1:])]
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
lmbda = 0.0,
evaluation_data=None,
monitor_evaluation_cost=False,
monitor_evaluation_accuracy=False,
monitor_training_cost=False,
monitor_training_accuracy=False):
"""Train the neural network using mini-batch stochastic gradient
descent. The ``training_data`` is a list of tuples ``(x, y)``
representing the training inputs and the desired outputs. The
other non-optional parameters are self-explanatory, as is the
regularization parameter ``lmbda``. The method also accepts
``evaluation_data``, usually either the validation or test
data. We can monitor the cost and accuracy on either the
evaluation data or the training data, by setting the
appropriate flags. The method returns a tuple containing four
lists: the (per-epoch) costs on the evaluation data, the
accuracies on the evaluation data, the costs on the training
data, and the accuracies on the training data. All values are
evaluated at the end of each training epoch. So, for example,
if we train for 30 epochs, then the first element of the tuple
will be a 30-element list containing the cost on the
evaluation data at the end of each epoch. Note that the lists
are empty if the corresponding flag is not set.
"""
        n_data = len(evaluation_data) if evaluation_data else 0
n = len(training_data)
evaluation_cost, evaluation_accuracy = [], []
training_cost, training_accuracy = [], []
for j in xrange(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in xrange(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(
mini_batch, eta, lmbda, len(training_data))
print "Epoch %s training complete" % j
if monitor_training_cost:
cost = self.total_cost(training_data, lmbda)
training_cost.append(cost)
print "Cost on training data: {}".format(cost)
if monitor_training_accuracy:
accuracy = self.accuracy(training_data, convert=True)
training_accuracy.append(accuracy)
print "Accuracy on training data: {} / {}".format(
accuracy, n)
if monitor_evaluation_cost:
cost = self.total_cost(evaluation_data, lmbda, convert=True)
evaluation_cost.append(cost)
print "Cost on evaluation data: {}".format(cost)
if monitor_evaluation_accuracy:
accuracy = self.accuracy(evaluation_data)
evaluation_accuracy.append(accuracy)
print "Accuracy on evaluation data: {} / {}".format(
self.accuracy(evaluation_data), n_data)
print
return evaluation_cost, evaluation_accuracy, \
training_cost, training_accuracy
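    # Example usage (editor's sketch; ``training_data`` and
    # ``validation_data`` are assumed to come from
    # mnist_loader.load_data_wrapper):
    #     net = Network([784, 30, 10], cost=CrossEntropyCost)
    #     net.SGD(training_data, 30, 10, 0.5, lmbda=5.0,
    #             evaluation_data=validation_data,
    #             monitor_evaluation_accuracy=True)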
def update_mini_batch(self, mini_batch, eta, lmbda, n):
"""Update the network's weights and biases by applying gradient
descent using backpropagation to a single mini batch. The
``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
learning rate, ``lmbda`` is the regularization parameter, and
``n`` is the total size of the training data set.
"""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
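    # Editor's note: the (1-eta*(lmbda/n)) factor above implements L2 weight
    # decay. With, say, eta=0.5, lmbda=5.0 and n=50000, each update first
    # rescales every weight by 1 - 0.5*(5.0/50000) = 0.99995 before taking
    # the gradient step; biases are deliberately not decayed.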
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = (self.cost).delta(zs[-1], activations[-1], y)
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in xrange(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
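    # Worked example of the indexing above (editor's note): for a
    # [784, 30, 10] network, num_layers = 3, so the loop runs only for
    # l = 2; there zs[-2] is the hidden layer's weighted input,
    # self.weights[-1] the hidden-to-output matrix, and activations[-3]
    # the input activations.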
def accuracy(self, data, convert=False):
"""Return the number of inputs in ``data`` for which the neural
network outputs the correct result. The neural network's
output is assumed to be the index of whichever neuron in the
final layer has the highest activation.
The flag ``convert`` should be set to False if the data set is
validation or test data (the usual case), and to True if the
data set is the training data. The need for this flag arises
due to differences in the way the results ``y`` are
represented in the different data sets. In particular, it
flags whether we need to convert between the different
representations. It may seem strange to use different
representations for the different data sets. Why not use the
same representation for all three data sets? It's done for
efficiency reasons -- the program usually evaluates the cost
on the training data and the accuracy on other data sets.
These are different types of computations, and using different
representations speeds things up. More details on the
representations can be found in
mnist_loader.load_data_wrapper.
"""
if convert:
results = [(np.argmax(self.feedforward(x)), np.argmax(y))
for (x, y) in data]
else:
results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in data]
return sum(int(x == y) for (x, y) in results)
def total_cost(self, data, lmbda, convert=False):
"""Return the total cost for the data set ``data``. The flag
``convert`` should be set to False if the data set is the
training data (the usual case), and to True if the data set is
the validation or test data. See comments on the similar (but
reversed) convention for the ``accuracy`` method, above.
"""
cost = 0.0
for x, y in data:
a = self.feedforward(x)
if convert: y = vectorized_result(y)
cost += self.cost.fn(a, y)/len(data)
cost += 0.5*(lmbda/len(data))*sum(
np.linalg.norm(w)**2 for w in self.weights)
return cost
def save(self, filename):
"""Save the neural network to the file ``filename``."""
data = {"sizes": self.sizes,
"weights": [w.tolist() for w in self.weights],
"biases": [b.tolist() for b in self.biases],
"cost": str(self.cost.__name__)}
f = open(filename, "w")
json.dump(data, f)
f.close()
#### Loading a Network
def load(filename):
"""Load a neural network from the file ``filename``. Returns an
instance of Network.
"""
f = open(filename, "r")
data = json.load(f)
f.close()
cost = getattr(sys.modules[__name__], data["cost"])
net = Network(data["sizes"], cost=cost)
net.weights = [np.array(w) for w in data["weights"]]
net.biases = [np.array(b) for b in data["biases"]]
return net
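# --- Editor's sketch (not part of the original file): a save/load round
# trip through the two functions above; the filename is hypothetical.
def _demo_save_load_roundtrip():
    import numpy as np
    net = Network([2, 3, 1])
    net.save("demo_net.json")
    restored = load("demo_net.json")
    assert restored.sizes == net.sizes
    assert all(np.allclose(w1, w2)
               for w1, w2 in zip(restored.weights, net.weights))
    return restored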
#### Miscellaneous functions
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the j'th position
and zeroes elsewhere. This is used to convert a digit (0...9)
into a corresponding desired output from the neural network.
"""
e = np.zeros((10, 1))
e[j] = 1.0
return e
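# Editor's illustration: vectorized_result(3) returns a (10, 1) column
# vector of zeros with a 1.0 in row 3, i.e. the one-hot encoding used
# for training labels.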
def sigmoid(z):
"""The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
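# --- Editor's sketch (not part of the original file): a finite-difference
# check that sigmoid_prime matches the numerical slope of sigmoid (numpy is
# assumed to be imported as np at the top of the file, as elsewhere here).
def _check_sigmoid_prime(z=0.5, h=1e-6):
    numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)
    analytic = sigmoid_prime(z)
    assert abs(numeric - analytic) < 1e-8
    return analytic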
| apache-2.0 |
2014c2g2/2015cda | static/Brython3.1.1-20150328-091302/Lib/test/pystone.py | 718 | 7379 | #! /usr/bin/python3.3
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
print("Pystone(%s) time for %d passes = %g" % \
(__version__, loops, benchtime))
print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = [x[:] for x in [Array1Glob]*51]
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = clock()
for i in range(loops):
pass
nulltime = clock() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = clock()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = clock() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
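# --- Editor's note (illustrative, not part of the benchmark): pystones()
# can also be driven programmatically; it returns (benchtime, stones/sec).
def _quick_benchmark(loops=1000):
    benchtime, stones = pystones(loops)
    print("%d passes in %.4g s -> %.0f pystones/second"
          % (loops, benchtime, stones))
    return stones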
if __name__ == '__main__':
import sys
def error(msg):
print(msg, end=' ', file=sys.stderr)
print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
sys.exit(100)
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
loops = LOOPS
main(loops)
| gpl-3.0 |
takeshineshiro/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py | 1 | 86867 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import hashlib
import signal
import sys
import time
import uuid
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
from six import moves
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
UINT64_BITMASK = (1 << 64) - 1
class _mac_mydialect(netaddr.mac_unix):
word_fmt = '%.2x'
class DeviceListRetrievalError(exceptions.NeutronException):
message = _("Unable to retrieve port details for devices: %(devices)s ")
# A class to represent a local VLAN and the VIFs (i.e., ports that have
# 'iface-id' and 'vif-mac' attributes set) attached to it.
class LocalVLANMapping(object):
def __init__(self, vlan, network_type, physical_network, segmentation_id,
vif_ports=None):
if vif_ports is None:
vif_ports = {}
self.vlan = vlan
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
self.vif_ports = vif_ports
# set of tunnel ports on which packets should be flooded
self.tun_ofports = set()
def __str__(self):
return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
(self.vlan, self.network_type, self.physical_network,
self.segmentation_id))
class OVSPluginApi(agent_rpc.PluginApi):
pass
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackTunnelMixin,
dvr_rpc.DVRAgentRpcCallbackMixin):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth or a pair of patch ports is used to connect the local VLAN on
the integration bridge with the physical network bridge, with flow
rules adding, modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
bridge_mappings, polling_interval, tunnel_types=None,
veth_mtu=None, l2_population=False,
enable_distributed_routing=False,
minimize_polling=False,
ovsdb_monitor_respawn_interval=(
constants.DEFAULT_OVSDBMON_RESPAWN),
arp_responder=False,
prevent_arp_spoofing=True,
use_veth_interconnection=False,
quitting_rpc_timeout=None,
conf=None):
'''Constructor.
:param bridge_classes: a dict for bridge classes.
:param integ_br: name of the integration bridge.
:param tun_br: name of the tunnel bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
:param polling_interval: interval (secs) to poll DB.
:param tunnel_types: A list of tunnel types to enable support for in
the agent. If set, will automatically set enable_tunneling to
True.
:param veth_mtu: MTU size for veth interfaces.
:param l2_population: Optional, whether L2 population is turned on
:param minimize_polling: Optional, whether to minimize polling by
monitoring ovsdb for interface changes.
:param ovsdb_monitor_respawn_interval: Optional, when using polling
minimization, the number of seconds to wait before respawning
the ovsdb monitor.
:param arp_responder: Optional, enable local ARP responder if it is
supported.
:param prevent_arp_spoofing: Optional, enable suppression of any ARP
responses from ports that don't match an IP address that belongs
to the ports. Spoofing rules will not be added to ports that
have port security disabled.
:param use_veth_interconnection: use veths instead of patch ports to
interconnect the integration bridge to physical bridges.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
SIGTERM is received
:param conf: an instance of ConfigOpts
'''
super(OVSNeutronAgent, self).__init__()
self.conf = conf or cfg.CONF
# init bridge classes with configured datapath type.
self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
functools.partial(bridge_classes[b],
datapath_type=self.conf.OVS.datapath_type)
for b in ('br_int', 'br_phys', 'br_tun'))
self.use_veth_interconnection = use_veth_interconnection
self.veth_mtu = veth_mtu
self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
p_const.MAX_VLAN_TAG))
self.use_call = True
self.tunnel_types = tunnel_types or []
self.l2_pop = l2_population
# TODO(ethuleau): Change ARP responder so it's not dependent on the
# ML2 l2 population mechanism driver.
self.enable_distributed_routing = enable_distributed_routing
self.arp_responder_enabled = arp_responder and self.l2_pop
self.prevent_arp_spoofing = prevent_arp_spoofing
self.agent_state = {
'binary': 'neutron-openvswitch-agent',
'host': self.conf.host,
'topic': n_const.L2_AGENT_TOPIC,
'configurations': {'bridge_mappings': bridge_mappings,
'tunnel_types': self.tunnel_types,
'tunneling_ip': local_ip,
'l2_population': self.l2_pop,
'arp_responder_enabled':
self.arp_responder_enabled,
'enable_distributed_routing':
self.enable_distributed_routing,
'log_agent_heartbeats':
self.conf.AGENT.log_agent_heartbeats},
'agent_type': n_const.AGENT_TYPE_OVS,
'start_flag': True}
if tunnel_types:
self.enable_tunneling = True
else:
self.enable_tunneling = False
# Validate agent configurations
self._check_agent_configurations()
# Keep track of int_br's device count for use by _report_state()
self.int_br_device_count = 0
self.agent_uuid_stamp = uuid.uuid4().int & UINT64_BITMASK
self.int_br = self.br_int_cls(integ_br)
self.setup_integration_br()
# Stores port update notifications for processing in main rpc loop
self.updated_ports = set()
# Stores port delete notifications
self.deleted_ports = set()
self.network_ports = collections.defaultdict(set)
# keeps association between ports and ofports to detect ofport change
self.vifname_to_ofport_map = {}
self.setup_rpc()
self.init_extension_manager(self.connection)
self.bridge_mappings = bridge_mappings
self.setup_physical_bridges(self.bridge_mappings)
self.local_vlan_map = {}
self.tun_br_ofports = {p_const.TYPE_GENEVE: {},
p_const.TYPE_GRE: {},
p_const.TYPE_VXLAN: {}}
self.polling_interval = polling_interval
self.minimize_polling = minimize_polling
self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval
self.local_ip = local_ip
self.tunnel_count = 0
self.vxlan_udp_port = self.conf.AGENT.vxlan_udp_port
self.dont_fragment = self.conf.AGENT.dont_fragment
self.tunnel_csum = cfg.CONF.AGENT.tunnel_csum
self.tun_br = None
self.patch_int_ofport = constants.OFPORT_INVALID
self.patch_tun_ofport = constants.OFPORT_INVALID
if self.enable_tunneling:
# The patch_int_ofport and patch_tun_ofport are updated
# here inside the call to setup_tunnel_br()
self.setup_tunnel_br(tun_br)
self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
self.context,
self.dvr_plugin_rpc,
self.int_br,
self.tun_br,
self.bridge_mappings,
self.phys_brs,
self.int_ofports,
self.phys_ofports,
self.patch_int_ofport,
self.patch_tun_ofport,
self.conf.host,
self.enable_tunneling,
self.enable_distributed_routing)
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
if self.enable_tunneling:
self.setup_tunnel_br_flows()
self.dvr_agent.setup_dvr_flows()
# Collect additional bridges to monitor
self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
        # In order to keep an existing device's local vlan unchanged,
        # restore the local vlan mapping at start
self._restore_local_vlan_map()
# Security group agent support
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, self.local_vlan_map,
defer_refresh_firewall=True)
# Initialize iteration counter
self.iter_num = 0
self.run_daemon_loop = True
self.catch_sigterm = False
self.catch_sighup = False
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
self.quitting_rpc_timeout = quitting_rpc_timeout
def _report_state(self):
# How many devices are likely used by a VM
self.agent_state.get('configurations')['devices'] = (
self.int_br_device_count)
self.agent_state.get('configurations')['in_distributed_mode'] = (
self.dvr_agent.in_distributed_mode())
try:
self.state_rpc.report_state(self.context,
self.agent_state,
self.use_call)
self.use_call = False
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _restore_local_vlan_map(self):
cur_ports = self.int_br.get_vif_ports()
port_names = [p.port_name for p in cur_ports]
port_info = self.int_br.get_ports_attributes(
"Port", columns=["name", "other_config", "tag"], ports=port_names)
by_name = {x['name']: x for x in port_info}
for port in cur_ports:
            # if a port was deleted between get_vif_ports and
            # get_ports_attributes, we will get a KeyError
try:
local_vlan_map = by_name[port.port_name]['other_config']
local_vlan = by_name[port.port_name]['tag']
except KeyError:
continue
if not local_vlan:
continue
net_uuid = local_vlan_map.get('net_uuid')
if (net_uuid and net_uuid not in self.local_vlan_map
and local_vlan != DEAD_VLAN_TAG):
self.provision_local_vlan(local_vlan_map['net_uuid'],
local_vlan_map['network_type'],
local_vlan_map['physical_network'],
int(local_vlan_map[
'segmentation_id']),
local_vlan)
def setup_rpc(self):
self.agent_id = 'ovs-agent-%s' % self.conf.host
self.topic = topics.AGENT
self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.endpoints = [self]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.PORT, topics.DELETE],
[constants.TUNNEL, topics.UPDATE],
[constants.TUNNEL, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.DVR, topics.UPDATE],
[topics.NETWORK, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION,
topics.UPDATE, self.conf.host])
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
def init_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
self.ext_manager = (
ext_manager.AgentExtensionsManager(self.conf))
self.ext_manager.initialize(
connection, constants.EXTENSION_DRIVER_TYPE)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in six.iteritems(self.local_vlan_map):
if vif_id in vlan_mapping.vif_ports:
return network_id
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
self.updated_ports.discard(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
for port_id in self.network_ports[network_id]:
# notifications could arrive out of order, if the port is deleted
# we don't want to update it anymore
if port_id not in self.deleted_ports:
self.updated_ports.add(port_id)
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.network_ports[network_id]})
def _clean_network_ports(self, port_id):
for port_set in self.network_ports.values():
if port_id in port_set:
port_set.remove(port_id)
break
def process_deleted_ports(self, port_info):
# don't try to process removed ports as deleted ports since
# they are already gone
if 'removed' in port_info:
self.deleted_ports -= port_info['removed']
deleted_ports = list(self.deleted_ports)
while self.deleted_ports:
port_id = self.deleted_ports.pop()
port = self.int_br.get_vif_port_by_id(port_id)
self._clean_network_ports(port_id)
self.ext_manager.delete_port(self.context,
{"vif_port": port,
"port_id": port_id})
# move to dead VLAN so deleted ports no
# longer have access to the network
if port:
# don't log errors since there is a chance someone will be
# removing the port from the bridge at the same time
self.port_dead(port, log_errors=False)
self.port_unbound(port_id)
# Flush firewall rules after ports are put on dead VLAN to be
# more secure
self.sg_agent.remove_devices_filter(deleted_ports)
def tunnel_update(self, context, **kwargs):
LOG.debug("tunnel_update received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_ip_hex = self.get_ip_in_hex(tunnel_ip)
if not tunnel_ip_hex:
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_LE("No tunnel_type specified, cannot create tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_LE("tunnel_type %s not supported by agent"),
tunnel_type)
return
if tunnel_ip == self.local_ip:
return
tun_name = '%s-%s' % (tunnel_type, tunnel_ip_hex)
if not self.l2_pop:
self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
tunnel_type)
def tunnel_delete(self, context, **kwargs):
LOG.debug("tunnel_delete received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
if not tunnel_ip:
LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels"))
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_LE("No tunnel_type specified, cannot delete tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_LE("tunnel_type %s not supported by agent"),
tunnel_type)
return
ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)
def _tunnel_port_lookup(self, network_type, remote_ip):
return self.tun_br_ofports[network_type].get(remote_ip)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_add_tun(context, deferred_br, lvm,
agent_ports, self._tunnel_port_lookup)
else:
self.fdb_add_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_remove_tun(context, deferred_br, lvm,
agent_ports,
self._tunnel_port_lookup)
else:
self.fdb_remove_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
lvm.tun_ofports.add(ofport)
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.install_unicast_to_tun(lvm.vlan,
lvm.segmentation_id,
ofport,
port_info.mac_address)
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
if ofport not in lvm.tun_ofports:
LOG.debug("attempt to remove a non-existent port %s", ofport)
return
lvm.tun_ofports.remove(ofport)
if len(lvm.tun_ofports) > 0:
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
# This local vlan doesn't require any more tunnelling
br.delete_flood_to_tun(lvm.vlan)
else:
self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
with self.tun_br.deferred() as deferred_br:
self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
self.local_ip, self.local_vlan_map)
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
'''Set the ARP respond entry.
When the l2 population mechanism driver and OVS supports to edit ARP
fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the
tunnel bridge.
'''
if not self.arp_responder_enabled:
return
ip = netaddr.IPAddress(ip_address)
if ip.version == 6:
return
ip = str(ip)
mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect))
if action == 'add':
br.install_arp_responder(local_vid, ip, mac)
elif action == 'remove':
br.delete_arp_responder(local_vid, ip)
else:
LOG.warning(_LW('Action %s not supported'), action)
def _local_vlan_for_flat(self, lvid, physical_network):
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=None,
distributed=False)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=None)
def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id):
distributed = self.enable_distributed_routing
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=segmentation_id)
def provision_local_vlan(self, net_uuid, network_type, physical_network,
segmentation_id, local_vlan=None):
'''Provisions a local VLAN.
:param net_uuid: the uuid of the network associated with this vlan.
:param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
'local', 'geneve')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
# On a restart or crash of OVS, the network associated with this VLAN
# will already be assigned, so check for that here before assigning a
# new one.
lvm = self.local_vlan_map.get(net_uuid)
if lvm:
lvid = lvm.vlan
else:
if local_vlan in self.available_local_vlans:
lvid = local_vlan
self.available_local_vlans.remove(local_vlan)
else:
if not self.available_local_vlans:
LOG.error(_LE("No local VLAN available for net-id=%s"),
net_uuid)
return
lvid = self.available_local_vlans.pop()
self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid,
network_type,
physical_network,
segmentation_id)
LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
"net-id=%(net_uuid)s"),
{'vlan_id': lvid, 'net_uuid': net_uuid})
if network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
# outbound broadcast/multicast
ofports = list(self.tun_br_ofports[network_type].values())
if ofports:
self.tun_br.install_flood_to_tun(lvid,
segmentation_id,
ofports)
# inbound from tunnels: set lvid in the right table
# and resubmit to Table LEARN_FROM_TUN for mac learning
if self.enable_distributed_routing:
self.dvr_agent.process_tunneled_network(
network_type, lvid, segmentation_id)
else:
self.tun_br.provision_local_vlan(
network_type=network_type, lvid=lvid,
segmentation_id=segmentation_id)
else:
LOG.error(_LE("Cannot provision %(network_type)s network for "
"net-id=%(net_uuid)s - tunneling disabled"),
{'network_type': network_type,
'net_uuid': net_uuid})
elif network_type == p_const.TYPE_FLAT:
if physical_network in self.phys_brs:
self._local_vlan_for_flat(lvid, physical_network)
else:
LOG.error(_LE("Cannot provision flat network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == p_const.TYPE_VLAN:
if physical_network in self.phys_brs:
self._local_vlan_for_vlan(lvid, physical_network,
segmentation_id)
else:
LOG.error(_LE("Cannot provision VLAN network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot provision unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': network_type,
'net_uuid': net_uuid})
def reclaim_local_vlan(self, net_uuid):
'''Reclaim a local VLAN.
:param net_uuid: the network uuid associated with this vlan.
'''
lvm = self.local_vlan_map.pop(net_uuid, None)
if lvm is None:
LOG.debug("Network %s not used on agent.", net_uuid)
return
LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
"net-id = %(net_uuid)s"),
{'vlan_id': lvm.vlan, 'net_uuid': net_uuid})
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
self.tun_br.reclaim_local_vlan(
network_type=lvm.network_type,
segmentation_id=lvm.segmentation_id)
self.tun_br.delete_flood_to_tun(lvm.vlan)
self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
self.tun_br.delete_arp_responder(lvm.vlan, None)
if self.l2_pop:
# Try to remove tunnel ports if not used by other networks
for ofport in lvm.tun_ofports:
self.cleanup_tunnel_port(self.tun_br, ofport,
lvm.network_type)
elif lvm.network_type == p_const.TYPE_FLAT:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=None)
elif lvm.network_type == p_const.TYPE_VLAN:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=lvm.segmentation_id)
elif lvm.network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot reclaim unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': lvm.network_type,
'net_uuid': net_uuid})
self.available_local_vlans.add(lvm.vlan)
def port_bound(self, port, net_uuid,
network_type, physical_network,
segmentation_id, fixed_ips, device_owner,
ovs_restarted):
'''Bind port to net_uuid/lsw_id and install flow for inbound traffic
to vm.
:param port: a ovs_lib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param fixed_ips: the ip addresses assigned to this port
:param device_owner: the string indicative of owner of this port
:param ovs_restarted: indicates if this is called for an OVS restart.
'''
if net_uuid not in self.local_vlan_map or ovs_restarted:
self.provision_local_vlan(net_uuid, network_type,
physical_network, segmentation_id)
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports[port.vif_id] = port
self.dvr_agent.bind_port_to_dvr(port, lvm,
fixed_ips,
device_owner)
port_other_config = self.int_br.db_get_val("Port", port.port_name,
"other_config")
vlan_mapping = {'net_uuid': net_uuid,
'network_type': network_type,
'physical_network': physical_network,
'segmentation_id': segmentation_id}
port_other_config.update(vlan_mapping)
self.int_br.set_db_attribute("Port", port.port_name, "other_config",
port_other_config)
def _bind_devices(self, need_binding_ports):
devices_up = []
devices_down = []
port_names = [p['vif_port'].port_name for p in need_binding_ports]
port_info = self.int_br.get_ports_attributes(
"Port", columns=["name", "tag"], ports=port_names, if_exists=True)
tags_by_name = {x['name']: x['tag'] for x in port_info}
for port_detail in need_binding_ports:
lvm = self.local_vlan_map.get(port_detail['network_id'])
if not lvm:
# network for port was deleted. skip this port since it
# will need to be handled as a DEAD port in the next scan
continue
port = port_detail['vif_port']
device = port_detail['device']
# Do not bind a port if it's already bound
cur_tag = tags_by_name.get(port.port_name)
if cur_tag is None:
LOG.info(_LI("Port %s was deleted concurrently, skipping it"),
port.port_name)
continue
if cur_tag != lvm.vlan:
self.int_br.delete_flows(in_port=port.ofport)
if self.prevent_arp_spoofing:
self.setup_arp_spoofing_protection(self.int_br,
port, port_detail)
if cur_tag != lvm.vlan:
self.int_br.set_db_attribute(
"Port", port.port_name, "tag", lvm.vlan)
# update plugin about port status
# FIXME(salv-orlando): Failures while updating device status
# must be handled appropriately. Otherwise this might prevent
# neutron server from sending network-vif-* events to the nova
# API server, thus possibly preventing instance spawn.
if port_detail.get('admin_state_up'):
LOG.debug("Setting status for %s to UP", device)
devices_up.append(device)
else:
LOG.debug("Setting status for %s to DOWN", device)
devices_down.append(device)
failed_devices = []
if devices_up or devices_down:
devices_set = self.plugin_rpc.update_device_list(
self.context, devices_up, devices_down, self.agent_id,
self.conf.host)
failed_devices = (devices_set.get('failed_devices_up') +
devices_set.get('failed_devices_down'))
if failed_devices:
LOG.error(_LE("Configuration for devices %s failed!"),
failed_devices)
#TODO(rossella_s) handle better the resync in next patches,
# this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=failed_devices)
LOG.info(_LI("Configuration for devices up %(up)s and devices "
"down %(down)s completed."),
{'up': devices_up, 'down': devices_down})
@staticmethod
def setup_arp_spoofing_protection(bridge, vif, port_details):
# clear any previous flows related to this port in our ARP table
bridge.delete_arp_spoofing_protection(port=vif.ofport)
if not port_details.get('port_security_enabled', True):
LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
"it has port security disabled"), vif.port_name)
return
if port_details['device_owner'].startswith('network:'):
LOG.debug("Skipping ARP spoofing rules for network owned port "
"'%s'.", vif.port_name)
return
# collect all of the addresses and cidrs that belong to the port
addresses = {f['ip_address'] for f in port_details['fixed_ips']}
if port_details.get('allowed_address_pairs'):
addresses |= {p['ip_address']
for p in port_details['allowed_address_pairs']}
addresses = {ip for ip in addresses
if netaddr.IPNetwork(ip).version == 4}
if any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in addresses):
# don't try to install protection because a /0 prefix allows any
# address anyway and the ARP_SPA can only match on /1 or more.
return
bridge.install_arp_spoofing_protection(port=vif.ofport,
ip_addresses=addresses)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
if vif_id in lvm.vif_ports:
vif_port = lvm.vif_ports[vif_id]
self.dvr_agent.unbind_port_from_dvr(vif_port, lvm)
lvm.vif_ports.pop(vif_id, None)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
def port_dead(self, port, log_errors=True):
'''Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.
'''
# Don't kill a port if it's already dead
cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag",
log_errors=log_errors)
if cur_tag != DEAD_VLAN_TAG:
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG, log_errors=log_errors)
self.int_br.drop_port(in_port=port.ofport)
def setup_integration_br(self):
'''Setup the integration bridge.
'''
self.int_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
# Ensure the integration bridge is created.
# ovs_lib.OVSBridge.create() will run
# ovs-vsctl -- --may-exist add-br BRIDGE_NAME
# which does nothing if bridge already exists.
self.int_br.create()
self.int_br.set_secure_mode()
self.int_br.setup_controllers(self.conf)
self.int_br.delete_port(self.conf.OVS.int_peer_patch_port)
if self.conf.AGENT.drop_flows_on_start:
self.int_br.delete_flows()
self.int_br.setup_default_table()
def setup_ancillary_bridges(self, integ_br, tun_br):
'''Setup ancillary bridges - for example br-ex.'''
ovs = ovs_lib.BaseOVS()
ovs_bridges = set(ovs.get_bridges())
# Remove all known bridges
ovs_bridges.remove(integ_br)
if self.enable_tunneling:
ovs_bridges.remove(tun_br)
br_names = [self.phys_brs[physical_network].br_name for
physical_network in self.phys_brs]
ovs_bridges.difference_update(br_names)
# Filter list of bridges to those that have external
# bridge-id's configured
br_names = []
for bridge in ovs_bridges:
bridge_id = ovs.get_bridge_external_bridge_id(bridge)
if bridge_id != bridge:
br_names.append(bridge)
ovs_bridges.difference_update(br_names)
ancillary_bridges = []
for bridge in ovs_bridges:
br = ovs_lib.OVSBridge(bridge)
LOG.info(_LI('Adding %s to list of bridges.'), bridge)
ancillary_bridges.append(br)
return ancillary_bridges
def setup_tunnel_br(self, tun_br_name=None):
'''(re)initialize the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br_name: the name of the tunnel bridge.
'''
if not self.tun_br:
self.tun_br = self.br_tun_cls(tun_br_name)
self.tun_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
if not self.tun_br.bridge_exists(self.tun_br.br_name):
self.tun_br.create(secure_mode=True)
self.tun_br.setup_controllers(self.conf)
if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or
self.patch_tun_ofport == ovs_lib.INVALID_OFPORT):
self.patch_tun_ofport = self.int_br.add_patch_port(
self.conf.OVS.int_peer_patch_port,
self.conf.OVS.tun_peer_patch_port)
if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or
self.patch_int_ofport == ovs_lib.INVALID_OFPORT):
self.patch_int_ofport = self.tun_br.add_patch_port(
self.conf.OVS.tun_peer_patch_port,
self.conf.OVS.int_peer_patch_port)
if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport,
self.patch_int_ofport):
LOG.error(_LE("Failed to create OVS patch port. Cannot have "
"tunneling enabled on this agent, since this "
"version of OVS does not support tunnels or patch "
"ports. Agent terminated!"))
exit(1)
if self.conf.AGENT.drop_flows_on_start:
self.tun_br.delete_flows()
def setup_tunnel_br_flows(self):
'''Setup the tunnel bridge.
Add all flows to the tunnel bridge.
'''
self.tun_br.setup_default_table(self.patch_int_ofport,
self.arp_responder_enabled)
def get_peer_name(self, prefix, name):
"""Construct a peer name based on the prefix and name.
The peer name can not exceed the maximum length allowed for a linux
device. Longer names are hashed to help ensure uniqueness.
"""
if len(prefix + name) <= n_const.DEVICE_NAME_MAX_LEN:
return prefix + name
# We can't just truncate because bridges may be distinguished
# by an ident at the end. A hash over the name should be unique.
# Leave part of the bridge name on for easier identification
hashlen = 6
namelen = n_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen
if isinstance(name, six.text_type):
hashed_name = hashlib.sha1(name.encode('utf-8'))
else:
hashed_name = hashlib.sha1(name)
new_name = ('%(prefix)s%(truncated)s%(hash)s' %
{'prefix': prefix, 'truncated': name[0:namelen],
'hash': hashed_name.hexdigest()[0:hashlen]})
LOG.warning(_LW("Creating an interface named %(name)s exceeds the "
"%(limit)d character limitation. It was shortened to "
"%(new_name)s to fit."),
{'name': name, 'limit': n_const.DEVICE_NAME_MAX_LEN,
'new_name': new_name})
return new_name
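    # Worked example (editor's note, assuming DEVICE_NAME_MAX_LEN == 15):
    # get_peer_name('int-', 'br-provider-net1') exceeds the limit, so the
    # result keeps the prefix, the first 5 characters of the bridge name
    # and a 6-character sha1 suffix, i.e. 'int-br-pr' plus 6 hex digits.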
def setup_physical_bridges(self, bridge_mappings):
'''Setup the physical network bridges.
Creates physical network bridges and links them to the
integration bridge using veths or patch ports.
:param bridge_mappings: map physical network names to bridge names.
'''
self.phys_brs = {}
self.int_ofports = {}
self.phys_ofports = {}
ip_wrapper = ip_lib.IPWrapper()
ovs = ovs_lib.BaseOVS()
ovs_bridges = ovs.get_bridges()
for physical_network, bridge in six.iteritems(bridge_mappings):
LOG.info(_LI("Mapping physical network %(physical_network)s to "
"bridge %(bridge)s"),
{'physical_network': physical_network,
'bridge': bridge})
# setup physical bridge
if bridge not in ovs_bridges:
LOG.error(_LE("Bridge %(bridge)s for physical network "
"%(physical_network)s does not exist. Agent "
"terminated!"),
{'physical_network': physical_network,
'bridge': bridge})
sys.exit(1)
br = self.br_phys_cls(bridge)
br.setup_controllers(self.conf)
br.setup_default_table()
self.phys_brs[physical_network] = br
# interconnect physical and integration bridges using veth/patchs
int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX,
bridge)
phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX,
bridge)
            # The interface type of the port must be the same on the
            # physical and integration bridges, so check only one of them.
int_type = self.int_br.db_get_val("Interface", int_if_name, "type")
if self.use_veth_interconnection:
                # Drop ports if the interface type doesn't match the
                # configuration value.
if int_type == 'patch':
self.int_br.delete_port(int_if_name)
br.delete_port(phys_if_name)
if ip_lib.device_exists(int_if_name):
ip_lib.IPDevice(int_if_name).link.delete()
# Give udev a chance to process its rules here, to avoid
# race conditions between commands launched by udev rules
# and the subsequent call to ip_wrapper.add_veth
utils.execute(['udevadm', 'settle', '--timeout=10'])
int_veth, phys_veth = ip_wrapper.add_veth(int_if_name,
phys_if_name)
int_ofport = self.int_br.add_port(int_veth)
phys_ofport = br.add_port(phys_veth)
else:
# Drop ports if the interface type doesn't match the
# configuration value
if int_type == 'veth':
self.int_br.delete_port(int_if_name)
br.delete_port(phys_if_name)
# Create patch ports without associating them in order to block
# untranslated traffic before association
int_ofport = self.int_br.add_patch_port(
int_if_name, constants.NONEXISTENT_PEER)
phys_ofport = br.add_patch_port(
phys_if_name, constants.NONEXISTENT_PEER)
self.int_ofports[physical_network] = int_ofport
self.phys_ofports[physical_network] = phys_ofport
# block all untranslated traffic between bridges
self.int_br.drop_port(in_port=int_ofport)
br.drop_port(in_port=phys_ofport)
if self.use_veth_interconnection:
# enable veth to pass traffic
int_veth.link.set_up()
phys_veth.link.set_up()
if self.veth_mtu:
# set up mtu size for veth interfaces
int_veth.link.set_mtu(self.veth_mtu)
phys_veth.link.set_mtu(self.veth_mtu)
else:
# associate patch ports to pass traffic
self.int_br.set_db_attribute('Interface', int_if_name,
'options:peer', phys_if_name)
br.set_db_attribute('Interface', phys_if_name,
'options:peer', int_if_name)
def update_stale_ofport_rules(self):
# right now the ARP spoofing rules are the only thing that utilizes
# ofport-based rules, so make arp_spoofing protection a conditional
# until something else uses ofport
if not self.prevent_arp_spoofing:
return []
previous = self.vifname_to_ofport_map
current = self.int_br.get_vif_port_to_ofport_map()
# if any ofport numbers have changed, re-process the devices as
# added ports so any rules based on ofport numbers are updated.
moved_ports = self._get_ofport_moves(current, previous)
# delete any stale rules based on removed ofports
ofports_deleted = set(previous.values()) - set(current.values())
for ofport in ofports_deleted:
self.int_br.delete_arp_spoofing_protection(port=ofport)
# store map for next iteration
self.vifname_to_ofport_map = current
return moved_ports
@staticmethod
def _get_ofport_moves(current, previous):
"""Returns a list of moved ports.
Takes two port->ofport maps and returns a list ports that moved to a
different ofport. Deleted ports are not included.
"""
port_moves = []
for name, ofport in previous.items():
if name not in current:
continue
current_ofport = current[name]
if ofport != current_ofport:
port_moves.append(name)
return port_moves
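    # Editor's example: with previous = {'tap1': 1, 'tap2': 2} and
    # current = {'tap1': 3, 'tap2': 2}, this returns ['tap1']; a port that
    # appears only in ``previous`` (i.e. was deleted) is skipped.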
def _get_port_info(self, registered_ports, cur_ports,
readd_registered_ports):
port_info = {'current': cur_ports}
# FIXME(salv-orlando): It's not really necessary to return early
# if nothing has changed.
if not readd_registered_ports and cur_ports == registered_ports:
return port_info
if readd_registered_ports:
port_info['added'] = cur_ports
else:
port_info['added'] = cur_ports - registered_ports
# Update port_info with ports not found on the integration bridge
port_info['removed'] = registered_ports - cur_ports
return port_info
def scan_ports(self, registered_ports, sync, updated_ports=None):
cur_ports = self.int_br.get_vif_port_set()
self.int_br_device_count = len(cur_ports)
port_info = self._get_port_info(registered_ports, cur_ports, sync)
if updated_ports is None:
updated_ports = set()
updated_ports.update(self.check_changed_vlans())
if updated_ports:
# Some updated ports might have been removed in the
# meanwhile, and therefore should not be processed.
# In this case the updated port won't be found among
# current ports.
updated_ports &= cur_ports
if updated_ports:
port_info['updated'] = updated_ports
return port_info
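    # Editor's note on the shape returned above: port_info always carries
    # 'current'; 'added' and 'removed' are filled in by _get_port_info
    # (unless nothing changed and sync is False), and 'updated' appears
    # only when non-empty.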
def scan_ancillary_ports(self, registered_ports, sync):
cur_ports = set()
for bridge in self.ancillary_brs:
cur_ports |= bridge.get_vif_port_set()
return self._get_port_info(registered_ports, cur_ports, sync)
def check_changed_vlans(self):
"""Return ports which have lost their vlan tag.
The returned value is a set of port ids of the ports concerned by a
vlan tag loss.
"""
port_tags = self.int_br.get_port_tag_dict()
changed_ports = set()
for lvm in self.local_vlan_map.values():
for port in lvm.vif_ports.values():
if (
port.port_name in port_tags
and port_tags[port.port_name] != lvm.vlan
):
LOG.info(
_LI("Port '%(port_name)s' has lost "
"its vlan tag '%(vlan_tag)d'!"),
{'port_name': port.port_name,
'vlan_tag': lvm.vlan}
)
changed_ports.add(port.vif_id)
return changed_ports
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up,
fixed_ips, device_owner, ovs_restarted):
# When this function is called for a port, the port should have
# an OVS ofport configured, as only these ports were considered
# for being treated. If that does not happen, it is a potential
# error condition of which operators should be aware
port_needs_binding = True
if not vif_port.ofport:
LOG.warn(_LW("VIF port: %s has no ofport configured, "
"and might not be able to transmit"), vif_port.vif_id)
if vif_port:
if admin_state_up:
self.port_bound(vif_port, network_id, network_type,
physical_network, segmentation_id,
fixed_ips, device_owner, ovs_restarted)
else:
self.port_dead(vif_port)
port_needs_binding = False
else:
LOG.debug("No VIF port for port %s defined on agent.", port_id)
return port_needs_binding
def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
ofport = br.add_tunnel_port(port_name,
remote_ip,
self.local_ip,
tunnel_type,
self.vxlan_udp_port,
self.dont_fragment,
self.tunnel_csum)
if ofport == ovs_lib.INVALID_OFPORT:
LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': tunnel_type, 'ip': remote_ip})
return 0
self.tun_br_ofports[tunnel_type][remote_ip] = ofport
# Add flow in default table to resubmit to the right
# tunnelling table (lvid will be set in the latter)
br.setup_tunnel_port(tunnel_type, ofport)
ofports = self.tun_br_ofports[tunnel_type].values()
if ofports and not self.l2_pop:
# Update flooding flows to include the new tunnel
for vlan_mapping in list(self.local_vlan_map.values()):
if vlan_mapping.network_type == tunnel_type:
br.install_flood_to_tun(vlan_mapping.vlan,
vlan_mapping.segmentation_id,
ofports)
return ofport
def setup_tunnel_port(self, br, remote_ip, network_type):
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
return 0
port_name = '%s-%s' % (network_type, remote_ip_hex)
ofport = self._setup_tunnel_port(br,
port_name,
remote_ip,
network_type)
return ofport
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
# Check if this tunnel port is still used
for lvm in self.local_vlan_map.values():
if tun_ofport in lvm.tun_ofports:
break
# If not, remove it
else:
items = list(self.tun_br_ofports[tunnel_type].items())
for remote_ip, ofport in items:
if ofport == tun_ofport:
port_name = '%s-%s' % (tunnel_type,
self.get_ip_in_hex(remote_ip))
br.delete_port(port_name)
br.cleanup_tunnel_port(ofport)
self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
def treat_devices_added_or_updated(self, devices, ovs_restarted):
skipped_devices = []
need_binding_devices = []
security_disabled_devices = []
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(
self.context,
devices,
self.agent_id,
self.conf.host))
if devices_details_list.get('failed_devices'):
#TODO(rossella_s) handle better the resync in next patches,
# this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=devices)
devices = devices_details_list.get('devices')
vif_by_id = self.int_br.get_vifs_by_ids(
[vif['device'] for vif in devices])
for details in devices:
device = details['device']
LOG.debug("Processing port: %s", device)
port = vif_by_id.get(device)
if not port:
# The port disappeared and cannot be processed
LOG.info(_LI("Port %s was not found on the integration bridge "
"and will therefore not be processed"), device)
skipped_devices.append(device)
continue
if 'port_id' in details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': details})
details['vif_port'] = port
need_binding = self.treat_vif_port(port, details['port_id'],
details['network_id'],
details['network_type'],
details['physical_network'],
details['segmentation_id'],
details['admin_state_up'],
details['fixed_ips'],
details['device_owner'],
ovs_restarted)
if need_binding:
need_binding_devices.append(details)
port_security = details['port_security_enabled']
has_sgs = 'security_groups' in details
if not port_security or not has_sgs:
security_disabled_devices.append(device)
self._update_port_network(details['port_id'],
details['network_id'])
self.ext_manager.handle_port(self.context, details)
else:
LOG.warn(_LW("Device %s not defined on plugin"), device)
if (port and port.ofport != -1):
self.port_dead(port)
return skipped_devices, need_binding_devices, security_disabled_devices
def _update_port_network(self, port_id, network_id):
self._clean_network_ports(port_id)
self.network_ports[network_id].add(port_id)
def treat_ancillary_devices_added(self, devices):
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(
self.context,
devices,
self.agent_id,
self.conf.host))
if devices_details_list.get('failed_devices'):
#TODO(rossella_s) handle better the resync in next patches,
# this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=devices)
devices_added = [
d['device'] for d in devices_details_list.get('devices')]
LOG.info(_LI("Ancillary Ports %s added"), devices_added)
# update plugin about port status
devices_set_up = (
self.plugin_rpc.update_device_list(self.context,
devices_added,
[],
self.agent_id,
self.conf.host))
if devices_set_up.get('failed_devices_up'):
#TODO(rossella_s) handle better the resync in next patches,
# this is just to preserve the current behavior
raise DeviceListRetrievalError()
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
LOG.info(_LI("Ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for device in devices:
self.port_unbound(device)
return resync
def treat_ancillary_devices_removed(self, devices):
resync = False
LOG.info(_LI("Ancillary ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for detail in devices_down.get('devices_down'):
if detail['exists']:
LOG.info(_LI("Port %s updated."), detail['device'])
# Nothing to do regarding local networking
else:
LOG.debug("Device %s not defined on plugin", detail['device'])
return resync
def process_network_ports(self, port_info, ovs_restarted):
resync_a = False
resync_b = False
# TODO(salv-orlando): consider a solution for ensuring notifications
# are processed exactly in the same order in which they were
# received. This is tricky because there are two notification
# sources: the neutron server, and the ovs db monitor process
# If there is an exception while processing security groups ports
# will not be wired anyway, and a resync will be triggered
# VIF wiring needs to be performed always for 'new' devices.
# For updated ports, re-wiring is not needed in most cases, but needs
# to be performed anyway when the admin state of a device is changed.
# A device might be both in the 'added' and 'updated'
# list at the same time; avoid processing it twice.
devices_added_updated = (port_info.get('added', set()) |
port_info.get('updated', set()))
need_binding_devices = []
security_disabled_ports = []
if devices_added_updated:
start = time.time()
try:
(skipped_devices, need_binding_devices,
security_disabled_ports) = (
self.treat_devices_added_or_updated(
devices_added_updated, ovs_restarted))
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_added_or_updated completed. "
"Skipped %(num_skipped)d devices of "
"%(num_current)d devices currently available. "
"Time elapsed: %(elapsed).3f",
{'iter_num': self.iter_num,
'num_skipped': len(skipped_devices),
'num_current': len(port_info['current']),
'elapsed': time.time() - start})
# Update the list of current ports storing only those which
# have been actually processed.
port_info['current'] = (port_info['current'] -
set(skipped_devices))
except DeviceListRetrievalError:
# Need to resync as there was an error with server
# communication.
LOG.exception(_LE("process_network_ports - iteration:%d - "
"failure while retrieving port details "
"from server"), self.iter_num)
resync_a = True
# TODO(salv-orlando): Optimize avoiding applying filters
# unnecessarily, (eg: when there are no IP address changes)
added_ports = port_info.get('added', set())
if security_disabled_ports:
added_ports -= set(security_disabled_ports)
self.sg_agent.setup_port_filters(added_ports,
port_info.get('updated', set()))
self._bind_devices(need_binding_devices)
if 'removed' in port_info and port_info['removed']:
start = time.time()
resync_b = self.treat_devices_removed(port_info['removed'])
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_removed completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def process_ancillary_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info and port_info['added']:
start = time.time()
try:
self.treat_ancillary_devices_added(port_info['added'])
LOG.debug("process_ancillary_network_ports - iteration: "
"%(iter_num)d - treat_ancillary_devices_added "
"completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
except DeviceListRetrievalError:
# Need to resync as there was an error with server
# communication.
LOG.exception(_LE("process_ancillary_network_ports - "
"iteration:%d - failure while retrieving "
"port details from server"), self.iter_num)
resync_a = True
if 'removed' in port_info and port_info['removed']:
start = time.time()
resync_b = self.treat_ancillary_devices_removed(
port_info['removed'])
LOG.debug("process_ancillary_network_ports - iteration: "
"%(iter_num)d - treat_ancillary_devices_removed "
"completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def get_ip_in_hex(self, ip_address):
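# Returns the zero-padded 8-character hex form of an IPv4 address
# (e.g. '10.0.0.1' -> '0a000001'); returns None for invalid input.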
try:
return '%08x' % netaddr.IPAddress(ip_address, version=4)
except Exception:
LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
return
def tunnel_sync(self):
try:
for tunnel_type in self.tunnel_types:
details = self.plugin_rpc.tunnel_sync(self.context,
self.local_ip,
tunnel_type,
self.conf.host)
if not self.l2_pop:
tunnels = details['tunnels']
for tunnel in tunnels:
if self.local_ip != tunnel['ip_address']:
remote_ip = tunnel['ip_address']
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
continue
tun_name = '%s-%s' % (tunnel_type, remote_ip_hex)
self._setup_tunnel_port(self.tun_br,
tun_name,
tunnel['ip_address'],
tunnel_type)
except Exception as e:
LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s",
{'local_ip': self.local_ip, 'e': e})
return True
return False
def _agent_has_updates(self, polling_manager):
return (polling_manager.is_polling_required or
self.updated_ports or
self.deleted_ports or
self.sg_agent.firewall_refresh_needed())
def _port_info_has_changes(self, port_info):
return (port_info.get('added') or
port_info.get('removed') or
port_info.get('updated'))
def check_ovs_status(self):
# Check for the canary flow
status = self.int_br.check_canary_table()
if status == constants.OVS_RESTARTED:
LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
"bridges and recover ports."))
elif status == constants.OVS_DEAD:
LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
"and checking OVS status periodically."))
return status
def loop_count_and_wait(self, start_time, port_stats):
# sleep till end of polling interval
elapsed = time.time() - start_time
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d "
"completed. Processed ports statistics: "
"%(port_stats)s. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'port_stats': port_stats,
'elapsed': elapsed})
if elapsed < self.polling_interval:
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
def get_port_stats(self, port_info, ancillary_port_info):
port_stats = {
'regular': {
'added': len(port_info.get('added', [])),
'updated': len(port_info.get('updated', [])),
'removed': len(port_info.get('removed', []))}}
if self.ancillary_brs:
port_stats['ancillary'] = {
'added': len(ancillary_port_info.get('added', [])),
'removed': len(ancillary_port_info.get('removed', []))}
return port_stats
def cleanup_stale_flows(self):
if self.iter_num == 0:
bridges = [self.int_br]
if self.enable_tunneling:
bridges.append(self.tun_br)
for bridge in bridges:
LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name)
bridge.cleanup_flows()
def rpc_loop(self, polling_manager=None):
if not polling_manager:
polling_manager = polling.get_polling_manager(
minimize_polling=False)
sync = True
ports = set()
updated_ports_copy = set()
ancillary_ports = set()
tunnel_sync = True
ovs_restarted = False
consecutive_resyncs = 0
while self._check_and_handle_signal():
port_info = {}
ancillary_port_info = {}
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
polling_manager.force_polling()
consecutive_resyncs = consecutive_resyncs + 1
if consecutive_resyncs >= constants.MAX_DEVICE_RETRIES:
LOG.warn(_LW("Clearing cache of registered ports, retrials"
" to resync were > %s"),
constants.MAX_DEVICE_RETRIES)
ports.clear()
ancillary_ports.clear()
sync = False
consecutive_resyncs = 0
else:
consecutive_resyncs = 0
ovs_status = self.check_ovs_status()
if ovs_status == constants.OVS_RESTARTED:
self.setup_integration_br()
self.setup_physical_bridges(self.bridge_mappings)
if self.enable_tunneling:
self.setup_tunnel_br()
self.setup_tunnel_br_flows()
tunnel_sync = True
if self.enable_distributed_routing:
self.dvr_agent.reset_ovs_parameters(self.int_br,
self.tun_br,
self.patch_int_ofport,
self.patch_tun_ofport)
self.dvr_agent.reset_dvr_parameters()
self.dvr_agent.setup_dvr_flows()
elif ovs_status == constants.OVS_DEAD:
# Agent doesn't apply any operations when ovs is dead, to
# prevent unexpected failure or crash. Sleep and continue
# loop in which ovs status will be checked periodically.
port_stats = self.get_port_stats({}, {})
self.loop_count_and_wait(start, port_stats)
continue
# Notify the plugin of tunnel IP
if self.enable_tunneling and tunnel_sync:
LOG.info(_LI("Agent tunnel out of sync with plugin!"))
try:
tunnel_sync = self.tunnel_sync()
except Exception:
LOG.exception(_LE("Error while synchronizing tunnels"))
tunnel_sync = True
ovs_restarted |= (ovs_status == constants.OVS_RESTARTED)
if self._agent_has_updates(polling_manager) or ovs_restarted:
try:
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"starting polling. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Save updated ports dict to perform rollback in
# case resync would be needed, and then clear
# self.updated_ports. As the greenthread should not yield
# between these two statements, this will be thread-safe
updated_ports_copy = self.updated_ports
self.updated_ports = set()
reg_ports = (set() if ovs_restarted else ports)
port_info = self.scan_ports(reg_ports, sync,
updated_ports_copy)
self.process_deleted_ports(port_info)
ofport_changed_ports = self.update_stale_ofport_rules()
if ofport_changed_ports:
port_info.setdefault('updated', set()).update(
ofport_changed_ports)
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"port information retrieved. "
"Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Treat ancillary devices if they exist
if self.ancillary_brs:
ancillary_port_info = self.scan_ancillary_ports(
ancillary_ports, sync)
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"ancillary port info retrieved. "
"Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
sync = False
# Secure and wire/unwire VIFs and update their status
# on Neutron server
if (self._port_info_has_changes(port_info) or
self.sg_agent.firewall_refresh_needed() or
ovs_restarted):
LOG.debug("Starting to process devices in:%s",
port_info)
# If treat devices fails - must resync with plugin
sync = self.process_network_ports(port_info,
ovs_restarted)
self.cleanup_stale_flows()
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"ports processed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
ports = port_info['current']
if self.ancillary_brs:
sync |= self.process_ancillary_network_ports(
ancillary_port_info)
LOG.debug("Agent rpc_loop - iteration: "
"%(iter_num)d - ancillary ports "
"processed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
ancillary_ports = ancillary_port_info['current']
polling_manager.polling_completed()
# Keep this flag in the last line of the "try" block,
# so we can be sure that no other Exception occurred.
if not sync:
ovs_restarted = False
except Exception:
LOG.exception(_LE("Error while processing VIF ports"))
# Put the ports back in self.updated_ports
self.updated_ports |= updated_ports_copy
sync = True
port_stats = self.get_port_stats(port_info, ancillary_port_info)
self.loop_count_and_wait(start, port_stats)
def daemon_loop(self):
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
signal.signal(signal.SIGTERM, self._handle_sigterm)
signal.signal(signal.SIGHUP, self._handle_sighup)
with polling.get_polling_manager(
self.minimize_polling,
self.ovsdb_monitor_respawn_interval) as pm:
self.rpc_loop(polling_manager=pm)
def _handle_sigterm(self, signum, frame):
self.catch_sigterm = True
if self.quitting_rpc_timeout:
self.set_rpc_timeout(self.quitting_rpc_timeout)
def _handle_sighup(self, signum, frame):
self.catch_sighup = True
def _check_and_handle_signal(self):
if self.catch_sigterm:
LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop."))
self.run_daemon_loop = False
self.catch_sigterm = False
if self.catch_sighup:
LOG.info(_LI("Agent caught SIGHUP, resetting."))
self.conf.reload_config_files()
config.setup_logging()
LOG.debug('Full set of CONF:')
self.conf.log_opt_values(LOG, logging.DEBUG)
self.catch_sighup = False
return self.run_daemon_loop
def set_rpc_timeout(self, timeout):
for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
self.dvr_plugin_rpc, self.state_rpc):
rpc_api.client.timeout = timeout
def _check_agent_configurations(self):
if (self.enable_distributed_routing and self.enable_tunneling
and not self.l2_pop):
raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve "
"underlays require L2-pop to be enabled, "
"in both the Agent and Server side."))
def create_agent_config_map(config):
"""Create a map of agent config parameters.
:param config: an instance of cfg.CONF
:returns: a map of agent configuration parameters
"""
try:
bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings)
except ValueError as e:
raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
kwargs = dict(
integ_br=config.OVS.integration_bridge,
tun_br=config.OVS.tunnel_bridge,
local_ip=config.OVS.local_ip,
bridge_mappings=bridge_mappings,
polling_interval=config.AGENT.polling_interval,
minimize_polling=config.AGENT.minimize_polling,
tunnel_types=config.AGENT.tunnel_types,
veth_mtu=config.AGENT.veth_mtu,
enable_distributed_routing=config.AGENT.enable_distributed_routing,
l2_population=config.AGENT.l2_population,
arp_responder=config.AGENT.arp_responder,
prevent_arp_spoofing=config.AGENT.prevent_arp_spoofing,
use_veth_interconnection=config.OVS.use_veth_interconnection,
quitting_rpc_timeout=config.AGENT.quitting_rpc_timeout
)
# Verify the tunnel_types specified are valid
for tun in kwargs['tunnel_types']:
if tun not in constants.TUNNEL_NETWORK_TYPES:
msg = _('Invalid tunnel type specified: %s') % tun
raise ValueError(msg)
if not kwargs['local_ip']:
msg = _('Tunneling cannot be enabled without a valid local_ip.')
raise ValueError(msg)
return kwargs
def validate_local_ip(local_ip):
"""If tunneling is enabled, verify if the ip exists on the agent's host."""
if not cfg.CONF.AGENT.tunnel_types:
return
if not ip_lib.IPWrapper().get_device_by_ip(local_ip):
LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
" IP couldn't be found on this host's interfaces."),
local_ip)
raise SystemExit(1)
def prepare_xen_compute():
is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
if is_xen_compute_host:
# Force ip_lib to always use the root helper to ensure that ip
# commands target xen dom0 rather than domU.
cfg.CONF.register_opts(ip_lib.OPTS)
cfg.CONF.set_default('ip_lib_force_root', True)
def main(bridge_classes):
try:
agent_config = create_agent_config_map(cfg.CONF)
except ValueError:
LOG.exception(_LE("Agent failed to create agent config map"))
raise SystemExit(1)
prepare_xen_compute()
validate_local_ip(agent_config['local_ip'])
try:
agent = OVSNeutronAgent(bridge_classes, **agent_config)
except (RuntimeError, ValueError) as e:
LOG.error(_LE("%s Agent terminated!"), e)
sys.exit(1)
agent.daemon_loop()
| apache-2.0 |
dkoppstein/recipes | recipes/gridss/gridss.py | 39 | 2707 | #!/opt/anaconda1anaconda2anaconda3/bin/python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
from os import access
from os import getenv
from os import X_OK
jar_file = 'gridss.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
env_prefix = os.path.dirname(os.path.dirname(real_dirname(sys.argv[0])))
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
# Use Java installed with Anaconda to ensure correct version
return os.path.join(env_prefix, 'bin', 'java')
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
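# Illustrative split (hypothetical argv):
# jvm_opts(['-Xmx2g', '-Dsamjdk.compression_level=5', 'assembly.bam'])
# -> (['-Xmx2g'], ['-Dsamjdk.compression_level=5'], ['assembly.bam'])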
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
jar_arg = '-cp'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
| mit |
SeerLabs/PDFMEF | src/extraction/test/test_utils.py | 3 | 1277 | import unittest
import subprocess32 as subprocess
import extraction.utils as utils
import os
class TestUtils(unittest.TestCase):
def setUp(self):
pass
def test_external_process_works(self):
(status, out, err) = utils.external_process(['grep', '3'], input_data='Line 1\nLine 2\nLine 3\n')
self.assertEqual(status, 0)
self.assertEqual(out, 'Line 3\n')
self.assertEqual(err, '')
def test_external_process_returns_status_code(self):
(status, out, err) = utils.external_process(['true'])
self.assertEqual(status, 0)
(status, out, err) = utils.external_process(['false'])
self.assertEqual(status, 1)
def test_external_process_timesout(self):
self.assertRaises(subprocess.TimeoutExpired, utils.external_process, ['sleep', '3'], timeout=2)
# This shouldn't timeout and thus shouldn't raise an error
utils.external_process(['sleep', '3'])
def test_temp_file(self):
data = 'test'
file_path = utils.temp_file(data, suffix='.food')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(os.path.splitext(file_path)[1], '.food')
self.assertEqual(open(file_path, 'r').read(), 'test')
os.remove(file_path)
self.assertFalse(os.path.isfile(file_path))
| apache-2.0 |
avelino/vcm | lib/app/libvirttemplate.py | 2 | 12232 | # -*- coding: utf-8 -*-
import os, sys, random
import virt
# libvirt XML Template
# General Metadata
# This name should consist only of alpha-numeric characters and is required to be unique within the scope of a single host
# args :
# name : name for virtual machine. ( ex: host112 )
def GENERAL_METADATA(type_,name, uuid=None):
xml = """
<domain type="%s">
<name>%s</name>
""" %(type_,name)
if uuid:
xml += " <uuid>%s</uuid>\n" %uuid
return xml
# Basic resources
# args:
# memory : Maximum allocation of memory for the guest at boot time
#
# currentmemory : The actual allocation of memory for the guest.
# This value may be less than the maximum allocation, to allow for ballooning
# up the guest's memory on the fly.
# vcpu : Number of virtual CPUs allocated for the guest OS.
def BASIC_RESOURCE(memory, vcpu, currentMemory=None):
try:
memory = int(memory) * 1024
except:
pass
xml = " <memory>%s</memory>\n" %memory
if currentMemory:
try:
currentMemory = int(int(currentMemory) * 1024)
except:
pass
xml += " <currentMemory>%s</currentMemory>\n" %currentMemory
xml += " <vcpu>%s</vcpu>\n" %vcpu
return xml
# Host bootloader
# Hypervisors employing paravirtualization do not usually emulate a BIOS,
# and instead the host is responsible for kicking off the operating system boot.
# This may use a pseudo-bootloader in the host to provide an
# interface to choose a kernel for the guest. An example is pygrub with Xen.
# args:
# bootloader : example: path to pygrub ( ex: /usr/bin/pygrub)
# bootloader_args : example: --append single
def HOST_BOOTLOADER(bootloader=None, bootloader_args=None):
xml = str()
if bootloader:
xml += " <bootloader>%s</bootloader>\n" %bootloader
if bootloader_args:
xml += " <bootloader_args>%s</bootloader_args>\n"
return xml
# Fullyvirtualized guest BIOS boot
# args:
# type : example: hvm
# loader : path hvmloader ( ex: /usr/lib/xen-3.2-1/boot/hvmloader )
# boot dev : hd, cdrom
# Paravirtualized guest direct kernel boot
# args:
# kernel : path vmlinuz kernel ( ex: /boot/vmlinuz-2.6.26-2-xen-amd64)
# initrd : path initrd kernel ( ex: /boot/initrd.img-2.6.26-2-xen-amd64)
# cmdline : options boot ( ex: root=/dev/sda1 ro console=hvc0 )
def OS_DETAIL(type_,arch=None, machine=None, bootloader=None, loader=None,kernel=None,initrd=None,cmdline=None,boot=None):
xml = " <os>\n"
xml += " <type"
if arch:
xml+=""" arch="%s\"""" %arch
if machine:
xml +=""" machine="%s\"""" %machine
xml +=">%s</type>\n" %type_
if loader:
xml += " <loader>%s</loader>\n" %loader
if kernel:
xml += " <kernel>%s</kernel>\n" %kernel
if initrd:
xml += " <initrd>%s</initrd>\n" %initrd
if cmdline:
xml += " <cmdline>%s</cmdline>\n" %cmdline
if boot:
xml += """ <boot dev="%s"/>\n""" %boot
xml += " </os>\n"
return xml
# Hypervisor features
# pae
# Physical address extension mode allows 32-bit guests to address more than 4 GB of memory.
# acpi
# ACPI is useful for power management, for example, with KVM guests it is required for graceful shutdown to work.
def HYPERVISOR_FEATURES(pae=None,acpi=None,apic=None):
xml = str()
if pae or acpi or apic:
xml = " <features>\n"
if pae:
xml +=" <pae/>\n"
if acpi:
xml +=" <acpi/>\n"
if apic:
xml +=" <apic/>\n"
xml += " </features>\n"
return xml
# Time keeping
# args:
# offset : localtime or utc
def TIME_KEEPING(clock=None):
if clock:
return """ <clock offset="%s"/>\n""" %clock
return "\n"
# Lifecycle control
# args:
# on_poweroff : destroy
# on_reboot : restart
# on_crash : restart
def LIFECYCLE_CONTROL(poweroff,reboot,crash):
xml = " <on_poweroff>%s</on_poweroff>\n" %poweroff
xml += " <on_reboot>%s</on_reboot>\n" %reboot
xml += " <on_crash>%s</on_crash>\n" %crash
return xml
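# e.g. LIFECYCLE_CONTROL('destroy', 'restart', 'restart') emits the values
# listed above: destroy on poweroff, restart on reboot and on crash.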
# Devices
# args:
# emulator : path of qemu-dm ( ex: /usr/lib/xen-3.2-1/bin/qemu-dm )
def EMULATOR(emulator):
return "<emulator>%s</emulator>\n" %emulator
# Hard drives, floppy disks, CDROMs
# args:
# type : file or block
# device: disk, floppy, cdrom
# driver name : phy , file
# source : file or dev ( if type = block, source dev=, if type = file, source file=)
# ex: dev='/dev/lvmxen0/hostXXX-disk'
# target dev : sda or sdb or hda or hdc ....
# bus : ide or scsi
# readonly
# shareable
# serial : example: <serial>WD-WMAP9A966149</serial>.
# options :
# <encryption type='...'>
# ...
# </encryption>
def HARD_DRIVE(type_, device, source, target, target_bus=None, serial=None, readonly=None, \
shareable=None, driver=None, driver_type=None, driver_cache=None, options=None):
xml = """<disk device="%s" type="%s">\n""" %(device,type_)
if driver:
# driver
xml += """ <driver name="%s\"""" %driver
if driver_type:
xml += """ type="%s\"""" %driver_type
if driver_cache:
xml += """ cache="%s\"""" %driver_cache
xml += "/>\n"
# source
if type_ == 'file':
xml += """ <source file="%s"/>\n""" %source
else:
xml += """ <source dev="%s"/>\n""" %source
# target
xml += """ <target"""
if target_bus:
xml += """ bus="%s\"""" %target_bus
xml += """ dev="%s"/>\n""" %target
if readonly:
xml += " <readonly/>\n"
if shareable:
xml += " <shareable/>\n"
if serial:
xml += " <serial>%s</serial>\n" %serial
if options:
xml +="%s\n" %options
xml += "</disk>\n"
return xml
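# A hypothetical call (paths and names are illustrative):
#   HARD_DRIVE('file', 'disk', '/var/lib/libvirt/images/host1.img', 'vda',
#              target_bus='virtio', driver='qemu', driver_type='qcow2')
# yields roughly:
#   <disk device="disk" type="file">
#     <driver name="qemu" type="qcow2"/>
#     <source file="/var/lib/libvirt/images/host1.img"/>
#     <target bus="virtio" dev="vda"/>
#   </disk>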
# Network interfaces
# Virtual Network
# args :
# <target dev='vnet7'/>
# <mac address="11:22:33:44:55:66"/>
def INTERFACE(type_, source, mac=None, source_port=None, target=None, script=None, model=None):
xml = """<interface type="%s">\n""" %type_
if type_ in ['bridge','network']:
xml += """ <source %s="%s"/>\n""" %(type_,source)
if type_ in ['mcast','server','client']:
xml += """ <source address="%s\"""" %source
if source_port:
xml +=""" port="%s\"""" %source_port
xml +="/>\n"
if type_ == 'direct':
xml += """ <source dev="%s" mode="vepa"/>\n""" %source
if mac and type_ in ['bridge','network','user']:
xml += """ <mac address="%s"/>\n""" %mac
if target:
xml +=""" <target dev="%s"/>\n""" %target
if script:
xml +=""" <script path="%s"/>\n""" %script
if model:
xml += """ <model type="%s"/>\n""" %model
xml += "</interface>\n"
return xml
# Input devices
# args: type, bus ( ex : type= mouse, tablet . bus= ps2 or usb )
def INPUT_DEVICE(type_, bus):
return """<input bus="%s" type="%s"/>\n""" %(bus,type_)
# Graphical framebuffers
# args: port, autoport, listen (ex: port = -01 or 5901, autoport = yes or no, listen = ip address or 0.0.0.0 )
def GRAPHICAL_VNC(listen,autoport=True,port=None,passwd=None):
if autoport or port is None:
port = '-1'
xml = """<graphics"""
if autoport:
xml += """ autoport="yes\""""
xml += """ port="%s\"""" %port or '-1'
xml += """ listen="%s\"""" %listen
if passwd:
xml += """ passwd="%s\"""" %passwd
xml+=""" type="vnc"/>\n"""
return xml
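# e.g. GRAPHICAL_VNC('0.0.0.0') produces
# <graphics autoport="yes" port="-1" listen="0.0.0.0" type="vnc"/>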
# args : autoport = yes or no, multiUser = yes or no
def GRAPHICAL_RDP(autoport=True, multiuser=True):
xml = """<graphics type="rdp\""""
if autoport:
xml += """ autoport="yes\""""
if multiuser:
xml += """ multiUser="yes\""""
xml +="/>\n"
return xml
def GRAPHICAL_SDL(display=':0.0',fullscreen=True,xauth=None):
xml += """<graphics type="sdl\""""
if xauth:
xml += """ xauth="%s\"""" %xauth
if fullscreen:
xml += """ fullscreen="yes\""""
xml += """ display="%s"/>""" %display
# args: source path, target path
# source path='/dev/pts/0'
# target port='0'
def SERIAL_PORT(target,type_='pty',source=None):
xml = """<serial type="%s">\n""" %type_
if source:
xml += """ <source path="%s"/>\n""" %source
xml += """ <target port="%s"/>\n""" %target
xml += "</serial>\n"
return xml
# args: source path, target path
# source path='/dev/pts/0'
# target port='0'
def CONSOLE_PORT(target,type_='pty',source=None):
xml = """<console type="%s">\n""" %type_
if source:
xml += """ <source path="%s"/>\n""" %source
xml += """ <target port="%s"/>\n""" %target
xml += "</console>\n"
return xml
# parallel port
def PARALLEL_PORT(source,target,type_='pty'):
xml = """<parallel type="%s">\n""" %type_
if source:
xml += """ <source path="%s"/>\n""" %source
xml += """ <target port="%s"/>\n""" %target
xml += "</parallel>\n"
return xml
# Sound devices
# Since libvirt 0.4.3
#
# model: 'es1370', 'sb16', and 'ac97' (ac97 since libvirt 0.6.0 )
def SOUND(model):
return"""
<sound model="%s"/>
""" %model
# Video devices
# model
# The model element has a mandatory type attribute which
# takes the value "vga", "cirrus", "vmvga", "xen" or "vbox".
# You can also provide the amount of video memory in kilobytes using vram and the number of screens with heads.
# acceleration
# Acceleration can be enabled (if supported) using the accel3d and accel2d attributes in the acceleration element.
def VIDEO(type_='vga',vram=8192,heads=1,accel3d=True):
xml = """
<video>
<model type="%s" vram="%s" heads="%s">
""" %(type_,vram,heads)
if accel3d:
xml += """ <acceleration accel3d="yes" accel2d="yes"/>
"""
xml += " </model>\n"
xml += "</video>\n"
return xml
# USB and PCI Devices
#
# args: vendor id, product id, ex: 0x1234, 0xbeef
def HOSTDEV_USB(vendor,product):
return """
<hostdev mode="subsystem" type="usb">
<source>
<vendor id="%s"/>
<product id="%s"/>
</source>
</hostdev>
""" %(vendor,product)
# args : bus, slot, function, ex: bus='0x06', slot='0x02', function='0x0'
def HOSTDEV_PCI(bus,slot,function):
return """
<hostdev mode="subsystem" type="pci">
<source>
<address bus="%s" slot="%s" function="%s"/>
</source>
</hostdev>
""" %(bus,slot,function)
def CONTROLLER(type_='scsi',index='0', model=None):
xml = """
<controller type="%s" index="%s\"""" %(type_,index)
if model:
xml +=""" model="%s\"""" %model
xml +="/>\n"
return xml
END_DOMAIN = """</domain>"""
def macgen(amount=50,mtype='xen'):
"""
Generate a list of random valid MAC addresses for Domains
"""
# Registered MAC address prefixes
vmwareprefix='00:50:56:'
xenprefix='00:16:3E:'
vboxprefix='08:00:27:'
qemuprefix='52:54:00:'
openvzprefix='00:18:51:'
# default xen prefix
macprefix=xenprefix
# VMWare mac address prefix
if mtype == 'vmware':
macprefix=vmwareprefix
# Virtualbox prefix
if mtype == 'vbox':
macprefix=vboxprefix
# QEMU/KVM prefix
if mtype == 'qemu' or mtype == 'kvm':
macprefix=qemuprefix
# OpenVZ prefix
if mtype == 'openvz':
macprefix=openvzprefix
maclist = []
for iface in virt.models.Device.objects.filter(type='interface'):
if iface.getdict().get('mac'):
maclist.append(str(iface.getdict().get('mac')).upper())
macs = []
for mac in [ ':'.join(['%02x' % random.randint(0,255) for p in range(3)]) for r in range(amount) ]:
mac = '%s%s' %(macprefix,mac.upper())
if mac not in maclist:
macs.append(mac)
return macs
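# Hypothetical usage: macgen(amount=2, mtype='kvm') might return
# ['52:54:00:3F:A1:0C', '52:54:00:7B:22:9D'] -- random suffixes behind the
# QEMU/KVM prefix, filtered against MACs already assigned to Device objects.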
| gpl-3.0 |
p/wolis-phpbb | tests/subscribe.py | 1 | 1812 | from wolis.test_case import WolisTestCase
class SubscribeTopicTestCase(WolisTestCase):
def test_subscribe_topic(self):
self.login('test43', 'test42')
url = '/index.php'
self.get(url)
self.assert_successish()
assert 'Index page' in self.response.body
href = self.link_href_by_text('Your first forum')
url = self.response.urljoin(href)
self.get(url)
self.assert_successish()
# topic
href = self.link_href_by_text('Welcome to phpBB3')
url = self.response.urljoin(href)
self.get(url)
self.assert_successish()
#assert 'Unsubscribe topic' not in self.response.body
href = self.link_href_by_title('Subscribe topic')
url = self.response.urljoin(href)
self.get(url)
self.assert_successish()
assert 'You have subscribed to be notified of new posts in this topic.' in self.response.body
def test_subscribe_forum(self):
self.login('test43', 'test42')
url = '/index.php'
self.get(url)
self.assert_successish()
assert 'Index page' in self.response.body
href = self.link_href_by_text('Your first forum')
url = self.response.urljoin(href)
self.get(url)
self.assert_successish()
#assert 'Unsubscribe forum' not in self.response.body
href = self.link_href_by_title('Subscribe forum')
url = self.response.urljoin(href)
self.get(url)
self.assert_successish()
assert 'You have subscribed to be notified of new posts in this forum.' in self.response.body
if __name__ == '__main__':
import unittest
unittest.main()
| bsd-2-clause |
mdrumond/tensorflow | tensorflow/contrib/specs/python/summaries_test.py | 112 | 3062 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for specs-related summarization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs.python import specs
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SummariesTest(test.TestCase):
def testStructure(self):
with self.test_session():
inputs_shape = (1, 18, 19, 5)
inputs = constant_op.constant(_rand(*inputs_shape))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(
spec, input_shape=inputs_shape),
"_ variablev2 conv variablev2 biasadd relu")
def testStructureFromTensor(self):
with self.test_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu")
def testPrint(self):
with self.test_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_print(spec, inputs)
def testSummary(self):
with self.test_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_summary(spec, inputs)
if __name__ == "__main__":
test.main()
| apache-2.0 |
peter-jang/ansible | lib/ansible/compat/tests/__init__.py | 339 | 1268 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
This module contains things that are only needed for compat in the testsuites,
not in ansible itself. If you are not installing the test suite, you can
safely remove this subdirectory.
'''
#
# Compat for python2.7
#
# One unittest needs to import builtins via __import__() so we need to have
# the string that represents it
try:
import __builtin__
except ImportError:
BUILTINS = 'builtins'
else:
BUILTINS = '__builtin__'
| gpl-3.0 |
FreicoinAlliance/p2pool | p2pool/bitcoin/networks/litecoin.py | 29 | 1199 | import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'fbc0b6db'.decode('hex')
P2P_PORT = 9333
ADDRESS_VERSION = 48
RPC_PORT = 9332
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'litecoinaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: 50*100000000 >> (height + 1)//840000
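# Subsidy is expressed in litoshis (1 LTC = 1e8): 50 LTC per block initially,
# halved every 840000 blocks via the right shift.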
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 150 # s
SYMBOL = 'LTC'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'Litecoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Litecoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.litecoin'), 'litecoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/address/'
TX_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/tx/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.03e8
| gpl-3.0 |
arielmakestuff/loadlimit | test/unit/util/test_event.py | 1 | 4403 | # -*- coding: utf-8 -*-
# test/unit/util/test_event.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test Logger"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import json
import logging
# Third-party imports
from pandas import Timestamp
import pytest
# Local imports
from loadlimit.util import Event, EventType, Logger, now
# ============================================================================
# Test __init__
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42], (42, )])
def test_init_event_type_badval(val):
"""Raise error if given a bad value for the event_type arg"""
expected = ('event_type arg expected {} object, got {} object instead'.
format(EventType.__name__, type(val).__name__))
with pytest.raises(TypeError) as err:
Event(val)
assert err.value.args == (expected, )
@pytest.mark.parametrize('val', list(EventType))
def test_init_event_type_goodval(val):
"""Accept valid value for the event_type arg"""
e = Event(val)
assert e.type == val
@pytest.mark.parametrize('val', [42, 4.2, '42', [42], (42, )])
def test_init_timestamp_badval(val):
"""Raise error if given a bad value for the timestamp arg"""
expected = ('timestamp arg expected {} object, got {} object instead'.
format(Timestamp.__name__, type(val).__name__))
with pytest.raises(TypeError) as err:
Event(EventType.start, val)
assert err.value.args == (expected, )
def test_init_timestamp_noval():
"""Automatically create the current timestamp if arg given None"""
cur = now()
e = Event(EventType.start)
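# Compare at whole-second resolution to tolerate the microseconds that
# elapse between the two now() calls.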
assert e.timestamp.floor('s') == cur.floor('s')
def test_init_timestamp_goodval():
"""Accept valid value for timestamp arg"""
cur = now()
e = Event(EventType.start, cur)
assert e.timestamp == cur
@pytest.mark.parametrize('val', [42, 4.2, '42', [42], (42, )])
def test_init_logger_badval(val):
"""Raise error if given bad value for the logger arg"""
expected = ('logger arg expected {} object, got {} object instead'.
format(Logger.__name__, type(val).__name__))
with pytest.raises(TypeError) as err:
Event(EventType.start, logger=val)
assert err.value.args == (expected, )
def test_init_logger_noval(caplog):
"""Don't log anything if logger arg is not given a value"""
with caplog.at_level(logging.DEBUG):
Event(EventType.start)
assert len(caplog.records) == 0
def test_init_logger_goodval(caplog):
"""Log an info message if given a logging.Logger object"""
logger = Logger(name=__name__)
e = Event(EventType.start, logger=logger)
expected = dict(name=e.type.name, timestamp=e.timestamp)
assert len(caplog.records) == 1
r = caplog.records[0]
# Check record
pre = 'EVENT: '
assert r.levelno == logging.INFO
assert r.message.startswith(pre)
message = json.loads(r.message[len(pre):])
message['timestamp'] = Timestamp(message['timestamp'], tz='UTC')
assert message == expected
# ============================================================================
# Test __getitem__
# ============================================================================
@pytest.mark.parametrize('key', [0, 1])
def test_getitem_goodkey(key):
"""__getitem__() retrieves correct value"""
e = Event(EventType.start, now())
assert e[key] == e._val[key]
def test_getitem_badkey():
"""Raise error when given bad key"""
expected = 'tuple index out of range'
e = Event(EventType.start)
with pytest.raises(IndexError) as err:
e[42]
assert err.value.args == (expected, )
# ============================================================================
# Test __len__
# ============================================================================
def test_len():
"""Return number of items contained in the Event"""
e = Event(EventType.start)
assert len(e) == 2
# ============================================================================
#
# ============================================================================
| mit |
dya2/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/X509CertChain.py | 238 | 6861 | """Class representing an X.509 certificate chain."""
from utils import cryptomath
class X509CertChain:
"""This class represents a chain of X.509 certificates.
@type x509List: list
@ivar x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
def __init__(self, x509List=None):
"""Create a new X509CertChain.
@type x509List: list
@param x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
if x509List:
self.x509List = x509List
else:
self.x509List = []
def getNumCerts(self):
"""Get the number of certificates in this chain.
@rtype: int
"""
return len(self.x509List)
def getEndEntityPublicKey(self):
"""Get the public key from the end-entity certificate.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].publicKey
def getFingerprint(self):
"""Get the hex-encoded fingerprint of the end-entity certificate.
@rtype: str
@return: A hex-encoded fingerprint.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getFingerprint()
def getCommonName(self):
"""Get the Subject's Common Name from the end-entity certificate.
The cryptlib_py module must be installed in order to use this
function.
@rtype: str or None
@return: The CN component of the certificate's subject DN, if
present.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getCommonName()
def validate(self, x509TrustList):
"""Check the validity of the certificate chain.
This checks that every certificate in the chain validates with
the subsequent one, until some certificate validates with (or
is identical to) one of the passed-in root certificates.
The cryptlib_py module must be installed in order to use this
function.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
certificate chain must extend to one of these certificates to
be considered valid.
"""
import cryptlib_py
c1 = None
c2 = None
lastC = None
rootC = None
try:
rootFingerprints = [c.getFingerprint() for c in x509TrustList]
#Check that every certificate in the chain validates with the
#next one
for cert1, cert2 in zip(self.x509List, self.x509List[1:]):
#If we come upon a root certificate, we're done.
if cert1.getFingerprint() in rootFingerprints:
return True
c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
try:
cryptlib_py.cryptCheckCert(c1, c2)
except:
return False
cryptlib_py.cryptDestroyCert(c1)
c1 = None
cryptlib_py.cryptDestroyCert(c2)
c2 = None
#If the last certificate is one of the root certificates, we're
#done.
if self.x509List[-1].getFingerprint() in rootFingerprints:
return True
#Otherwise, find a root certificate that the last certificate
#chains to, and validate them.
lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(),
cryptlib_py.CRYPT_UNUSED)
for rootCert in x509TrustList:
rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
if self._checkChaining(lastC, rootC):
try:
cryptlib_py.cryptCheckCert(lastC, rootC)
return True
except:
return False
return False
finally:
if not (c1 is None):
cryptlib_py.cryptDestroyCert(c1)
if not (c2 is None):
cryptlib_py.cryptDestroyCert(c2)
if not (lastC is None):
cryptlib_py.cryptDestroyCert(lastC)
if not (rootC is None):
cryptlib_py.cryptDestroyCert(rootC)
def _checkChaining(self, lastC, rootC):
import cryptlib_py
import array
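# compareNames reads the same DN component from both certificates (a
# missing component becomes None) and reports whether the values match.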
def compareNames(name):
try:
length = cryptlib_py.cryptGetAttributeString(lastC, name, None)
lastName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(lastC, name, lastName)
lastName = lastName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
lastName = None
try:
length = cryptlib_py.cryptGetAttributeString(rootC, name, None)
rootName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(rootC, name, rootName)
rootName = rootName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
rootName = None
return lastName == rootName
cryptlib_py.cryptSetAttribute(lastC,
cryptlib_py.CRYPT_CERTINFO_ISSUERNAME,
cryptlib_py.CRYPT_UNUSED)
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME):
return False
return True | apache-2.0 |
javierlgroba/Eventer-gapp | django/contrib/humanize/tests.py | 100 | 9960 | from __future__ import unicode_literals
import datetime
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.contrib.humanize.templatetags import humanize
from django.template import Template, Context, defaultfilters
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.html import escape
from django.utils.timezone import utc
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils import tzinfo
from django.utils.unittest import skipIf
# Mock out datetime in some tests so they don't fail occasionally when they
# run too slowly. Use a fixed datetime for datetime.now(). DST change in
# America/Chicago (the default time zone) happened on March 11th in 2012.
now = datetime.datetime(2012, 3, 9, 22, 30)
class MockDateTime(datetime.datetime):
@classmethod
def now(self, tz=None):
if tz is None or tz.utcoffset(now) is None:
return now
else:
# equals now.replace(tzinfo=utc)
return now.replace(tzinfo=tz) + tz.utcoffset(now)
class HumanizeTests(TestCase):
def humanize_tester(self, test_list, result_list, method):
# zip pairs the two lists; keep them the same length, since any extra
# items in the longer list are silently skipped rather than tested
for test_content, result in zip(test_list, result_list):
t = Template('{%% load humanize %%}{{ test_content|%s }}' % method)
rendered = t.render(Context(locals())).strip()
self.assertEqual(rendered, escape(result),
msg="%s test failed, produced '%s', should've produced '%s'" % (method, rendered, result))
def test_ordinal(self):
test_list = ('1', '2', '3', '4', '11', '12',
'13', '101', '102', '103', '111',
'something else', None)
result_list = ('1st', '2nd', '3rd', '4th', '11th',
'12th', '13th', '101st', '102nd', '103rd',
'111th', 'something else', None)
self.humanize_tester(test_list, result_list, 'ordinal')
def test_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567',
None)
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567',
None)
self.humanize_tester(test_list, result_list, 'intcomma')
def test_l10n_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567',
None)
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567',
None)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=False):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_intcomma_without_number_grouping(self):
# Regression for #17414
with translation.override('ja'):
with self.settings(USE_L10N=True):
self.humanize_tester([100], ['100'], 'intcomma')
def test_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000', '2000000000', '6000000000000',
'1300000000000000', '3500000000000000000000',
'8100000000000000000000000000000000', None)
result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
'1.0 billion', '2.0 billion', '6.0 trillion',
'1.3 quadrillion', '3.5 sextillion',
'8.1 decillion', None)
self.humanize_tester(test_list, result_list, 'intword')
def test_i18n_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', None)
result_list = ('100', '1.000', '10.123', '10.311', '1.000.000', '1.234.567,25',
'100', '1.000', '10.123', '10.311', '1.000.000', None)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
with translation.override('de'):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_i18n_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000', '2000000000', '6000000000000')
result_list = ('100', '1,0 Million', '1,2 Millionen', '1,3 Millionen',
'1,0 Milliarde', '2,0 Milliarden', '6,0 Billionen')
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
with translation.override('de'):
self.humanize_tester(test_list, result_list, 'intword')
def test_apnumber(self):
test_list = [str(x) for x in range(1, 11)]
test_list.append(None)
result_list = ('one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', '10', None)
self.humanize_tester(test_list, result_list, 'apnumber')
def test_naturalday(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
tomorrow = today + datetime.timedelta(days=1)
someday = today - datetime.timedelta(days=10)
notdate = "I'm not a date value"
test_list = (today, yesterday, tomorrow, someday, notdate, None)
someday_result = defaultfilters.date(someday)
result_list = (_('today'), _('yesterday'), _('tomorrow'),
someday_result, "I'm not a date value", None)
self.humanize_tester(test_list, result_list, 'naturalday')
def test_naturalday_tz(self):
today = datetime.date.today()
tz_one = tzinfo.FixedOffset(datetime.timedelta(hours=-12))
tz_two = tzinfo.FixedOffset(datetime.timedelta(hours=12))
# Can be today or yesterday
date_one = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_one)
naturalday_one = humanize.naturalday(date_one)
# Can be today or tomorrow
date_two = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_two)
naturalday_two = humanize.naturalday(date_two)
# As 24h of difference they will never be the same
self.assertNotEqual(naturalday_one, naturalday_two)
@skipIf(settings.TIME_ZONE != "America/Chicago" and pytz is None,
"this test requires pytz when a non-default time zone is set")
def test_naturalday_uses_localtime(self):
# Regression for #18504
# This is 2012-03-08T19:30:00-06:00 in America/Chicago
dt = datetime.datetime(2012, 3, 9, 1, 30, tzinfo=utc)
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
with override_settings(TIME_ZONE="America/Chicago", USE_TZ=True):
self.humanize_tester([dt], ['yesterday'], 'naturalday')
finally:
humanize.datetime = orig_humanize_datetime
def test_naturaltime(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
test_list = [
now,
now - datetime.timedelta(seconds=1),
now - datetime.timedelta(seconds=30),
now - datetime.timedelta(minutes=1, seconds=30),
now - datetime.timedelta(minutes=2),
now - datetime.timedelta(hours=1, minutes=30, seconds=30),
now - datetime.timedelta(hours=23, minutes=50, seconds=50),
now - datetime.timedelta(days=1),
now - datetime.timedelta(days=500),
now + datetime.timedelta(seconds=1),
now + datetime.timedelta(seconds=30),
now + datetime.timedelta(minutes=1, seconds=30),
now + datetime.timedelta(minutes=2),
now + datetime.timedelta(hours=1, minutes=30, seconds=30),
now + datetime.timedelta(hours=23, minutes=50, seconds=50),
now + datetime.timedelta(days=1),
now + datetime.timedelta(days=2, hours=6),
now + datetime.timedelta(days=500),
now.replace(tzinfo=naive()),
now.replace(tzinfo=utc),
]
result_list = [
'now',
'a second ago',
'30 seconds ago',
'a minute ago',
'2 minutes ago',
'an hour ago',
'23 hours ago',
'1 day ago',
'1 year, 4 months ago',
'a second from now',
'30 seconds from now',
'a minute from now',
'2 minutes from now',
'an hour from now',
'23 hours from now',
'1 day from now',
'2 days, 6 hours from now',
'1 year, 4 months from now',
'now',
'now',
]
# Because of the DST change, 2 days and 6 hours after the chosen
# date in naive arithmetic is only 2 days and 5 hours after in
# aware arithmetic.
result_list_with_tz_support = result_list[:]
assert result_list_with_tz_support[-4] == '2 days, 6 hours from now'
result_list_with_tz_support[-4] = '2 days, 5 hours from now'
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
self.humanize_tester(test_list, result_list, 'naturaltime')
with override_settings(USE_TZ=True):
self.humanize_tester(test_list, result_list_with_tz_support, 'naturaltime')
finally:
humanize.datetime = orig_humanize_datetime
| lgpl-3.0 |
jgeskens/django | tests/staticfiles_tests/tests.py | 8 | 31390 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
import posixpath
import shutil
import sys
import tempfile
from django.template import loader, Context
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils.functional import empty
from django.utils._os import rmtree_errorhandler, upath
from django.utils import six
from django.contrib.staticfiles import finders, storage
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'DEBUG': True,
'MEDIA_URL': '/media/',
'STATIC_URL': '/static/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
'STATICFILES_DIRS': (
os.path.join(TEST_ROOT, 'project', 'documents'),
('prefix', os.path.join(TEST_ROOT, 'project', 'prefixed')),
),
'STATICFILES_FINDERS': (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
),
}
from django.contrib.staticfiles.management.commands.collectstatic import Command as CollectstaticCommand
class BaseStaticFilesTestCase(object):
"""
Test case with a couple utility assertions.
"""
def setUp(self):
        # Clear out the cached staticfiles_storage: when it first gets
        # accessed (by some other test), it evaluates settings.STATIC_ROOT,
        # and since we're planning on changing that we need to clear out the cache.
storage.staticfiles_storage._wrapped = empty
# Clear the cached staticfile finders, so they are reinitialized every
# run and pick up changes in settings.STATICFILES_DIRS.
finders._finders.clear()
testfiles_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test')
        # To make sure SVN doesn't hang itself with the non-ASCII characters
        # during checkout, we actually create one file dynamically.
self._nonascii_filepath = os.path.join(testfiles_path, '\u2297.txt')
with codecs.open(self._nonascii_filepath, 'w', 'utf-8') as f:
f.write("\u2297 in the app dir")
# And also create the stupid hidden file to dwarf the setup.py's
# package data handling.
self._hidden_filepath = os.path.join(testfiles_path, '.hidden')
with codecs.open(self._hidden_filepath, 'w', 'utf-8') as f:
f.write("should be ignored")
self._backup_filepath = os.path.join(
TEST_ROOT, 'project', 'documents', 'test', 'backup~')
with codecs.open(self._backup_filepath, 'w', 'utf-8') as f:
f.write("should be ignored")
def tearDown(self):
os.unlink(self._nonascii_filepath)
os.unlink(self._hidden_filepath)
os.unlink(self._backup_filepath)
def assertFileContains(self, filepath, text):
self.assertIn(text, self._get_file(force_text(filepath)),
"'%s' not in '%s'" % (text, filepath))
def assertFileNotFound(self, filepath):
self.assertRaises(IOError, self._get_file, filepath)
def render_template(self, template, **kwargs):
if isinstance(template, six.string_types):
template = loader.get_template_from_string(template)
return template.render(Context(kwargs)).strip()
def static_template_snippet(self, path, asvar=False):
if asvar:
return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path
def assertStaticRenders(self, path, result, asvar=False, **kwargs):
template = self.static_template_snippet(path, asvar)
self.assertEqual(self.render_template(template, **kwargs), result)
def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
self.assertRaises(exc, self.assertStaticRenders, path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesTestCase, TestCase):
pass
class BaseCollectionTestCase(BaseStaticFilesTestCase):
"""
Tests shared by all file finding features (collectstatic,
findstatic, and static serve view).
This relies on the asserts defined in BaseStaticFilesTestCase, but
is separated because some test cases need those asserts without
all these tests.
"""
def setUp(self):
super(BaseCollectionTestCase, self).setUp()
self.old_root = settings.STATIC_ROOT
settings.STATIC_ROOT = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
self.run_collectstatic()
# Use our own error handler that can handle .svn dirs on Windows
self.addCleanup(shutil.rmtree, settings.STATIC_ROOT,
ignore_errors=True, onerror=rmtree_errorhandler)
def tearDown(self):
settings.STATIC_ROOT = self.old_root
super(BaseCollectionTestCase, self).tearDown()
def run_collectstatic(self, **kwargs):
call_command('collectstatic', interactive=False, verbosity='0',
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATIC_ROOT, filepath)
with codecs.open(filepath, "r", "utf-8") as f:
return f.read()
class CollectionTestCase(BaseCollectionTestCase, StaticFilesTestCase):
pass
class TestDefaults(object):
"""
A few standard test cases.
"""
def test_staticfiles_dirs(self):
"""
Can find a file in a STATICFILES_DIRS directory.
"""
self.assertFileContains('test.txt', 'Can we find')
self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')
def test_staticfiles_dirs_subdir(self):
"""
Can find a file in a subdirectory of a STATICFILES_DIRS
directory.
"""
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
"""
File in STATICFILES_DIRS has priority over file in app.
"""
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
"""
Can find a file in an app static/ directory.
"""
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
def test_nonascii_filenames(self):
"""
Can find a file with non-ASCII character in an app static/ directory.
"""
self.assertFileContains('test/⊗.txt', '⊗ in the app dir')
def test_camelcase_filenames(self):
"""
Can find a file with capital letters.
"""
self.assertFileContains('test/camelCase.txt', 'camelCase')
class TestFindStatic(CollectionTestCase, TestDefaults):
"""
Test ``findstatic`` management command.
"""
def _get_file(self, filepath):
out = six.StringIO()
call_command('findstatic', filepath, all=False, verbosity=0, stdout=out)
out.seek(0)
lines = [l.strip() for l in out.readlines()]
with codecs.open(force_text(lines[0].strip()), "r", "utf-8") as f:
return f.read()
def test_all_files(self):
"""
        Test that findstatic returns all candidate files if run without --first and with verbosity 1.
"""
out = six.StringIO()
call_command('findstatic', 'test/file.txt', verbosity=1, stdout=out)
out.seek(0)
lines = [l.strip() for l in out.readlines()]
self.assertEqual(len(lines), 3) # three because there is also the "Found <file> here" line
self.assertIn('project', force_text(lines[1]))
self.assertIn('apps', force_text(lines[2]))
def test_all_files_less_verbose(self):
"""
        Test that findstatic returns all candidate files if run without --first and with verbosity 0.
"""
out = six.StringIO()
call_command('findstatic', 'test/file.txt', verbosity=0, stdout=out)
out.seek(0)
lines = [l.strip() for l in out.readlines()]
self.assertEqual(len(lines), 2)
self.assertIn('project', force_text(lines[0]))
self.assertIn('apps', force_text(lines[1]))
class TestCollection(CollectionTestCase, TestDefaults):
"""
Test ``collectstatic`` management command.
"""
def test_ignore(self):
"""
Test that -i patterns are ignored.
"""
self.assertFileNotFound('test/test.ignoreme')
def test_common_ignore_patterns(self):
"""
Common ignore patterns (*~, .*, CVS) are ignored.
"""
self.assertFileNotFound('test/.hidden')
self.assertFileNotFound('test/backup~')
self.assertFileNotFound('test/CVS')
class TestCollectionClear(CollectionTestCase):
"""
Test the ``--clear`` option of the ``collectstatic`` management command.
"""
def run_collectstatic(self, **kwargs):
clear_filepath = os.path.join(settings.STATIC_ROOT, 'cleared.txt')
with open(clear_filepath, 'w') as f:
f.write('should be cleared')
super(TestCollectionClear, self).run_collectstatic(clear=True)
def test_cleared_not_found(self):
self.assertFileNotFound('cleared.txt')
class TestCollectionExcludeNoDefaultIgnore(CollectionTestCase, TestDefaults):
"""
Test ``--exclude-dirs`` and ``--no-default-ignore`` options of the
``collectstatic`` management command.
"""
def run_collectstatic(self):
super(TestCollectionExcludeNoDefaultIgnore, self).run_collectstatic(
use_default_ignore_patterns=False)
def test_no_common_ignore_patterns(self):
"""
With --no-default-ignore, common ignore patterns (*~, .*, CVS)
are not ignored.
"""
self.assertFileContains('test/.hidden', 'should be ignored')
self.assertFileContains('test/backup~', 'should be ignored')
self.assertFileContains('test/CVS', 'should be ignored')
class TestNoFilesCreated(object):
def test_no_files_created(self):
"""
        Make sure no files were created in the destination directory.
"""
self.assertEqual(os.listdir(settings.STATIC_ROOT), [])
class TestCollectionDryRun(CollectionTestCase, TestNoFilesCreated):
"""
Test ``--dry-run`` option for ``collectstatic`` management command.
"""
def run_collectstatic(self):
super(TestCollectionDryRun, self).run_collectstatic(dry_run=True)
class TestCollectionFilesOverride(CollectionTestCase):
"""
    Test how the ``collectstatic`` management command overrides duplicated files.
    Check that the order of apps in INSTALLED_APPS is handled properly even if
    file modification dates are in a different order:
'staticfiles_tests.apps.test',
'staticfiles_tests.apps.no_label',
"""
def setUp(self):
self.orig_path = os.path.join(TEST_ROOT, 'apps', 'no_label', 'static', 'file2.txt')
# get modification and access times for no_label/static/file2.txt
self.orig_mtime = os.path.getmtime(self.orig_path)
self.orig_atime = os.path.getatime(self.orig_path)
        # prepare a duplicate of file2.txt from the no_label app;
        # this file will have a modification time older than no_label/static/file2.txt,
        # but it should still be collected into STATIC_ROOT because the 'test' app
        # comes before the 'no_label' app in INSTALLED_APPS
self.testfile_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'file2.txt')
with open(self.testfile_path, 'w+') as f:
f.write('duplicate of file2.txt')
os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))
super(TestCollectionFilesOverride, self).setUp()
def tearDown(self):
if os.path.exists(self.testfile_path):
os.unlink(self.testfile_path)
# set back original modification time
os.utime(self.orig_path, (self.orig_atime, self.orig_mtime))
super(TestCollectionFilesOverride, self).tearDown()
def test_ordering_override(self):
"""
Test if collectstatic takes files in proper order
"""
self.assertFileContains('file2.txt', 'duplicate of file2.txt')
# run collectstatic again
self.run_collectstatic()
self.assertFileContains('file2.txt', 'duplicate of file2.txt')
# and now change modification time of no_label/static/file2.txt
# test app is first in INSTALLED_APPS so file2.txt should remain unmodified
mtime = os.path.getmtime(self.testfile_path)
atime = os.path.getatime(self.testfile_path)
os.utime(self.orig_path, (mtime + 1, atime + 1))
# run collectstatic again
self.run_collectstatic()
self.assertFileContains('file2.txt', 'duplicate of file2.txt')
@override_settings(
STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage',
)
class TestCollectionNonLocalStorage(CollectionTestCase, TestNoFilesCreated):
"""
Tests for #15035
"""
pass
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(TEST_SETTINGS,
STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionCachedStorage(BaseCollectionTestCase,
BaseStaticFilesTestCase, TestCase):
"""
Tests for the Cache busting storage
"""
def cached_file_path(self, path):
fullpath = self.render_template(self.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError,
"does/not/exist.png",
"/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt",
"/static/test/file.dad0999e4f8f.txt")
self.assertStaticRenders("test/file.txt",
"/static/test/file.dad0999e4f8f.txt", asvar=True)
self.assertStaticRenders("cached/styles.css",
"/static/cached/styles.93b1147e8552.css")
self.assertStaticRenders("path/",
"/static/path/")
self.assertStaticRenders("path/?query",
"/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.cached_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.93b1147e8552.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_ignored_completely(self):
relpath = self.cached_file_path("cached/css/ignored.css")
self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'#foobar', content)
self.assertIn(b'http:foobar', content)
self.assertIn(b'https:foobar', content)
self.assertIn(b'data:foobar', content)
self.assertIn(b'//foobar', content)
def test_path_with_querystring(self):
relpath = self.cached_file_path("cached/styles.css?spam=eggs")
self.assertEqual(relpath,
"cached/styles.93b1147e8552.css?spam=eggs")
with storage.staticfiles_storage.open(
"cached/styles.93b1147e8552.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_fragment(self):
relpath = self.cached_file_path("cached/styles.css#eggs")
self.assertEqual(relpath, "cached/styles.93b1147e8552.css#eggs")
with storage.staticfiles_storage.open(
"cached/styles.93b1147e8552.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_querystring_and_fragment(self):
relpath = self.cached_file_path("cached/css/fragments.css")
self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
self.assertIn(b'#default#VML', content)
def test_template_tag_absolute(self):
relpath = self.cached_file_path("cached/absolute.css")
self.assertEqual(relpath, "cached/absolute.23f087ad823a.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/cached/styles.css", content)
self.assertIn(b"/static/cached/styles.93b1147e8552.css", content)
self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
def test_template_tag_denorm(self):
relpath = self.cached_file_path("cached/denorm.css")
self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"..//cached///styles.css", content)
self.assertIn(b"../cached/styles.93b1147e8552.css", content)
self.assertNotIn(b"url(img/relative.png )", content)
self.assertIn(b'url("img/relative.acae32e4532b.png', content)
def test_template_tag_relative(self):
relpath = self.cached_file_path("cached/relative.css")
self.assertEqual(relpath, "cached/relative.2217ea7273c2.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"../cached/styles.css", content)
self.assertNotIn(b'@import "styles.css"', content)
self.assertNotIn(b'url(img/relative.png)', content)
self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
self.assertIn(b"../cached/styles.93b1147e8552.css", content)
def test_import_replacement(self):
"See #18050"
relpath = self.cached_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"""import url("styles.93b1147e8552.css")""", relfile.read())
def test_template_tag_deep_relative(self):
relpath = self.cached_file_path("cached/css/window.css")
self.assertEqual(relpath, "cached/css/window.9db38d5169f3.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b'url(img/window.png)', content)
self.assertIn(b'url("img/window.acae32e4532b.png")', content)
def test_template_tag_url(self):
relpath = self.cached_file_path("cached/url.css")
self.assertEqual(relpath, "cached/url.615e21601e4b.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"https://", relfile.read())
def test_cache_invalidation(self):
name = "cached/styles.css"
hashed_name = "cached/styles.93b1147e8552.css"
# check if the cache is filled correctly as expected
cache_key = storage.staticfiles_storage.cache_key(name)
cached_name = storage.staticfiles_storage.cache.get(cache_key)
self.assertEqual(self.cached_file_path(name), cached_name)
# clearing the cache to make sure we re-set it correctly in the url method
storage.staticfiles_storage.cache.clear()
cached_name = storage.staticfiles_storage.cache.get(cache_key)
self.assertEqual(cached_name, None)
self.assertEqual(self.cached_file_path(name), hashed_name)
cached_name = storage.staticfiles_storage.cache.get(cache_key)
self.assertEqual(cached_name, hashed_name)
def test_post_processing(self):
"""Test that post_processing behaves correctly.
Files that are alterable should always be post-processed; files that
aren't should be skipped.
collectstatic has already been called once in setUp() for this testcase,
therefore we check by verifying behavior on a second run.
"""
collectstatic_args = {
'interactive': False,
'verbosity': '0',
'link': False,
'clear': False,
'dry_run': False,
'post_process': True,
'use_default_ignore_patterns': True,
'ignore_patterns': ['*.ignoreme'],
}
collectstatic_cmd = CollectstaticCommand()
collectstatic_cmd.set_options(**collectstatic_args)
stats = collectstatic_cmd.collect()
self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
def test_cache_key_memcache_validation(self):
"""
Handle cache key creation correctly, see #17861.
"""
name = "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/" + "\x16" + "\xb4"
cache_key = storage.staticfiles_storage.cache_key(name)
cache_validator = BaseCache({})
cache_validator.validate_key(cache_key)
self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')
def test_css_import_case_insensitive(self):
relpath = self.cached_file_path("cached/styles_insensitive.css")
self.assertEqual(relpath, "cached/styles_insensitive.2f0151cca872.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
@override_settings(
STATICFILES_DIRS=(os.path.join(TEST_ROOT, 'project', 'faulty'),),
STATICFILES_FINDERS=('django.contrib.staticfiles.finders.FileSystemFinder',),
)
def test_post_processing_failure(self):
"""
Test that post_processing indicates the origin of the error when it
fails. Regression test for #18986.
"""
finders._finders.clear()
err = six.StringIO()
with self.assertRaises(Exception) as cm:
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(TEST_SETTINGS,
STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
BaseStaticFilesTestCase, TestCase):
"""
Tests for the Cache busting storage
"""
def cached_file_path(self, path):
fullpath = self.render_template(self.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError,
"does/not/exist.png",
"/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt",
"/static/test/file.deploy12345.txt")
self.assertStaticRenders("cached/styles.css",
"/static/cached/styles.deploy12345.css")
self.assertStaticRenders("path/",
"/static/path/")
self.assertStaticRenders("path/?query",
"/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.cached_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.deploy12345.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.deploy12345.css", content)
if sys.platform != 'win32':
class TestCollectionLinks(CollectionTestCase, TestDefaults):
"""
Test ``--link`` option for ``collectstatic`` management command.
Note that by inheriting ``TestDefaults`` we repeat all
the standard file resolving tests here, to make sure using
``--link`` does not change the file-selection semantics.
"""
def run_collectstatic(self):
super(TestCollectionLinks, self).run_collectstatic(link=True)
def test_links_created(self):
"""
With ``--link``, symbolic links are created.
"""
self.assertTrue(os.path.islink(os.path.join(settings.STATIC_ROOT, 'test.txt')))
class TestServeStatic(StaticFilesTestCase):
"""
Test static asset serving view.
"""
urls = 'staticfiles_tests.urls.default'
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.STATIC_URL, filepath))
def assertFileContains(self, filepath, text):
self.assertContains(self._response(filepath), text)
def assertFileNotFound(self, filepath):
self.assertEqual(self._response(filepath).status_code, 404)
class TestServeDisabled(TestServeStatic):
"""
Test serving static files disabled when DEBUG is False.
"""
def setUp(self):
super(TestServeDisabled, self).setUp()
settings.DEBUG = False
def test_disabled_serving(self):
six.assertRaisesRegex(self, ImproperlyConfigured, 'The staticfiles view '
'can only be used in debug mode ', self._response, 'test.txt')
class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
"""
Test static asset serving view with manually configured URLconf.
"""
pass
class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
"""
Test static asset serving view with staticfiles_urlpatterns helper.
"""
urls = 'staticfiles_tests.urls.helper'
class TestServeAdminMedia(TestServeStatic):
"""
Test serving media from django.contrib.admin.
"""
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.STATIC_URL, 'admin/', filepath))
def test_serve_admin_media(self):
self.assertFileContains('css/base.css', 'body')
class FinderTestCase(object):
"""
Base finder test mixin.
On Windows, sometimes the case of the path we ask the finders for and the
path(s) they find can differ. Compare them using os.path.normcase() to
avoid false negatives.
"""
def test_find_first(self):
src, dst = self.find_first
found = self.finder.find(src)
self.assertEqual(os.path.normcase(found), os.path.normcase(dst))
def test_find_all(self):
src, dst = self.find_all
found = self.finder.find(src, all=True)
found = [os.path.normcase(f) for f in found]
dst = [os.path.normcase(d) for d in dst]
self.assertEqual(found, dst)
class TestFileSystemFinder(StaticFilesTestCase, FinderTestCase):
"""
Test FileSystemFinder.
"""
def setUp(self):
super(TestFileSystemFinder, self).setUp()
self.finder = finders.FileSystemFinder()
test_file_path = os.path.join(TEST_ROOT, 'project', 'documents', 'test', 'file.txt')
self.find_first = (os.path.join('test', 'file.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file.txt'), [test_file_path])
class TestAppDirectoriesFinder(StaticFilesTestCase, FinderTestCase):
"""
Test AppDirectoriesFinder.
"""
def setUp(self):
super(TestAppDirectoriesFinder, self).setUp()
self.finder = finders.AppDirectoriesFinder()
test_file_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test', 'file1.txt')
self.find_first = (os.path.join('test', 'file1.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file1.txt'), [test_file_path])
class TestDefaultStorageFinder(StaticFilesTestCase, FinderTestCase):
"""
Test DefaultStorageFinder.
"""
def setUp(self):
super(TestDefaultStorageFinder, self).setUp()
self.finder = finders.DefaultStorageFinder(
storage=storage.StaticFilesStorage(location=settings.MEDIA_ROOT))
test_file_path = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
self.find_first = ('media-file.txt', test_file_path)
self.find_all = ('media-file.txt', [test_file_path])
class TestMiscFinder(TestCase):
"""
A few misc finder tests.
"""
def test_get_finder(self):
self.assertIsInstance(finders.get_finder(
'django.contrib.staticfiles.finders.FileSystemFinder'),
finders.FileSystemFinder)
def test_get_finder_bad_classname(self):
self.assertRaises(ImproperlyConfigured, finders.get_finder,
'django.contrib.staticfiles.finders.FooBarFinder')
def test_get_finder_bad_module(self):
self.assertRaises(ImproperlyConfigured,
finders.get_finder, 'foo.bar.FooBarFinder')
@override_settings(STATICFILES_DIRS='a string')
def test_non_tuple_raises_exception(self):
"""
We can't determine if STATICFILES_DIRS is set correctly just by
looking at the type, but we can determine if it's definitely wrong.
"""
self.assertRaises(ImproperlyConfigured, finders.FileSystemFinder)
@override_settings(MEDIA_ROOT='')
def test_location_empty(self):
self.assertRaises(ImproperlyConfigured, finders.DefaultStorageFinder)
class TestTemplateTag(StaticFilesTestCase):
def test_template_tag(self):
self.assertStaticRenders("does/not/exist.png",
"/static/does/not/exist.png")
self.assertStaticRenders("testfile.txt", "/static/testfile.txt")
| bsd-3-clause |
BT-rmartin/odoo | openerp/tools/pdf_utils.py | 456 | 3659 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
Manipulate pdf and fdf files (pdftk recommended).
Notes regarding pdftk, pdf forms and fdf files (form definition files):
field names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
Without flatten, one could further edit the resulting form.
With flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
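    # Encode the field name as UTF-16BE with a byte-order mark, as used for
    # FDF text strings (this simple scheme only covers single-byte characters).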
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
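    # Collect field names from the '/T (...)' entries of an FDF dump,
    # ignoring the '/V' value lines.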
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
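    # Emit one FDF field dictionary: /V holds the value, /T the
    # UTF-16BE-encoded field name produced by output_field().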
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
        # write_field(out, key+"a", value) # for carbon copy on other pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
    # what about using 'pdftk filename dump_data_fields' and parsing the output?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
def testfill_pdf(infile, outfile):
    keys = extract_keys_from_pdf(infile)
    # fill_pdf() expects a mapping of field name to value, not a list of tuples
    fields = dict((key, '') for key in keys)
    fill_pdf(infile, outfile, fields)
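# A minimal usage sketch (hypothetical file names; requires the pdftk binary
# on PATH). fill_pdf() takes a dict mapping field names to values:
#
#     names = extract_keys_from_pdf('form.pdf')
#     fill_pdf('form.pdf', 'filled.pdf', dict((name, 'sample') for name in names))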
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/google/protobuf/internal/message_set_extensions_pb2.py | 42 | 8373 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/message_set_extensions.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/message_set_extensions.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_pb=_b('\n5google/protobuf/internal/message_set_extensions.proto\x12\x18google.protobuf.internal\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa5\x01\n\x18TestMessageSetExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xab\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension1\"\xa7\x01\n\x18TestMessageSetExtension2\x12\x0b\n\x03str\x18\x19 \x01(\t2~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xca\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension2\"(\n\x18TestMessageSetExtension3\x12\x0c\n\x04text\x18# \x01(\t:\x7f\n\x16message_set_extension3\x12(.google.protobuf.internal.TestMessageSet\x18\xdf\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MESSAGE_SET_EXTENSION3_FIELD_NUMBER = 98418655
message_set_extension3 = _descriptor.FieldDescriptor(
name='message_set_extension3', full_name='google.protobuf.internal.message_set_extension3', index=0,
number=98418655, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_TESTMESSAGESET = _descriptor.Descriptor(
name='TestMessageSet',
full_name='google.protobuf.internal.TestMessageSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001')),
is_extendable=True,
syntax='proto2',
extension_ranges=[(4, 2147483647), ],
oneofs=[
],
serialized_start=83,
serialized_end=113,
)
_TESTMESSAGESETEXTENSION1 = _descriptor.Descriptor(
name='TestMessageSetExtension1',
full_name='google.protobuf.internal.TestMessageSetExtension1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='i', full_name='google.protobuf.internal.TestMessageSetExtension1.i', index=0,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='message_set_extension', full_name='google.protobuf.internal.TestMessageSetExtension1.message_set_extension', index=0,
number=98418603, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=281,
)
_TESTMESSAGESETEXTENSION2 = _descriptor.Descriptor(
name='TestMessageSetExtension2',
full_name='google.protobuf.internal.TestMessageSetExtension2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='str', full_name='google.protobuf.internal.TestMessageSetExtension2.str', index=0,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='message_set_extension', full_name='google.protobuf.internal.TestMessageSetExtension2.message_set_extension', index=0,
number=98418634, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=284,
serialized_end=451,
)
_TESTMESSAGESETEXTENSION3 = _descriptor.Descriptor(
name='TestMessageSetExtension3',
full_name='google.protobuf.internal.TestMessageSetExtension3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='google.protobuf.internal.TestMessageSetExtension3.text', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=493,
)
DESCRIPTOR.message_types_by_name['TestMessageSet'] = _TESTMESSAGESET
DESCRIPTOR.message_types_by_name['TestMessageSetExtension1'] = _TESTMESSAGESETEXTENSION1
DESCRIPTOR.message_types_by_name['TestMessageSetExtension2'] = _TESTMESSAGESETEXTENSION2
DESCRIPTOR.message_types_by_name['TestMessageSetExtension3'] = _TESTMESSAGESETEXTENSION3
DESCRIPTOR.extensions_by_name['message_set_extension3'] = message_set_extension3
TestMessageSet = _reflection.GeneratedProtocolMessageType('TestMessageSet', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESET,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSet)
))
_sym_db.RegisterMessage(TestMessageSet)
TestMessageSetExtension1 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension1', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION1,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension1)
))
_sym_db.RegisterMessage(TestMessageSetExtension1)
TestMessageSetExtension2 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension2', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION2,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension2)
))
_sym_db.RegisterMessage(TestMessageSetExtension2)
TestMessageSetExtension3 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension3', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION3,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension3)
))
_sym_db.RegisterMessage(TestMessageSetExtension3)
message_set_extension3.message_type = _TESTMESSAGESETEXTENSION3
TestMessageSet.RegisterExtension(message_set_extension3)
_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension'].message_type = _TESTMESSAGESETEXTENSION1
TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension'])
_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension'].message_type = _TESTMESSAGESETEXTENSION2
TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension'])
_TESTMESSAGESET.has_options = True
_TESTMESSAGESET._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001'))
# @@protoc_insertion_point(module_scope)
| mit |
Jeff-Tian/mybnb | Python27/Lib/lib2to3/btm_utils.py | 62 | 10294 | "Utility functions used by the btm_matcher module"
from . import pytree
from .pgen2 import grammar, token
from .pygram import pattern_symbols, python_symbols
syms = pattern_symbols
pysyms = python_symbols
tokens = grammar.opmap
token_labels = token
TYPE_ANY = -1
TYPE_ALTERNATIVES = -2
TYPE_GROUP = -3
class MinNode(object):
"""This class serves as an intermediate representation of the
pattern tree during the conversion to sets of leaf-to-root
subpatterns"""
def __init__(self, type=None, name=None):
self.type = type
self.name = name
self.children = []
self.leaf = False
self.parent = None
self.alternatives = []
self.group = []
def __repr__(self):
return str(self.type) + ' ' + str(self.name)
def leaf_to_root(self):
"""Internal method. Returns a characteristic path of the
pattern tree. This method must be run for all leaves until the
linear subpatterns are merged into a single"""
node = self
subp = []
while node:
if node.type == TYPE_ALTERNATIVES:
node.alternatives.append(subp)
if len(node.alternatives) == len(node.children):
#last alternative
subp = [tuple(node.alternatives)]
node.alternatives = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == TYPE_GROUP:
node.group.append(subp)
#probably should check the number of leaves
if len(node.group) == len(node.children):
subp = get_characteristic_subpattern(node.group)
node.group = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == token_labels.NAME and node.name:
#in case of type=name, use the name instead
subp.append(node.name)
else:
subp.append(node.type)
node = node.parent
return subp
def get_linear_subpattern(self):
"""Drives the leaf_to_root method. The reason that
leaf_to_root must be run multiple times is because we need to
reject 'group' matches; for example the alternative form
(a | b c) creates a group [b c] that needs to be matched. Since
matching multiple linear patterns overcomes the automaton's
capabilities, leaf_to_root merges each group into a single
        choice based on how 'characteristic' each alternative is,
        i.e. (a|b c) -> (a|b) if b is more characteristic than c
Returns: The most 'characteristic'(as defined by
get_characteristic_subpattern) path for the compiled pattern
tree.
"""
for l in self.leaves():
subp = l.leaf_to_root()
if subp:
return subp
def leaves(self):
"Generator that returns the leaves of the tree"
for child in self.children:
for x in child.leaves():
yield x
if not self.children:
yield self
def reduce_tree(node, parent=None):
"""
Internal function. Reduces a compiled pattern tree to an
intermediate representation suitable for feeding the
    automaton. This also trims off any optional pattern elements (like
    [a], a*).
"""
new_node = None
#switch on the node type
if node.type == syms.Matcher:
#skip
node = node.children[0]
    if node.type == syms.Alternatives:
#2 cases
if len(node.children) <= 2:
#just a single 'Alternative', skip this node
new_node = reduce_tree(node.children[0], parent)
else:
#real alternatives
new_node = MinNode(type=TYPE_ALTERNATIVES)
#skip odd children('|' tokens)
for child in node.children:
if node.children.index(child)%2:
continue
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
elif node.type == syms.Alternative:
if len(node.children) > 1:
new_node = MinNode(type=TYPE_GROUP)
for child in node.children:
reduced = reduce_tree(child, new_node)
if reduced:
new_node.children.append(reduced)
if not new_node.children:
# delete the group if all of the children were reduced to None
new_node = None
else:
new_node = reduce_tree(node.children[0], parent)
elif node.type == syms.Unit:
if (isinstance(node.children[0], pytree.Leaf) and
node.children[0].value == '('):
#skip parentheses
return reduce_tree(node.children[1], parent)
if ((isinstance(node.children[0], pytree.Leaf) and
node.children[0].value == '[')
or
(len(node.children)>1 and
hasattr(node.children[1], "value") and
node.children[1].value == '[')):
            #skip the whole unit if it's optional
return None
leaf = True
details_node = None
alternatives_node = None
has_repeater = False
repeater_node = None
has_variable_name = False
for child in node.children:
if child.type == syms.Details:
leaf = False
details_node = child
elif child.type == syms.Repeater:
has_repeater = True
repeater_node = child
elif child.type == syms.Alternatives:
alternatives_node = child
if hasattr(child, 'value') and child.value == '=': # variable name
has_variable_name = True
#skip variable name
if has_variable_name:
#skip variable name, '='
name_leaf = node.children[2]
if hasattr(name_leaf, 'value') and name_leaf.value == '(':
# skip parenthesis
name_leaf = node.children[3]
else:
name_leaf = node.children[0]
#set node type
if name_leaf.type == token_labels.NAME:
#(python) non-name or wildcard
if name_leaf.value == 'any':
new_node = MinNode(type=TYPE_ANY)
else:
if hasattr(token_labels, name_leaf.value):
new_node = MinNode(type=getattr(token_labels, name_leaf.value))
else:
new_node = MinNode(type=getattr(pysyms, name_leaf.value))
elif name_leaf.type == token_labels.STRING:
#(python) name or character; remove the apostrophes from
#the string value
name = name_leaf.value.strip("'")
if name in tokens:
new_node = MinNode(type=tokens[name])
else:
new_node = MinNode(type=token_labels.NAME, name=name)
elif name_leaf.type == syms.Alternatives:
new_node = reduce_tree(alternatives_node, parent)
#handle repeaters
if has_repeater:
if repeater_node.children[0].value == '*':
#reduce to None
new_node = None
elif repeater_node.children[0].value == '+':
                #reduce to a single occurrence, i.e. do nothing
pass
else:
#TODO: handle {min, max} repeaters
raise NotImplementedError
pass
#add children
if details_node and new_node is not None:
for child in details_node.children[1:-1]:
#skip '<', '>' markers
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
if new_node:
new_node.parent = parent
return new_node
def get_characteristic_subpattern(subpatterns):
"""Picks the most characteristic from a list of linear patterns
Current order used is:
names > common_names > common_chars
"""
if not isinstance(subpatterns, list):
return subpatterns
if len(subpatterns)==1:
return subpatterns[0]
# first pick out the ones containing variable names
subpatterns_with_names = []
subpatterns_with_common_names = []
common_names = ['in', 'for', 'if' , 'not', 'None']
subpatterns_with_common_chars = []
common_chars = "[]().,:"
for subpattern in subpatterns:
if any(rec_test(subpattern, lambda x: type(x) is str)):
if any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_chars)):
subpatterns_with_common_chars.append(subpattern)
elif any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_names)):
subpatterns_with_common_names.append(subpattern)
else:
subpatterns_with_names.append(subpattern)
if subpatterns_with_names:
subpatterns = subpatterns_with_names
elif subpatterns_with_common_names:
subpatterns = subpatterns_with_common_names
elif subpatterns_with_common_chars:
subpatterns = subpatterns_with_common_chars
# of the remaining subpatterns pick out the longest one
return max(subpatterns, key=len)
def rec_test(sequence, test_func):
"""Tests test_func on all items of sequence and items of included
sub-iterables"""
for x in sequence:
if isinstance(x, (list, tuple)):
for y in rec_test(x, test_func):
yield y
else:
yield test_func(x)
| apache-2.0 |
purpleidea/macaronic-net | django/contrib/gis/db/backends/spatialite/introspection.py | 401 | 2112 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
"""
    Subclass that updates the `base_data_types_reverse` dict
for geometry field types.
"""
base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
base_data_types_reverse.update(
{'point' : 'GeometryField',
'linestring' : 'GeometryField',
'polygon' : 'GeometryField',
'multipoint' : 'GeometryField',
'multilinestring' : 'GeometryField',
'multipolygon' : 'GeometryField',
'geometrycollection' : 'GeometryField',
})
class SpatiaLiteIntrospection(DatabaseIntrospection):
data_types_reverse = GeoFlexibleFieldLookupDict()
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying the `geometry_columns` table to get additional metadata.
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if isinstance(dim, basestring) and 'Z' in dim:
field_params['dim'] = 3
finally:
cursor.close()
return field_type, field_params
| agpl-3.0 |
Yannig/ansible | lib/ansible/utils/module_docs_fragments/mysql.py | 168 | 2909 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Jonathan Mainguy <jon@soh.re>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard mysql documentation fragment
DOCUMENTATION = '''
options:
login_user:
description:
- The username used to authenticate with.
required: false
default: null
login_password:
description:
- The password used to authenticate with.
required: false
default: null
login_host:
description:
- Host running the database.
required: false
default: localhost
login_port:
description:
      - Port of the MySQL server. Requires I(login_host) be defined as something other than localhost if login_port is used.
required: false
default: 3306
login_unix_socket:
description:
- The path to a Unix domain socket for local connections.
required: false
default: null
connect_timeout:
description:
- The connection timeout when connecting to the MySQL server.
required: false
default: 30
version_added: "2.1"
config_file:
description:
- Specify a config file from which user and password are to be read.
required: false
default: '~/.my.cnf'
version_added: "2.0"
ssl_ca:
required: false
default: null
version_added: "2.0"
description:
- The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate as used by the server.
ssl_cert:
required: false
default: null
version_added: "2.0"
description:
- The path to a client public key certificate.
ssl_key:
required: false
default: null
version_added: "2.0"
description:
- The path to the client private key.
requirements:
- MySQLdb
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this
is as easy as yum install MySQL-python. (See M(yum).)
- Both C(login_password) and C(login_user) are required when you are
passing credentials. If none are present, the module will attempt to read
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of 'root' with no password.
'''
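# A module pulls this fragment into its own documentation via the
# extends_documentation_fragment key, e.g. (sketch):
#
#     DOCUMENTATION = '''
#     module: mysql_db
#     ...
#     extends_documentation_fragment: mysql
#     '''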
| gpl-3.0 |
resmo/ansible | test/units/executor/test_play_iterator.py | 10 | 18331 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.executor.play_iterator import HostState, PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.play_context import PlayContext
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestPlayIterator(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_host_state(self):
hs = HostState(blocks=[x for x in range(0, 10)])
hs.tasks_child_state = HostState(blocks=[0])
hs.rescue_child_state = HostState(blocks=[1])
hs.always_child_state = HostState(blocks=[2])
hs.__repr__()
hs.run_state = 100
hs.__repr__()
hs.fail_state = 15
hs.__repr__()
for i in range(0, 10):
hs.cur_block = i
self.assertEqual(hs.get_current_block(), i)
new_hs = hs.copy()
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_play_iterator(self):
# import epdb; epdb.st()
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: false
roles:
- test_role
pre_tasks:
- debug: msg="this is a pre_task"
tasks:
- debug: msg="this is a regular task"
- block:
- debug: msg="this is a block task"
- block:
- debug: msg="this is a sub-block in a block"
rescue:
- debug: msg="this is a rescue task"
- block:
- debug: msg="this is a sub-block in a rescue"
always:
- debug: msg="this is an always task"
- block:
- debug: msg="this is a sub-block in an always"
post_tasks:
- debug: msg="this is a post_task"
""",
'/etc/ansible/roles/test_role/tasks/main.yml': """
- name: role task
debug: msg="this is a role task"
- block:
- name: role block task
debug: msg="inside block in role"
always:
- name: role always task
debug: msg="always task in block in role"
- include: foo.yml
- name: role task after include
debug: msg="after include in role"
- block:
- name: starting role nested block 1
debug:
- block:
- name: role nested block 1 task 1
debug:
- name: role nested block 1 task 2
debug:
- name: role nested block 1 task 3
debug:
- name: end of role nested block 1
debug:
- name: starting role nested block 2
debug:
- block:
- name: role nested block 2 task 1
debug:
- name: role nested block 2 task 2
debug:
- name: role nested block 2 task 3
debug:
- name: end of role nested block 2
debug:
""",
'/etc/ansible/roles/test_role/tasks/foo.yml': """
- name: role included task
debug: msg="this is task in an include from a role"
"""
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
mock_var_manager._fact_cache['host00'] = dict()
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# pre task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# role task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.name, "role task")
self.assertIsNotNone(task._role)
# role block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role block task")
self.assertIsNotNone(task._role)
# role block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role always task")
self.assertIsNotNone(task._role)
# role include task
# (host_state, task) = itr.get_next_task_for_host(hosts[0])
# self.assertIsNotNone(task)
# self.assertEqual(task.action, 'debug')
# self.assertEqual(task.name, "role included task")
# self.assertIsNotNone(task._role)
# role task after include
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role task after include")
self.assertIsNotNone(task._role)
# role nested block tasks
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "starting role nested block 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 2")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 3")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "end of role nested block 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "starting role nested block 2")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 2 task 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 2 task 2")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 2 task 3")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "end of role nested block 2")
self.assertIsNotNone(task._role)
# regular play task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertIsNone(task._role)
# block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a block task"))
# sub-block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in a block"))
# mark the host failed
itr.mark_host_failed(hosts[0])
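# failing the host should drive iteration into the block's rescue section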
# block rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a rescue task"))
# sub-block rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue"))
# block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is an always task"))
# sub-block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in an always"))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# post task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# end of iteration
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNone(task)
# host 0 shouldn't be in the failed hosts, as the error
# was handled by a rescue block
failed_hosts = itr.get_failed_hosts()
self.assertNotIn(hosts[0], failed_hosts)
def test_play_iterator_nested_blocks(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: false
tasks:
- block:
- block:
- block:
- block:
- block:
- debug: msg="this is the first task"
- ping:
rescue:
- block:
- block:
- block:
- block:
- debug: msg="this is the rescue task"
always:
- block:
- block:
- block:
- block:
- debug: msg="this is the always task"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# get the first task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the first task'))
# fail the host
itr.mark_host_failed(hosts[0])
# get the rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the rescue task'))
# get the always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the always task'))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# end of iteration
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNone(task)
def test_play_iterator_add_tasks(self):
fake_loader = DictDataLoader({
'test_play.yml': """
- hosts: all
gather_facts: no
tasks:
- debug: msg="dummy task"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# test the high-level add_tasks() method
s = HostState(blocks=[0, 1, 2])
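# stub the low-level insert helper so we only verify that add_tasks() stores the returned state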
itr._insert_tasks_into_state = MagicMock(return_value=s)
itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()])
self.assertEqual(itr._host_states[hosts[0].name], s)
# now actually test the lower-level method that does the work
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# advance the iterator to the first debug task so we have a known insertion point
_, task = itr.get_next_task_for_host(hosts[0])
while task and task.action != 'debug':
_, task = itr.get_next_task_for_host(hosts[0])
if task is None:
raise Exception("iterated past end of play while looking for place to insert tasks")
# get the current host state and copy it so we can mutate it
s = itr.get_host_state(hosts[0])
s_copy = s.copy()
# with an empty task list, or when the host is in a failed state, the state should be returned as-is
res_state = itr._insert_tasks_into_state(s_copy, task_list=[])
self.assertEqual(res_state, s_copy)
s_copy.fail_state = itr.FAILED_TASKS
res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
self.assertEqual(res_state, s_copy)
# but if we've failed while iterating the rescue section, new tasks get inserted there
mock_task = MagicMock()
s_copy.run_state = itr.ITERATING_RESCUE
res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task])
self.assertEqual(res_state, s_copy)
self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue)
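# install the mutated state and peek to confirm the inserted mock task is next up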
itr._host_states[hosts[0].name] = res_state
(next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True)
self.assertEqual(next_task, mock_task)
itr._host_states[hosts[0].name] = s
# test a regular insertion
s_copy = s.copy()
res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
| gpl-3.0 |
dyoung418/tensorflow | tensorflow/contrib/timeseries/examples/multivariate_test.py | 91 | 1330 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the multivariate example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.examples import multivariate
from tensorflow.python.platform import test
class MultivariateExampleTest(test.TestCase):
def test_shapes_structural(self):
times, values = multivariate.multivariate_train_and_sample(
export_directory=self.get_temp_dir(), training_steps=5)
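# structural check only; with just 5 training steps the sampled values themselves are not meaningful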
self.assertAllEqual([1100], times.shape)
self.assertAllEqual([1100, 5], values.shape)
if __name__ == "__main__":
test.main()
| apache-2.0 |
listyque/TACTIC-Handler | thlib/side/client/tactic_client_lib/interpreter/callback.py | 1 | 3820 | ###########################################################
#
# Copyright (c) 2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['callback']
from interpreter import PipelineInterpreter
import cgi
class ClientCallbackException(Exception):
pass
class BaseClientCbk(object):
def set_ticket(self, ticket):
self.ticket = ticket
def set_options(self, options):
self.options = options
def get_option(self, name):
return self.options.get(name)
def _execute(self):
# get the server name
server_name = self.get_option("server_name")
if not server_name:
raise ClientCallbackException("No [server_name] option delivered to callback")
server_name = server_name[0]
# get the project
project_code = self.get_option("project_code")
if not project_code:
raise ClientCallbackException("No [project_code] option delivered to callback")
project_code = project_code[0]
# the entire pipeline is run in a TacticServer Transaction
from tactic_client_lib import TacticServerStub
self.server = TacticServerStub()
self.server.set_server(server_name)
self.server.set_ticket(self.ticket)
self.server.set_project(project_code)
self.server.start("Pipeline checkin")
try:
self.execute()
except Exception, e:
self.server.abort()
raise
else:
self.server.finish()
def execute_pipeline(self, pipeline_xml):
# execute the pipeline
interpreter = PipelineInterpreter(pipeline_xml)
interpreter.set_server(self.server)
interpreter.set_package(self.options)
interpreter.execute()
class ClientCbk(BaseClientCbk):
'''This callback executes a pipeline based on certain input parameters
'''
def execute(self):
# get the snapshot type passed in
snapshot_type_code = self.get_option("snapshot_type")
if not snapshot_type_code:
raise ClientCallbackException("No [snapshot_type] option delivered to callback")
snapshot_type_code = snapshot_type_code[0]  # unwrap the single-element list from cgi.parse_qs
search_type = "sthpw/snapshot_type"
search_key = self.server.build_search_type(search_type, snapshot_type_code)
# get the pipeline
pipeline_xml = self.server.get_pipeline_xml(search_key)
self.execute_pipeline(pipeline_xml)
class ClientLoadCbk(BaseClientCbk):
'''This callback loads the snapshot for each given search key and executes the pipeline associated with that snapshot's type
'''
def execute(self):
search_keys = self.get_option('search_key')
for search_key in search_keys:
print "search_key: ", search_key
# get the snapshot
snapshot = self.server.get_by_search_key(search_key)
# get the snapshot type from the snapshot
snapshot_type_code = snapshot.get('snapshot_type')
search_type = "sthpw/snapshot_type"
search_key = self.server.build_search_type(search_type, snapshot_type_code)
pipeline_xml = self.server.get_pipeline_xml(search_key)
# execute the pipeline
self.execute_pipeline(pipeline_xml)
#
# entry point
#
def callback(ticket, callback_class, options):
# NOTE: cgi.parse_qs creates arrays for *all* values
options = cgi.parse_qs(options)
print "options: ", options
# get the callback, if there is one
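# NOTE: eval() assumes callback_class names a trusted class defined in this module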
callback = eval("%s()" % callback_class)
callback.set_ticket(ticket)
callback.set_options(options)
callback._execute()
| epl-1.0 |