| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (class label, 990 classes) | size (string, 4–7 chars) | content (string, 666 chars – 1M) | license (class label, 15 classes) |
|---|---|---|---|---|---|
atmark-techno/atmark-dist | user/python/Lib/test/test_largefile.py | 10 | 3501 | #!python
#----------------------------------------------------------------------
# test largefile support on system where this makes sense
#
#XXX how to only run this when support is there
#XXX how to only optionally run this, it will take along time
#----------------------------------------------------------------------
import test_support
import os, struct, stat, sys
# only run if the current system support large files
f = open(test_support.TESTFN, 'w')
try:
# 2**31 == 2147483648
f.seek(2147483649L)
except OverflowError:
raise test_support.TestSkipped, "platform does not have largefile support"
else:
f.close()
# create >2GB file (2GB = 2147483648 bytes)
size = 2500000000L
name = test_support.TESTFN
# on Windows this test comsumes large resources:
# it takes a long time to build the >2GB file and takes >2GB of disk space
# therefore test_support.use_large_resources must be defined to run this test
if sys.platform[:3] == 'win' and not test_support.use_large_resources:
raise test_support.TestSkipped, \
"test requires %s bytes and a long time to run" % str(size)
def expect(got_this, expect_this):
if test_support.verbose:
print '%s =?= %s ...' % (`got_this`, `expect_this`),
if got_this != expect_this:
if test_support.verbose:
print 'no'
raise test_support.TestFailed, 'got %s, but expected %s' %\
(str(got_this), str(expect_this))
else:
if test_support.verbose:
print 'yes'
# test that each file function works as expected for a large (i.e. >2GB, do
# we have to check >4GB) files
if test_support.verbose:
print 'create large file via seek (may be sparse file) ...'
f = open(name, 'w')
f.seek(size)
f.write('a')
f.flush()
expect(os.fstat(f.fileno())[stat.ST_SIZE], size+1)
if test_support.verbose:
print 'check file size with os.fstat'
f.close()
if test_support.verbose:
print 'check file size with os.stat'
expect(os.stat(name)[stat.ST_SIZE], size+1)
if test_support.verbose:
print 'play around with seek() and read() with the built largefile'
f = open(name, 'r')
expect(f.tell(), 0)
expect(f.read(1), '\000')
expect(f.tell(), 1)
f.seek(0)
expect(f.tell(), 0)
f.seek(0, 0)
expect(f.tell(), 0)
f.seek(42)
expect(f.tell(), 42)
f.seek(42, 0)
expect(f.tell(), 42)
f.seek(42, 1)
expect(f.tell(), 84)
f.seek(0, 1)
expect(f.tell(), 84)
f.seek(0, 2) # seek from the end
expect(f.tell(), size + 1 + 0)
f.seek(-10, 2)
expect(f.tell(), size + 1 - 10)
f.seek(-size-1, 2)
expect(f.tell(), 0)
f.seek(size)
expect(f.tell(), size)
expect(f.read(1), 'a') # the 'a' that was written at the end of the file above
f.close()
if test_support.verbose:
print 'play around with os.lseek() with the built largefile'
f = open(name, 'r')
expect(os.lseek(f.fileno(), 0, 0), 0)
expect(os.lseek(f.fileno(), 42, 0), 42)
expect(os.lseek(f.fileno(), 42, 1), 84)
expect(os.lseek(f.fileno(), 0, 1), 84)
expect(os.lseek(f.fileno(), 0, 2), size+1+0)
expect(os.lseek(f.fileno(), -10, 2), size+1-10)
expect(os.lseek(f.fileno(), -size-1, 2), 0)
expect(os.lseek(f.fileno(), size, 0), size)
expect(f.read(1), 'a') # the 'a' that was written at the end of the file above
f.close()
# XXX add tests for truncate if it exists
# XXX has truncate ever worked on Windows? specifically on WinNT I get:
# "IOError: [Errno 13] Permission denied"
##try:
## newsize = size - 10
## f.seek(newsize)
## f.truncate()
## expect(f.tell(), newsize)
## newsize = newsize - 1
## f.seek(0)
## f.truncate(newsize)
## expect(f.tell(), newsize)
##except AttributeError:
## pass
os.unlink(name)
| gpl-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tests/test_expressions.py | 9 | 16557 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
import nose
import re
from numpy.random import randn
import operator
import numpy as np
from pandas.core.api import DataFrame, Panel
from pandas.computation import expressions as expr
from pandas import compat
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_panel4d_equal)
import pandas.util.testing as tm
from numpy.testing.decorators import slow
if not expr._USE_NUMEXPR:
try:
import numexpr
except ImportError:
msg = "don't have"
else:
msg = "not using"
raise nose.SkipTest("{0} numexpr".format(msg))
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
_mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') })
_integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
columns=list('ABCD'), dtype='int64')
_frame_panel = Panel(dict(ItemA=_frame.copy(), ItemB=(_frame.copy() + 3), ItemC=_frame.copy(), ItemD=_frame.copy()))
_frame2_panel = Panel(dict(ItemA=_frame2.copy(), ItemB=(_frame2.copy() + 3),
ItemC=_frame2.copy(), ItemD=_frame2.copy()))
_integer_panel = Panel(dict(ItemA=_integer,
ItemB=(_integer + 34).astype('int64')))
_integer2_panel = Panel(dict(ItemA=_integer2,
ItemB=(_integer2 + 34).astype('int64')))
_mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
class TestExpressions(tm.TestCase):
    """Check that DataFrame/Series/Panel arithmetic and comparisons give the
    same results whether or not the numexpr-backed evaluation path is used.

    Fix over the original: the failure-reporting paths called
    ``com.pprint_thing`` although ``com`` (pandas.core.common) is never
    imported in this file, so any assertion failure was masked by a
    ``NameError``.  Plain ``print`` is used instead (``print_function`` is
    imported at the top of the file).
    """

    _multiprocess_can_split_ = False

    def setUp(self):
        # Work on copies so individual tests cannot corrupt the shared
        # module-level fixtures.
        self.frame = _frame.copy()
        self.frame2 = _frame2.copy()
        self.mixed = _mixed.copy()
        self.mixed2 = _mixed2.copy()
        self.integer = _integer.copy()
        # Remember the element-count threshold so tearDown can restore it
        # after helpers force numexpr on by setting it to 0.
        self._MIN_ELEMENTS = expr._MIN_ELEMENTS

    def tearDown(self):
        expr._MIN_ELEMENTS = self._MIN_ELEMENTS

    @nose.tools.nottest
    def run_arithmetic_test(self, df, other, assert_func, check_dtype=False,
                            test_flex=True):
        """Run every arithmetic op twice (numexpr off, then on) and assert
        that both paths produce the same result."""
        expr._MIN_ELEMENTS = 0  # force numexpr regardless of operand size
        operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow']
        if not compat.PY3:
            operations.append('div')
        for arith in operations:
            operator_name = arith
            if arith == 'div':
                # the operator module has no 'div' entry usable here;
                # truediv is the matching function
                operator_name = 'truediv'
            if test_flex:
                # flex method form, e.g. df.add(other)
                op = lambda x, y: getattr(df, arith)(y)
                op.__name__ = arith
            else:
                op = getattr(operator, operator_name)
            expr.set_use_numexpr(False)
            expected = op(df, other)
            expr.set_use_numexpr(True)
            result = op(df, other)
            try:
                if check_dtype:
                    if arith == 'truediv':
                        assert expected.dtype.kind == 'f'
                assert_func(expected, result)
            except Exception:
                # was: com.pprint_thing(...) -- ``com`` is not imported here
                print("Failed test with operator %r" % op.__name__)
                raise

    def test_integer_arithmetic(self):
        self.run_arithmetic_test(self.integer, self.integer,
                                 assert_frame_equal)
        self.run_arithmetic_test(self.integer.iloc[:, 0],
                                 self.integer.iloc[:, 0],
                                 assert_series_equal, check_dtype=True)

    @nose.tools.nottest
    def run_binary_test(self, df, other, assert_func,
                        test_flex=False, numexpr_ops=set(['gt', 'lt', 'ge',
                                                          'le', 'eq', 'ne'])):
        """
        tests solely that the result is the same whether or not numexpr is
        enabled.  Need to test whether the function does the correct thing
        elsewhere.
        """
        expr._MIN_ELEMENTS = 0
        expr.set_test_mode(True)
        operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
        for arith in operations:
            if test_flex:
                op = lambda x, y: getattr(df, arith)(y)
                op.__name__ = arith
            else:
                op = getattr(operator, arith)
            expr.set_use_numexpr(False)
            expected = op(df, other)
            expr.set_use_numexpr(True)
            expr.get_test_result()  # drain any stale test-mode result
            result = op(df, other)
            used_numexpr = expr.get_test_result()
            try:
                if arith in numexpr_ops:
                    assert used_numexpr, "Did not use numexpr as expected."
                else:
                    assert not used_numexpr, "Used numexpr unexpectedly."
                assert_func(expected, result)
            except Exception:
                # was: com.pprint_thing(...) -- ``com`` is not imported here
                print("Failed test with operation %r" % arith)
                print("test_flex was %r" % test_flex)
                raise

    def run_frame(self, df, other, binary_comp=None, run_binary=True,
                  **kwargs):
        """Exercise a frame through the arithmetic suite (both call styles)
        and, optionally, the comparison suite."""
        self.run_arithmetic_test(df, other, assert_frame_equal,
                                 test_flex=False, **kwargs)
        self.run_arithmetic_test(df, other, assert_frame_equal, test_flex=True,
                                 **kwargs)
        if run_binary:
            if binary_comp is None:
                # build the comparand with numexpr off so the comparison run
                # below is the first numexpr use
                expr.set_use_numexpr(False)
                binary_comp = other + 1
                expr.set_use_numexpr(True)
            self.run_binary_test(df, binary_comp, assert_frame_equal,
                                 test_flex=False, **kwargs)
            self.run_binary_test(df, binary_comp, assert_frame_equal,
                                 test_flex=True, **kwargs)

    def run_series(self, ser, other, binary_comp=None, **kwargs):
        self.run_arithmetic_test(ser, other, assert_series_equal,
                                 test_flex=False, **kwargs)
        self.run_arithmetic_test(ser, other, assert_almost_equal,
                                 test_flex=True, **kwargs)
        # series doesn't uses vec_compare instead of numexpr...
        # if binary_comp is None:
        #     binary_comp = other + 1
        # self.run_binary_test(ser, binary_comp, assert_frame_equal,
        #                      test_flex=False, **kwargs)
        # self.run_binary_test(ser, binary_comp, assert_frame_equal,
        #                      test_flex=True, **kwargs)

    def run_panel(self, panel, other, binary_comp=None, run_binary=True,
                  assert_func=assert_panel_equal, **kwargs):
        self.run_arithmetic_test(panel, other, assert_func, test_flex=False,
                                 **kwargs)
        self.run_arithmetic_test(panel, other, assert_func, test_flex=True,
                                 **kwargs)
        if run_binary:
            if binary_comp is None:
                binary_comp = other + 1
            self.run_binary_test(panel, binary_comp, assert_func,
                                 test_flex=False, **kwargs)
            self.run_binary_test(panel, binary_comp, assert_func,
                                 test_flex=True, **kwargs)

    def test_integer_arithmetic_frame(self):
        self.run_frame(self.integer, self.integer)

    def test_integer_arithmetic_series(self):
        self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])

    @slow
    def test_integer_panel(self):
        self.run_panel(_integer2_panel, np.random.randint(1, 100))

    def test_float_arithemtic_frame(self):
        # NOTE(review): "arithemtic" typo kept -- renaming would change the
        # test id callers/CI may reference.
        self.run_frame(self.frame2, self.frame2)

    def test_float_arithmetic_series(self):
        self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])

    @slow
    def test_float_panel(self):
        self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)

    @slow
    def test_panel4d(self):
        self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,
                       assert_func=assert_panel4d_equal, binary_comp=3)

    def test_mixed_arithmetic_frame(self):
        # TODO: FIGURE OUT HOW TO GET IT TO WORK...
        # can't do arithmetic because comparison methods try to do *entire*
        # frame instead of by-column
        self.run_frame(self.mixed2, self.mixed2, run_binary=False)

    def test_mixed_arithmetic_series(self):
        for col in self.mixed2.columns:
            self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)

    @slow
    def test_mixed_panel(self):
        self.run_panel(_mixed2_panel, np.random.randint(1, 100),
                       binary_comp=-2)

    def test_float_arithemtic(self):
        # NOTE(review): "arithemtic" typo kept for the same reason as above.
        self.run_arithmetic_test(self.frame, self.frame, assert_frame_equal)
        self.run_arithmetic_test(self.frame.iloc[:, 0], self.frame.iloc[:, 0],
                                 assert_series_equal, check_dtype=True)

    def test_mixed_arithmetic(self):
        self.run_arithmetic_test(self.mixed, self.mixed, assert_frame_equal)
        for col in self.mixed.columns:
            self.run_arithmetic_test(self.mixed[col], self.mixed[col],
                                     assert_series_equal)

    def test_integer_with_zeros(self):
        # zero out a random subset of cells to exercise zero-valued operands
        self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
        self.run_arithmetic_test(self.integer, self.integer,
                                 assert_frame_equal)
        self.run_arithmetic_test(self.integer.iloc[:, 0],
                                 self.integer.iloc[:, 0],
                                 assert_series_equal)

    def test_invalid(self):
        # no op
        result = expr._can_use_numexpr(operator.add, None, self.frame,
                                       self.frame, 'evaluate')
        self.assertFalse(result)

        # mixed
        result = expr._can_use_numexpr(operator.add, '+', self.mixed,
                                       self.frame, 'evaluate')
        self.assertFalse(result)

        # min elements
        result = expr._can_use_numexpr(operator.add, '+', self.frame2,
                                       self.frame2, 'evaluate')
        self.assertFalse(result)

        # ok, we only check on first part of expression
        result = expr._can_use_numexpr(operator.add, '+', self.frame,
                                       self.frame2, 'evaluate')
        self.assertTrue(result)

    def test_binary_ops(self):
        def testit():
            for f, f2 in [(self.frame, self.frame2),
                          (self.mixed, self.mixed2)]:
                for op, op_str in [('add', '+'), ('sub', '-'), ('mul', '*'),
                                   ('div', '/'), ('pow', '**')]:
                    if op == 'div':
                        op = getattr(operator, 'truediv', None)
                    else:
                        op = getattr(operator, op, None)
                    if op is not None:
                        result = expr._can_use_numexpr(op, op_str, f, f,
                                                       'evaluate')
                        # numexpr eligible iff the frame is not mixed-dtype
                        self.assertNotEqual(result, f._is_mixed_type)

                        result = expr.evaluate(op, op_str, f, f,
                                               use_numexpr=True)
                        expected = expr.evaluate(op, op_str, f, f,
                                                 use_numexpr=False)
                        tm.assert_numpy_array_equal(result, expected.values)

                        # small frames never qualify for numexpr
                        result = expr._can_use_numexpr(op, op_str, f2, f2,
                                                       'evaluate')
                        self.assertFalse(result)

        # run under three configurations: numexpr off, single-threaded,
        # and default threading
        expr.set_use_numexpr(False)
        testit()
        expr.set_use_numexpr(True)
        expr.set_numexpr_threads(1)
        testit()
        expr.set_numexpr_threads()
        testit()

    def test_boolean_ops(self):
        def testit():
            for f, f2 in [(self.frame, self.frame2),
                          (self.mixed, self.mixed2)]:
                f11 = f
                f12 = f + 1
                f21 = f2
                f22 = f2 + 1
                for op, op_str in [('gt', '>'), ('lt', '<'), ('ge', '>='),
                                   ('le', '<='), ('eq', '=='), ('ne', '!=')]:
                    op = getattr(operator, op)
                    result = expr._can_use_numexpr(op, op_str, f11, f12,
                                                   'evaluate')
                    self.assertNotEqual(result, f11._is_mixed_type)

                    result = expr.evaluate(op, op_str, f11, f12,
                                           use_numexpr=True)
                    expected = expr.evaluate(op, op_str, f11, f12,
                                             use_numexpr=False)
                    tm.assert_numpy_array_equal(result, expected.values)

                    # small frames never qualify for numexpr
                    result = expr._can_use_numexpr(op, op_str, f21, f22,
                                                   'evaluate')
                    self.assertFalse(result)

        expr.set_use_numexpr(False)
        testit()
        expr.set_use_numexpr(True)
        expr.set_numexpr_threads(1)
        testit()
        expr.set_numexpr_threads()
        testit()

    def test_where(self):
        def testit():
            for f in [self.frame, self.frame2, self.mixed, self.mixed2]:
                for cond in [True, False]:
                    c = np.empty(f.shape, dtype=np.bool_)
                    c.fill(cond)
                    result = expr.where(c, f.values, f.values + 1)
                    expected = np.where(c, f.values, f.values + 1)
                    tm.assert_numpy_array_equal(result, expected)

        expr.set_use_numexpr(False)
        testit()
        expr.set_use_numexpr(True)
        expr.set_numexpr_threads(1)
        testit()
        expr.set_numexpr_threads()
        testit()

    def test_bool_ops_raise_on_arithmetic(self):
        df = DataFrame({'a': np.random.rand(10) > 0.5,
                        'b': np.random.rand(10) > 0.5})
        names = 'div', 'truediv', 'floordiv', 'pow'
        ops = '/', '/', '//', '**'
        msg = 'operator %r not implemented for bool dtypes'
        for op, name in zip(ops, names):
            if not compat.PY3 or name != 'div':
                f = getattr(operator, name)
                err_msg = re.escape(msg % op)

                with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                    f(df, df)

                with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                    f(df.a, df.b)

                with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                    f(df.a, True)

                with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                    f(False, df.a)

                with tm.assertRaisesRegexp(TypeError, err_msg):
                    f(False, df)

                with tm.assertRaisesRegexp(TypeError, err_msg):
                    f(df, True)

    def test_bool_ops_warn_on_arithmetic(self):
        n = 10
        df = DataFrame({'a': np.random.rand(n) > 0.5,
                        'b': np.random.rand(n) > 0.5})
        names = 'add', 'mul', 'sub'
        ops = '+', '*', '-'
        # each arithmetic op should warn and behave like its logical
        # counterpart on bool frames: + -> |, * -> &, - -> ^
        subs = {'+': '|', '*': '&', '-': '^'}
        sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}
        for op, name in zip(ops, names):
            f = getattr(operator, name)
            fe = getattr(operator, sub_funcs[subs[op]])

            with tm.use_numexpr(True, min_elements=5):
                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df, df)
                    e = fe(df, df)
                    tm.assert_frame_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df.a, df.b)
                    e = fe(df.a, df.b)
                    tm.assert_series_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df.a, True)
                    e = fe(df.a, True)
                    tm.assert_series_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(False, df.a)
                    e = fe(False, df.a)
                    tm.assert_series_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(False, df)
                    e = fe(False, df)
                    tm.assert_frame_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df, True)
                    e = fe(df, True)
                    tm.assert_frame_equal(r, e)
if __name__ == '__main__':
    # Run this module's tests under nose when executed as a script.
    runner_args = [__file__, '-vvs', '-x', '--pdb', '--pdb-failure']
    import nose
    nose.runmodule(argv=runner_args, exit=False)
| artistic-2.0 |
hzlf/openbroadcast | website/apps/ashop/migrations/0021_auto__add_releaseformat.py | 1 | 28643 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create the 'ashop_releaseformat' join table linking a
    Downloadrelease to an alibrary Format, then emit south's
    post-create signal."""
    gf = self.gf  # shorthand for south's field-constructor resolver
    db.create_table('ashop_releaseformat', (
        ('id', gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('release', gf('django.db.models.fields.related.ForeignKey')(to=orm['ashop.Downloadrelease'])),
        ('format', gf('django.db.models.fields.related.ForeignKey')(to=orm['alibrary.Format'])),
    ))
    db.send_create_signal('ashop', ['Releaseformat'])
def backwards(self, orm):
    # Deleting model 'Releaseformat' -- reverse of forwards(): drop the
    # join table again.  The south-provided `orm` argument is unused here.
    db.delete_table('ashop_releaseformat')
models = {
'alibrary.artist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Artist'},
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Artist']", 'through': "orm['alibrary.ArtistMembership']", 'symmetrical': 'False'}),
'multiple': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Profession']", 'through': "orm['alibrary.ArtistProfessions']", 'symmetrical': 'False'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.artistmembership': {
'Meta': {'object_name': 'ArtistMembership'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_child'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_parent'", 'to': "orm['alibrary.Artist']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_membership_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.artistprofessions': {
'Meta': {'object_name': 'ArtistProfessions'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Profession']"})
},
'alibrary.format': {
'Meta': {'ordering': "('format', 'version')", 'object_name': 'Format'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'base'", 'max_length': '10'})
},
'alibrary.label': {
'Meta': {'ordering': "('name',)", 'object_name': 'Label'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'label_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelcode': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Label']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'b5717e19-9191-46f4-beba-29c15e4ecc6e'", 'max_length': '36'})
},
'alibrary.license': {
'Meta': {'ordering': "('name',)", 'object_name': 'License'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'license_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'license_children'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'restricted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'d57952b3-e4fd-4e5e-a8db-2a3608c0d9ce'", 'max_length': '36'})
},
'alibrary.media': {
'Meta': {'ordering': "('tracknumber',)", 'object_name': 'Media'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_artist'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Profession']", 'null': 'True', 'through': "orm['alibrary.MediaExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'lock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_master'", 'null': 'True', 'to': "orm['filer.Audio']"}),
'mediatype': ('django.db.models.fields.CharField', [], {'default': "'track'", 'max_length': '12'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'media_release'", 'to': "orm['alibrary.Release']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False', 'db_index': 'True'}),
'tracknumber': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'})
},
'alibrary.mediaextraartists': {
'Meta': {'object_name': 'MediaExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_media'", 'to': "orm['alibrary.Media']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.profession': {
'Meta': {'ordering': "('name',)", 'object_name': 'Profession'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'alibrary.release': {
'Meta': {'ordering': "('releasedate',)", 'object_name': 'Release'},
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.ReleaseExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_label'", 'to': "orm['alibrary.Label']"}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'pressings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'releasedate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'releasetype': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.releaseextraartists': {
'Meta': {'object_name': 'ReleaseExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_release'", 'to': "orm['alibrary.Release']"})
},
'ashop.downloadmedia': {
'Meta': {'ordering': "['name']", 'object_name': 'Downloadmedia', '_ormbases': ['shop.Product']},
'formats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Format']", 'through': "orm['ashop.Mediaformat']", 'symmetrical': 'False'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mediaproduct'", 'to': "orm['alibrary.Media']"}),
'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shop.Product']", 'unique': 'True', 'primary_key': 'True'})
},
'ashop.downloadrelease': {
'Meta': {'ordering': "['name']", 'object_name': 'Downloadrelease', '_ormbases': ['ashop.Releaseproduct']},
'formats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Format']", 'through': "orm['ashop.Releaseformat']", 'symmetrical': 'False'}),
'releaseproduct_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ashop.Releaseproduct']", 'unique': 'True', 'primary_key': 'True'})
},
'ashop.hardwarerelease': {
'Meta': {'ordering': "['name']", 'object_name': 'Hardwarerelease', '_ormbases': ['ashop.Releaseproduct']},
'circulation': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'medium': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'needs_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'releaseproduct_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ashop.Releaseproduct']", 'unique': 'True', 'primary_key': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'ashop.mediaformat': {
'Meta': {'object_name': 'Mediaformat'},
'format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Format']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ashop.Downloadmedia']"})
},
'ashop.releaseformat': {
'Meta': {'object_name': 'Releaseformat'},
'format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Format']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ashop.Downloadrelease']"})
},
'ashop.releaseproduct': {
'Meta': {'ordering': "['name']", 'object_name': 'Releaseproduct', '_ormbases': ['shop.Product']},
'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shop.Product']", 'unique': 'True', 'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releaseproduct'", 'to': "orm['alibrary.Release']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.audio': {
'Meta': {'object_name': 'Audio', '_ormbases': ['filer.File']},
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_shop.product_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False', 'db_index': 'True'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['ashop']
| gpl-3.0 |
axsauze/eventsfinder | django/contrib/localflavor/ch/forms.py | 101 | 3963 | """
Swiss-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.ch.ch_states import STATE_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
# Identity card / passport number: 8 alphanumerics, a ninth position that is
# either a digit or the '<' filler character, then a single check digit.
id_re = re.compile(r"^(?P<idnumber>\w{8})(?P<pos9>(\d{1}|<))(?P<checksum>\d{1})$")
# Local Swiss phone number: '0', a non-zero area digit, then 8 more digits.
phone_digits_re = re.compile(r'^0([1-9]{1})\d{8}$')
class CHZipCodeField(RegexField):
    """A form field validating a Swiss postal code: exactly four digits."""
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        zip_code_re = r'^\d{4}$'
        super(CHZipCodeField, self).__init__(
            zip_code_re, max_length, min_length, *args, **kwargs)
class CHPhoneNumberField(Field):
    """
    Validate local Swiss phone number (not international ones)
    The correct format is '0XX XXX XX XX'.
    '0XX.XXX.XX.XX' and '0XXXXXXXXX' validate but are corrected to
    '0XX XXX XX XX'.
    """
    default_error_messages = {
        'invalid': _('Phone numbers must be in 0XX XXX XX XX format.'),
    }

    def clean(self, value):
        super(CHPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Drop the separators users commonly type before validating.
        digits = re.sub('(\.|\s|/|-)', '', smart_text(value))
        if not phone_digits_re.search(digits):
            raise ValidationError(self.error_messages['invalid'])
        # Re-group the bare digits as '0XX XXX XX XX'.
        return '%s %s %s %s' % (digits[0:3], digits[3:6],
                                digits[6:8], digits[8:10])
class CHStateSelect(Select):
    """Select widget offering the Swiss cantons (CH states) as its choices."""

    def __init__(self, attrs=None):
        super(CHStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class CHIdentityCardNumberField(Field):
    """
    A Swiss identity card number.

    Checks the following rules to determine whether the number is valid:

        * Conforms to the X1234567<0 or 1234567890 format.
        * Included checksums match calculated checksums
    """
    default_error_messages = {
        'invalid': _('Enter a valid Swiss identity or passport card number in X1234567<0 or 1234567890 format.'),
    }

    # Repeating weights applied to the first nine positions when computing
    # the check digit (7, 3, 1, 7, 3, 1, ...).
    _checksum_weights = (7, 3, 1)

    def has_valid_checksum(self, number):
        """Return True if the final character of ``number`` is the correct
        check digit for the nine characters preceding it."""
        given_number, given_checksum = number[:-1], number[-1]
        new_number = given_number

        first = str(number[:1])
        if first.isalpha():
            # Identity-card form: the leading letter encodes a digit
            # (A=0 .. I=8) and the ninth position is forced to '0'.
            num = ord(first.upper()) - 65
            if num < 0 or num > 8:
                return False
            new_number = str(num) + new_number[1:]
            new_number = new_number[:8] + '0'

        if not new_number.isdigit():
            return False

        calculated_checksum = sum(
            int(digit) * self._checksum_weights[i % 3]
            for i, digit in enumerate(new_number))
        return str(calculated_checksum)[-1] == given_checksum

    def clean(self, value):
        super(CHIdentityCardNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''

        match = id_re.match(value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])

        groups = match.groupdict()
        idnumber = groups['idnumber']
        pos9 = groups['pos9']
        checksum = groups['checksum']

        # All-zero serial numbers are syntactically valid but never issued.
        if idnumber in ('00000000', 'A0000000'):
            raise ValidationError(self.error_messages['invalid'])

        all_digits = "%s%s%s" % (idnumber, pos9, checksum)
        if not self.has_valid_checksum(all_digits):
            raise ValidationError(self.error_messages['invalid'])
        return '%s%s%s' % (idnumber, pos9, checksum)
| bsd-3-clause |
PeterWangIntel/chromium-crosswalk | chrome/common/extensions/docs/server2/manifest_data_source_test.py | 87 | 6399 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
import json
import unittest
from future import Future
import manifest_data_source
from object_store_creator import ObjectStoreCreator
# Shared fixture for the tests below: manifest feature docs keyed by name,
# covering every 'level' value ('required', 'recommended', 'only_one',
# 'optional'), nested 'children', per-doc 'annotations', and a
# '{{platform}}' example template.
convert_and_annotate_docs = {
  'name': {
    'example': "My {{platform}}",
    'name': 'name',
    'level': 'required'
  },
  'doc2': {
    'level': 'required',
    'name': 'doc2'
  },
  'doc1': {
    'level': 'required',
    'name': 'doc1',
    'children': {
      'sub1': {
        'annotations': ['not so important'],
        'level': 'optional',
        'name': 'sub1'
      },
      'sub2': {
        'level': 'required',
        'name': 'sub2'
      }
    }
  },
  'doc3': {
    'level': 'only_one',
    'name': 'doc3'
  },
  'doc4': {
    'level': 'recommended',
    'name': 'doc4'
  },
  'doc5': {
    'level': 'only_one',
    'name': 'doc5'
  },
  'doc6': {
    'level': 'optional',
    'name': 'doc6'
  }
}
class ManifestDataSourceTest(unittest.TestCase):
  """Unit tests for the helpers and data source in manifest_data_source."""

  def testListifyAndSortDocs(self):
    # Docs are flattened into a list ordered required -> recommended ->
    # only_one -> optional, and the '{{platform}}' template is substituted.
    expected_docs = [
      {
        'level': 'required',
        'name': 'doc1',
        'children': [
          {
            'level': 'required',
            'name': 'sub2'
          },
          {
            'annotations': ['not so important'],
            'level': 'optional',
            'name': 'sub1'
          }
        ]
      },
      {
        'level': 'required',
        'name': 'doc2'
      },
      {
        'level': 'required',
        'example': '"My App"',
        'has_example': True,
        'name': 'name'
      },
      {
        'level': 'recommended',
        'name': 'doc4'
      },
      {
        'level': 'only_one',
        'name': 'doc3'
      },
      {
        'level': 'only_one',
        'name': 'doc5'
      },
      {
        'level': 'optional',
        'name': 'doc6'
      }
    ]
    self.assertEqual(expected_docs, manifest_data_source._ListifyAndSortDocs(
        deepcopy(convert_and_annotate_docs), 'App'))

  def testAnnotate(self):
    # _AddLevelAnnotations tags the first doc of each non-required level
    # group and marks the last entry of each (sub)list with 'is_last'.
    expected_docs = [
      {
        'level': 'required',
        'name': 'doc1',
        'children': [
          {
            'level': 'required',
            'name': 'sub2'
          },
          {
            'annotations': ['Optional', 'not so important'],
            'level': 'optional',
            'name': 'sub1',
            'is_last': True
          }
        ]
      },
      {
        'level': 'required',
        'name': 'doc2'
      },
      {
        'name': 'name',
        'level': 'required',
        'example': '"My App"',
        'has_example': True
      },
      {
        'annotations': ['Recommended'],
        'level': 'recommended',
        'name': 'doc4'
      },
      {
        'annotations': ['Pick one (or none)'],
        'level': 'only_one',
        'name': 'doc3'
      },
      {
        'level': 'only_one',
        'name': 'doc5'
      },
      {
        'annotations': ['Optional'],
        'level': 'optional',
        'name': 'doc6',
        'is_last': True
      }
    ]
    annotated = manifest_data_source._ListifyAndSortDocs(
        deepcopy(convert_and_annotate_docs), 'App')
    manifest_data_source._AddLevelAnnotations(annotated)
    self.assertEqual(expected_docs, annotated)

  def testExpandedExamples(self):
    # Dict examples are expanded into nested 'children' entries, keeping a
    # JSON-serialized example only at the leaves.
    docs = {
      'doc1': {
        'name': 'doc1',
        'example': {
          'big': {
            'nested': {
              'json_example': ['with', 'more', 'json']
            }
          }
        }
      }
    }
    expected_docs = [
      {
        'name': 'doc1',
        'children': [
          {
            'name': 'big',
            'children': [
              {
                'name': 'nested',
                'children': [
                  {
                    'name': 'json_example',
                    'example': json.dumps(['with', 'more', 'json']),
                    'has_example': True
                  }
                ]
              }
            ]
          }
        ]
      }
    ]
    self.assertEqual(
        expected_docs, manifest_data_source._ListifyAndSortDocs(docs, 'apps'))

  def testNonExpandedExamples(self):
    # Empty containers are not expanded into children; they are rendered as
    # '{...}' / '[...]' style placeholder examples instead.
    docs = {
      'doc1': {
        'name': 'doc1',
        'example': {}
      },
      'doc2': {
        'name': 'doc2',
        'example': []
      },
      'doc3': {
        'name': 'doc3',
        'example': [{}]
      }
    }
    expected_docs = [
      {
        'name': 'doc1',
        'has_example': True,
        'example': '{...}'
      },
      {
        'name': 'doc2',
        'has_example': True,
        'example': '[...]'
      },
      {
        'name': 'doc3',
        'has_example': True,
        'example': '[{...}]'
      }
    ]
    self.assertEqual(
        expected_docs, manifest_data_source._ListifyAndSortDocs(docs, 'apps'))

  def testManifestDataSource(self):
    # End-to-end: ManifestDataSource.get() pulls manifest features from the
    # platform bundle (stubbed below) and returns annotated, sorted docs,
    # with dotted names ('doc1.sub1') nested under their parent.
    manifest_features = {
      'doc1': {
        'name': 'doc1',
        'platforms': ['apps', 'extensions'],
        'example': {},
        'level': 'required'
      },
      'doc1.sub1': {
        'name': 'doc1.sub1',
        'platforms': ['apps'],
        'annotations': ['important!'],
        'level': 'recommended'
      }
    }
    expected_app = [
      {
        'example': '{...}',
        'has_example': True,
        'level': 'required',
        'name': 'doc1',
        'platforms': ['apps', 'extensions'],
        'children': [
          {
            'annotations': [
              'Recommended',
              'important!'
            ],
            'level': 'recommended',
            'name': 'sub1',
            'platforms': ['apps'],
            'is_last': True
          }
        ],
        'is_last': True
      }
    ]
    # Minimal stubs standing in for the server's platform bundle plumbing.
    class FakePlatformBundle(object):
      def GetFeaturesBundle(self, platform):
        return FakeFeaturesBundle()
    class FakeFeaturesBundle(object):
      def GetManifestFeatures(self):
        return Future(value=manifest_features)
    class FakeServerInstance(object):
      def __init__(self):
        self.platform_bundle = FakePlatformBundle()
        self.object_store_creator = ObjectStoreCreator.ForTest()
    mds = manifest_data_source.ManifestDataSource(FakeServerInstance(), None)
    self.assertEqual(expected_app, mds.get('apps'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
mmnelemane/neutron | neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py | 21 | 2324 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from neutron.agent import securitygroups_rpc
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_constants
from neutron.plugins.ml2.drivers import mech_agent
from neutron.services.qos import qos_consts
LOG = log.getLogger(__name__)
class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
    """Attach to networks using openvswitch L2 agent.

    The OpenvswitchMechanismDriver integrates the ml2 plugin with the
    openvswitch L2 agent. Port binding with this driver requires the
    openvswitch agent to be running on the port's host, and that agent
    to have connectivity to at least one segment of the port's
    network.
    """

    supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT]

    def __init__(self):
        # Port filtering (and the hybrid OVS/Linux-bridge plug) is only
        # advertised when the security-group firewall driver is enabled.
        sg_enabled = securitygroups_rpc.is_firewall_enabled()
        vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
                       portbindings.OVS_HYBRID_PLUG: sg_enabled}
        super(OpenvswitchMechanismDriver, self).__init__(
            constants.AGENT_TYPE_OVS,
            portbindings.VIF_TYPE_OVS,
            vif_details)

    def get_allowed_network_types(self, agent):
        # Tunnel types come from the agent's reported configuration; local,
        # flat and VLAN are always supported.
        return (agent['configurations'].get('tunnel_types', []) +
                [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT,
                 p_constants.TYPE_VLAN])

    def get_mappings(self, agent):
        # Physical-network -> bridge mappings reported by the agent.
        return agent['configurations'].get('bridge_mappings', {})

    def check_vlan_transparency(self, context):
        """Currently Openvswitch driver doesn't support vlan transparency."""
        return False
| apache-2.0 |
MrLoick/python-for-android | tools/release.py | 69 | 1710 | #!/usr/bin/python
import googlecode_upload
import optparse
import os
import sys
PROJECT = 'android-scripting'
def upload(path, labels, options):
  """Upload one file to the Google Code project (no-op under --dryrun)."""
  if options.dryrun:
    return
  summary = os.path.basename(path)
  # NOTE(review): status/reason/url are discarded, so upload failures are
  # silent here — confirm that is intentional.
  status, reason, url = googlecode_upload.upload_find_auth(
      path, PROJECT, summary, labels, options.user, options.password)
def upload_language(name, version, options):
  """Upload the main, '_extras' and '_scripts' archives for one interpreter.

  Archives are looked up in options.bindir as '<name><suffix>_r<version>.zip';
  missing archives are reported and skipped.
  """
  for archive in ('', '_extras', '_scripts'):
    basename = '%s%s_r%d.zip' % (name, archive, version)
    path = os.path.join(options.bindir, basename)
    if os.path.exists(path):
      print 'Uploading %s.' % path
      upload(path, (), options)
    else:
      print 'No archive %s.' % path
def main():
  """Parse command-line options and upload one language release.

  Usage: release.py -u USER -w PASSWORD -b BINDIR [-d] LANGUAGE VERSION
  """
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-b', '--bindir', dest='bindir',
                    help='The binary directory')
  # Fixed: help text was a copy-paste of --bindir's ('The binary directory').
  parser.add_option('-d', '--dryrun', action='store_true', dest='dryrun',
                    help='Do not actually upload anything')
  options, args = parser.parse_args()
  # user, password and bindir are all mandatory despite being "options".
  if options.user is None:
    parser.error('Username is missing.')
  if options.password is None:
    parser.error('Password is missing.')
  if options.bindir is None:
    parser.error('Bindir is missing.')
  if len(args) < 2:
    parser.error('Must specify language and version to upload.')
  # Positional arguments: <language-name> <release-number>.
  upload_language(args[0], int(args[1]), options)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
cuongnv23/ansible | lib/ansible/modules/network/nxos/nxos_snmp_traps.py | 4 | 8775 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_traps
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP traps.
description:
- Manages SNMP traps configurations.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- This module works at the group level for traps. If you need to only
enable/disable 1 specific trap within a group, use the M(nxos_command)
module.
- Be aware that you can set a trap only for an enabled feature.
options:
group:
description:
- Case sensitive group.
required: true
choices: ['aaa', 'bridge', 'callhome', 'cfs', 'config', 'entity',
'feature-control', 'hsrp', 'license', 'link', 'lldp', 'ospf', 'pim',
'rf', 'rmon', 'snmp', 'storm-control', 'stpx', 'sysmgr', 'system',
'upgrade', 'vtp', 'all']
state:
description:
- Manage the state of the resource.
required: false
default: enabled
choices: ['enabled','disabled']
'''
EXAMPLES = '''
# ensure lldp trap configured
- nxos_snmp_traps:
group: lldp
state: enabled
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure lldp trap is not configured
- nxos_snmp_traps:
group: lldp
state: disabled
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"group": "lldp"}
existing:
description: k/v pairs of existing trap status
returned: always
type: dict
sample: {"lldp": [{"enabled": "No",
"trap": "lldpRemTablesChange"}]}
end_state:
description: k/v pairs of trap info after module execution
returned: always
type: dict
sample: {"lldp": [{"enabled": "Yes",
"trap": "lldpRemTablesChange"}]}
updates:
description: command sent to the device
returned: always
type: list
sample: "snmp-server enable traps lldp ;"
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command on the device and return its parsed output.

    Over the CLI transport, structured output is requested by appending
    '| json' (except for 'show run', which has no JSON form); the nxapi
    transport returns structured data natively.
    """
    if module.params['transport'] == 'cli' and 'show run' not in command:
        command += ' | json'
    # Previously the cli/nxapi branches duplicated this call and any other
    # transport value raised NameError on an unbound 'body'; both transports
    # go through run_commands, so call it unconditionally.
    return run_commands(module, [command])
def apply_key_map(key_map, table):
    """Return a copy of ``table`` with its keys renamed through ``key_map``.

    Entries whose key has no (truthy) mapping are dropped; truthy values are
    coerced to ``str``, falsy values are kept unchanged.
    """
    renamed = {}
    for old_key in table:
        mapped_key = key_map.get(old_key)
        if not mapped_key:
            continue
        original = table.get(old_key)
        renamed[mapped_key] = str(original) if original else original
    return renamed
def flatten_list(command_lists):
    """Flatten one level: list items are spliced in, scalars are appended."""
    flat = []
    for item in command_lists:
        flat.extend(item if isinstance(item, list) else [item])
    return flat
def get_snmp_traps(group, module):
    """Query 'show snmp trap' and return trap status for ``group``.

    Returns {feature: [{'trap': ..., 'enabled': ...}, ...]} for the whole
    table when group is 'all', a single-key dict for an enabled feature, or
    {} when the feature is disabled or the device output cannot be parsed.
    """
    command = 'show snmp trap'
    body = execute_show_command(command, module)

    # Maps device JSON keys to the names this module reports.
    trap_key = {
        'description': 'trap',
        'isEnabled': 'enabled'
    }
    resource = {}

    try:
        resource_table = body[0]['TABLE_snmp_trap']['ROW_snmp_trap']

        # Pre-create an (empty) bucket for every known trap group so that
        # groups with no rows still appear in the result.
        for each_feature in ['aaa', 'bridge', 'callhome', 'cfs', 'config',
                             'entity', 'feature-control', 'hsrp', 'license',
                             'link', 'lldp', 'ospf', 'pim', 'rf', 'rmon',
                             'snmp', 'storm-control', 'stpx', 'sysmgr',
                             'system', 'upgrade', 'vtp']:
            resource[each_feature] = []

        for each_resource in resource_table:
            key = str(each_resource['trap_type'])
            mapped_trap = apply_key_map(trap_key, each_resource)

            # 'Generic' rows are not tied to a feature group; skip them.
            if key != 'Generic':
                resource[key].append(mapped_trap)
    except (KeyError, AttributeError):
        # Unexpected or missing structure in the device output: return
        # whatever was collected so far (usually {}).
        return resource

    find = resource.get(group, None)

    if group == 'all'.lower():
        return resource
    elif find:
        trap_resource = {group: resource[group]}
        return trap_resource
    else:
        # if 'find' is None, it means that 'group' is a
        # currently disabled feature.
        return {}
def get_trap_commands(group, state, existing, module):
    """Build the CLI needed to move ``group`` to the requested trap state.

    Returns a list of per-feature command lists (possibly empty), suitable
    for flatten_list(); calls module.fail_json() when a specific group is
    not a currently enabled feature.
    """
    commands = []

    if group == 'all':
        # (trap status that needs changing, command template) per state.
        plans = {
            'disabled': ('Yes', 'no snmp-server enable traps {0}'),
            'enabled': ('No', 'snmp-server enable traps {0}'),
        }
        if state in plans:
            flag, template = plans[state]
            for feature in existing:
                feature_cmds = {template.format(feature)
                                for trap in existing[feature]
                                if trap['enabled'] == flag}
                commands.append(list(feature_cmds))
    elif group in existing:
        statuses = [trap['enabled'].lower() for trap in existing[group]]
        if state == 'disabled' and 'yes' in statuses:
            commands.append(['no snmp-server enable traps {0}'.format(group)])
        elif state == 'enabled' and 'no' in statuses:
            commands.append(['snmp-server enable traps {0}'.format(group)])
    else:
        module.fail_json(msg='{0} is not a currently '
                         'enabled feature.'.format(group))

    return commands
def main():
    """Ansible entry point: converge SNMP trap state for one trap group."""
    argument_spec = dict(
        state=dict(choices=['enabled', 'disabled'], default='enabled'),
        group=dict(choices=['aaa', 'bridge', 'callhome', 'cfs', 'config',
                            'entity', 'feature-control', 'hsrp',
                            'license', 'link', 'lldp', 'ospf', 'pim', 'rf',
                            'rmon', 'snmp', 'storm-control', 'stpx',
                            'sysmgr', 'system', 'upgrade', 'vtp', 'all'],
                   required=True),
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    group = module.params['group'].lower()
    state = module.params['state']

    existing = get_snmp_traps(group, module)
    proposed = {'group': group}

    changed = False
    end_state = existing
    commands = get_trap_commands(group, state, existing, module)
    cmds = flatten_list(commands)

    if cmds:
        if module.check_mode:
            # Check mode: report the would-be commands without applying them.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            # Re-read device state so 'end_state' reflects the change.
            end_state = get_snmp_traps(group, module)
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
317070/kaggle-heart | configurations/default.py | 1 | 2388 | import numpy as np
import objectives
from preprocess import sunny_preprocess, sunny_preprocess_validation, preprocess, preprocess_with_augmentation, \
sunny_preprocess_with_augmentation
import lasagne
from updates import build_nesterov_updates
from data_loader import generate_train_batch, generate_validation_batch, generate_test_batch
from functools import partial
import lasagne
from postprocess import postprocess
"""
When adding new configuration parameters, add the default values to this config file. This adds them to
all old config files automatically. Make sure this parameter does not change
the algorithm of the old files.
"""
caching = None  # "memory" # by default, cache accessed files in memory
momentum = 0.9
rng = np.random

# Batch generators for the different phases of a run.
create_train_gen = generate_train_batch
create_eval_valid_gen = partial(generate_validation_batch, set="validation")
create_eval_train_gen = partial(generate_validation_batch, set="train")
create_test_gen = partial(generate_test_batch, set=None)  # validate as well by default

# Preprocessing pipelines; augmentation is applied to training data only.
sunny_preprocess_train = sunny_preprocess_with_augmentation
sunny_preprocess_validation = sunny_preprocess_validation
sunny_preprocess_test = sunny_preprocess_validation

preprocess_train = preprocess_with_augmentation
preprocess_validation = preprocess
preprocess_test = preprocess
cleaning_processes = []

test_time_augmentations = 100
postprocess = postprocess
build_updates = build_nesterov_updates

# In total, you train 'chunk_size' samples 'num_chunks_train' time, and you do updates every 'batch_size'
# you train until the train set has passed by 'num_epochs_train' times
num_epochs_train = 150
validate_every = 20
save_every = 20
restart_from_save = False

take_a_dump = False  # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False  # dump the outputs from the dataloader (for debugging)

# Augmentation parameter ranges (rotation/shear in degrees, translation in px).
augmentation_params = {
    "rotation": (0, 360),
    "shear": (-10, 10),
    "translation": (-8, 8),
}

data_sizes = {
    "sliced:data": (30, 30, 100, 100),  # 30 mri_slices, 30 time steps, 100 px wide, 100 px high,
    "sliced:data:shape": (2,),
    # TBC with the metadata
}

# Learning-rate decay schedule: epoch -> learning rate.
learning_rate_schedule = {
    0: 0.003,
    400: 0.0003,
    500: 0.00003,
}
def build_model():
    """Placeholder model builder: no input layers and no output layer.

    Concrete configurations override this with a real network description.
    """
    input_layers = []
    output_layer = None
    return {"inputs": input_layers, "output": output_layer}
def build_objective(l_ins, l_outs):
return objectives.LogLossObjective(l_outs) | mit |
quanvm009/codev7 | openerp/addons/purchase/tests/test_average_price.py | 18 | 8649 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#from openerp.test.common import TransactionCase
from datetime import date
from openerp.tests import common
from openerp import netsvc
class TestAveragePrice(common.TransactionCase):
    """Check average-cost recomputation when purchased goods are received.

    A product with ``cost_method='average'`` starts with ``available_qty``
    units valued at ``standard_price``; a confirmed purchase order adds
    ``order_qty`` units at ``order_price_unit``.  After reception (full or
    partial) the new standard price must be the weighted average:

        (available_qty * standard_price + order_qty * order_price_unit)
        / (available_qty + order_qty)
    """

    def setUp(self):
        # Build the whole fixture: product, initial stock and a confirmed PO.
        super(TestAveragePrice, self).setUp()
        cr, uid, context = self.cr, self.uid, {}
        # Model proxies used throughout the tests.
        self.ir_model_data = self.registry('ir.model.data')
        self.product_product = self.registry('product.product')
        self.purchase_order = self.registry('purchase.order')
        self.purchase_order_line = self.registry('purchase.order.line')
        self.pricelist = self.registry('product.pricelist')
        self.stock_location = self.registry('stock.location')
        self.stock_picking = self.registry('stock.picking')
        self.stock_move = self.registry('stock.move')
        self.stock_partial_move = self.registry('stock.partial.move')
        self.stock_partial_move_line = self.registry('stock.partial.move.line')
        self.partial_picking = self.registry('stock.partial.picking')
        self.partial_picking_line = self.registry('stock.partial.picking.line')
        change_product_qty = self.registry('stock.change.product.qty')
        # Demo-data records used as partner, pricelist, locations and
        # valuation accounts (input and output both point at 'xfa').
        _, partner_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'res_partner_1')
        _, pricelist_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'product', 'list0')
        _, self.location_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
        _, self.supplier_location_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')
        _, input_account_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'account', 'xfa')
        _, output_account_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'account', 'xfa')
        wf_service = netsvc.LocalService("workflow")
        # Fixture quantities and prices referenced by the assertions.
        self.standard_price = 10
        self.order_price_unit = 20
        self.available_qty = 1
        self.order_qty = 1
        self.picking_qty = 1
        # Average-costed product with real-time valuation.
        self.product_id = self.product_product.create(cr, uid, {
            'name': 'Average product',
            'cost_method': 'average',
            'valuation': 'real_time',
            'property_stock_account_input': input_account_id,
            'property_stock_account_output': output_account_id,
        }, context=context)
        # Set the initial standard price.
        self.product_product.do_change_standard_price(
            cr, uid, [self.product_id], {
                'new_price': self.standard_price,
                'stock_input_account': input_account_id,
                'stock_output_account': output_account_id})
        # Put available_qty units in stock at that price.
        change_product_qty_id = change_product_qty.create(
            cr, uid, {
                'location_id': self.location_id,
                'new_quantity': self.available_qty,
                'product_id': self.product_id})
        change_product_qty.change_product_qty(
            cr, uid, [change_product_qty_id], {
                'active_model': 'product.product',
                'active_id': self.product_id,
                'active_ids': [self.product_id]})
        # Purchase order_qty units at a different unit price and confirm it,
        # which creates the incoming picking used by the tests.
        self.po_01_id = self.purchase_order.create(cr, uid, {
            'partner_id': partner_id,
            'location_id': self.location_id,
            'pricelist_id': pricelist_id,
        }, context=context)
        self.order_line_10 = self.purchase_order_line.create(cr, uid, {
            'order_id': self.po_01_id,
            'product_id': self.product_id,
            'name': 'description',
            'date_planned': date.today(),
            'product_qty': self.order_qty,
            'price_unit': self.order_price_unit
        }, context=context)
        wf_service.trg_validate(uid, 'purchase.order', self.po_01_id, 'purchase_confirm', cr)

    def test_10_stock_move_action_done(self):
        """Receive via stock.move.action_done and check the new average."""
        cr, uid, context = self.cr, self.uid, {}
        picking_id = self.purchase_order.read(cr, uid, [self.po_01_id], ['picking_ids'])[0]['picking_ids']
        move_lines_ids = self.stock_picking.read(cr, uid, picking_id, ['move_lines'])[0]['move_lines']
        for move in self.stock_move.browse(cr, uid, move_lines_ids, context=context):
            move.action_done()
        new_price = self.product_product.read(cr, uid, self.product_id, ['standard_price'], context=context)['standard_price']
        self.assertAlmostEqual(
            new_price,
            (self.available_qty * self.standard_price + self.order_qty * self.order_price_unit)
            /(self.available_qty + self.order_qty))

    def test_20_partial_stock_move(self):
        """Receive via the stock.partial.move wizard and check the average."""
        cr, uid, context = self.cr, self.uid, {}
        picking_ids = self.purchase_order.read(cr, uid, [self.po_01_id], ['picking_ids'])[0]['picking_ids']
        product = self.product_product.browse(cr, uid, self.product_id, context=context)
        partial_move_id = self.stock_partial_move.create(cr, uid, {
            'date': date.today(),
            'picking_id': picking_ids[0]
        }, context=context)
        move_lines_ids = self.stock_picking.read(cr, uid, picking_ids, ['move_lines'])[0]['move_lines']
        # One wizard line per move, receiving picking_qty at the PO price.
        for move in self.stock_move.browse(cr, uid, move_lines_ids, context=context):
            self.stock_partial_move_line.create(cr, uid, {
                'product_id': self.product_id,
                'quantity': self.picking_qty,
                'product_uom': product.uom_id.id,
                'location_dest_id': self.location_id,
                'location_id': self.supplier_location_id,
                'move_id': move.id,
                'cost': self.order_price_unit,
                'wizard_id': partial_move_id,
            }, context=context)
        self.stock_partial_move.do_partial(cr, uid, [partial_move_id], context=context)
        new_price = self.product_product.read(cr, uid, self.product_id, ['standard_price'], context=context)['standard_price']
        self.assertAlmostEqual(
            new_price,
            (self.available_qty * self.standard_price + self.order_qty * self.order_price_unit)
            /(self.available_qty + self.order_qty))

    def test_30_partial_stock_picking(self):
        """Receive via the stock.partial.picking wizard and check the average."""
        cr, uid, context = self.cr, self.uid, {}
        picking_ids = self.purchase_order.read(cr, uid, [self.po_01_id], ['picking_ids'])[0]['picking_ids']
        product = self.product_product.browse(cr, uid, self.product_id, context=context)
        partial_picking_id = self.partial_picking.create(cr, uid, {
            'date': date.today(),
            'picking_id': picking_ids[0],
        }, context=context)
        move_lines_ids = self.stock_picking.read(cr, uid, picking_ids, ['move_lines'])[0]['move_lines']
        for move in self.stock_move.browse(cr, uid, move_lines_ids, context=context):
            self.partial_picking_line.create(cr, uid, {
                'product_id': self.product_id,
                'quantity': self.picking_qty,
                'product_uom': product.uom_id.id,
                'location_dest_id': self.location_id,
                'location_id': self.supplier_location_id,
                'move_id': move.id,
                'cost': self.order_price_unit,
                'wizard_id': partial_picking_id,
            }, context=context)
        self.partial_picking.do_partial(cr, uid, [partial_picking_id], context=context)
        new_price = self.product_product.read(cr, uid, self.product_id, ['standard_price'], context=context)['standard_price']
        self.assertAlmostEqual(
            new_price,
            (self.available_qty * self.standard_price + self.order_qty * self.order_price_unit)
            /(self.available_qty + self.order_qty))
| agpl-3.0 |
fidomason/kbengine | kbe/res/scripts/common/Lib/test/script_helper.py | 60 | 6902 | # Common utility functions used by various script execution tests
# e.g. test_cmd_line, test_cmd_line_script and test_runpy
import importlib
import sys
import os
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
import zipfile
from importlib.util import source_from_cache
from test.support import make_legacy_pyc, strip_python_stderr, temp_dir
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
    """Run sys.executable with *args* and assert on its exit status.

    expected_success -- True to require rc == 0, False to require rc != 0.
    env_vars -- extra environment variables for the child; the special keys
    '__isolated' (force/deny -I) and '__cleanenv' (start from an empty
    environment) are consumed here and not exported.
    Returns (rc, stdout, stderr) with stderr stripped of noise.
    """
    if '__isolated' in env_vars:
        isolated = env_vars.pop('__isolated')
    else:
        # Default: run isolated unless the caller wants env vars honoured.
        isolated = not env_vars
    cmd_line = [sys.executable, '-X', 'faulthandler']
    if isolated:
        # isolated mode: ignore Python environment variables, ignore user
        # site-packages, and don't add the current directory to sys.path
        cmd_line.append('-I')
    elif not env_vars:
        # ignore Python environment variables
        cmd_line.append('-E')
    # Need to preserve the original environment, for in-place testing of
    # shared library builds.
    env = os.environ.copy()
    # But a special flag that can be set to override -- in this case, the
    # caller is responsible to pass the full environment.
    if env_vars.pop('__cleanenv', None):
        env = {}
    env.update(env_vars)
    cmd_line.extend(args)
    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    try:
        out, err = p.communicate()
    finally:
        # Reap leaked children and close our pipe ends even on error.
        subprocess._cleanup()
        p.stdout.close()
        p.stderr.close()
    rc = p.returncode
    err = strip_python_stderr(err)
    if (rc and expected_success) or (not rc and not expected_success):
        raise AssertionError(
            "Process return code is %d, "
            "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
    return rc, out, err
def assert_python_ok(*args, **env_vars):
    """
    Run the interpreter with `args` and optional environment variables
    `env_vars`, assert it succeeds (rc == 0), and return a
    (return code, stdout, stderr) tuple.

    If the __cleanenv keyword is set, env_vars is used as a fresh environment.
    Python is started in isolated mode (command line option -I),
    except if the __isolated keyword is set to False.
    """
    expected_success = True
    return _assert_python(expected_success, *args, **env_vars)
def assert_python_failure(*args, **env_vars):
    """
    Run the interpreter with `args` and optional environment variables
    `env_vars`, assert it fails (rc != 0), and return a
    (return code, stdout, stderr) tuple.

    See assert_python_ok() for more options.
    """
    expected_success = False
    return _assert_python(expected_success, *args, **env_vars)
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
    """Start sys.executable with the given arguments and return the Popen.

    stdin is always a pipe; stdout/stderr default to a single combined pipe.
    Extra keyword args (kw) are forwarded to subprocess.Popen.
    """
    command = [sys.executable, '-E']
    command.extend(args)
    # Under Fedora (?), GNU readline can output junk on stderr when
    # initialized, depending on the TERM setting; TERM=vt100 is supposed to
    # disable that.  References:
    # - http://reinout.vanrees.org/weblog/2009/08/14/readline-invisible-character-hack.html
    # - http://stackoverflow.com/questions/15760712/python-readline-module-prints-escape-character-during-import
    # - http://lists.gnu.org/archive/html/bug-readline/2007-08/msg00004.html
    environment = kw.setdefault('env', dict(os.environ))
    environment['TERM'] = 'vt100'
    return subprocess.Popen(command, stdin=subprocess.PIPE,
                            stdout=stdout, stderr=stderr,
                            **kw)
def kill_python(p):
    """Close the child's stdin, run it to completion, and return its stdout."""
    p.stdin.close()
    output = p.stdout.read()
    p.stdout.close()
    # Reap the child so we don't appear to leak when running with
    # regrtest -R.
    p.wait()
    subprocess._cleanup()
    return output
def make_script(script_dir, script_basename, source, omit_suffix=False):
    """Write *source* to <script_dir>/<script_basename>[.py] and return the path.

    The file is written as UTF-8, the default source encoding; import caches
    are invalidated so the new file is immediately importable.
    """
    if omit_suffix:
        filename = script_basename
    else:
        filename = script_basename + os.extsep + 'py'
    script_name = os.path.join(script_dir, filename)
    with open(script_name, 'w', encoding='utf-8') as stream:
        stream.write(source)
    importlib.invalidate_caches()
    return script_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
    """Archive *script_name* into <zip_dir>/<zip_basename>.zip.

    Returns (zip_path, pseudo_path_of_script_inside_zip).  When the script
    lives in a __pycache__ directory, a legacy .pyc is generated and that is
    what gets archived instead.
    """
    zip_filename = zip_basename + os.extsep + 'zip'
    zip_name = os.path.join(zip_dir, zip_filename)
    with zipfile.ZipFile(zip_name, 'w') as archive:
        if name_in_zip is None:
            parts = script_name.split(os.sep)
            if len(parts) >= 2 and parts[-2] == '__pycache__':
                # Archive the legacy-layout bytecode file instead of the
                # cached one so the zip looks like a pre-PEP-3147 tree.
                legacy_pyc = make_legacy_pyc(source_from_cache(script_name))
                name_in_zip = os.path.basename(legacy_pyc)
                script_name = legacy_pyc
            else:
                name_in_zip = os.path.basename(script_name)
        archive.write(script_name, name_in_zip)
    #if test.support.verbose:
    #    zip_file = zipfile.ZipFile(zip_name, 'r')
    #    print 'Contents of %r:' % zip_name
    #    zip_file.printdir()
    #    zip_file.close()
    return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir, init_source=''):
    """Create *pkg_dir* as a package: a directory holding an __init__.py
    whose contents are *init_source*."""
    os.mkdir(pkg_dir)
    make_script(pkg_dir, '__init__', init_source)
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                 source, depth=1, compiled=False):
    """Build a zip containing a (possibly nested) package with one script.

    The package tree is pkg_name repeated *depth* levels deep, each level
    carrying an empty __init__, with *source* stored as
    <pkg_name>/.../<script_basename>.py.  With compiled=True the .pyc files
    are archived instead of the sources.  The on-disk temporaries are
    deleted; returns (zip_path, pseudo_path_of_script_inside_zip).
    """
    # Files created on disk only to be zipped; removed at the end.
    unlink = []
    init_name = make_script(zip_dir, '__init__', '')
    unlink.append(init_name)
    init_basename = os.path.basename(init_name)
    script_name = make_script(zip_dir, script_basename, source)
    unlink.append(script_name)
    if compiled:
        # Swap in the compiled artifacts (and remember them for cleanup).
        init_name = py_compile.compile(init_name, doraise=True)
        script_name = py_compile.compile(script_name, doraise=True)
        unlink.extend((init_name, script_name))
    # 'pkg', 'pkg/pkg', ... down to the requested depth.
    pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
    script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
    zip_filename = zip_basename+os.extsep+'zip'
    zip_name = os.path.join(zip_dir, zip_filename)
    zip_file = zipfile.ZipFile(zip_name, 'w')
    # Every nesting level needs its own __init__ entry.
    for name in pkg_names:
        init_name_in_zip = os.path.join(name, init_basename)
        zip_file.write(init_name, init_name_in_zip)
    zip_file.write(script_name, script_name_in_zip)
    zip_file.close()
    for name in unlink:
        os.unlink(name)
    #if test.support.verbose:
    #    zip_file = zipfile.ZipFile(zip_name, 'r')
    #    print 'Contents of %r:' % zip_name
    #    zip_file.printdir()
    #    zip_file.close()
    return zip_name, os.path.join(zip_name, script_name_in_zip)
| lgpl-3.0 |
foreni-packages/golismero | thirdparty_libs/django/templatetags/static.py | 114 | 4022 | try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django import template
from django.template.base import Node
from django.utils.encoding import iri_to_uri
register = template.Library()
class PrefixNode(template.Node):
    """Template node that renders a settings-based URL prefix.

    Used by {% get_static_prefix %} / {% get_media_prefix %}: either outputs
    the prefix directly or stores it in a context variable ("as varname").
    """

    def __repr__(self):
        return "<PrefixNode for %r>" % self.name

    def __init__(self, varname=None, name=None):
        # name is the settings attribute to read (e.g. "STATIC_URL");
        # varname, if given, is the context variable to populate instead of
        # rendering inline.
        if name is None:
            raise template.TemplateSyntaxError(
                "Prefix nodes must be given a name to return.")
        self.varname = varname
        self.name = name

    @classmethod
    def handle_token(cls, parser, token, name):
        """
        Class method to parse prefix node and return a Node.
        """
        tokens = token.contents.split()
        if len(tokens) > 1 and tokens[1] != 'as':
            raise template.TemplateSyntaxError(
                "First argument in '%s' must be 'as'" % tokens[0])
        if len(tokens) > 1:
            # NOTE(review): assumes the full "as varname" form; a bare
            # "{% tag as %}" would raise IndexError here -- confirm whether
            # that input is possible in practice.
            varname = tokens[2]
        else:
            varname = None
        return cls(varname, name)

    @classmethod
    def handle_simple(cls, name):
        # Imported lazily so the tag degrades to '' when settings are
        # unavailable; the value is IRI-encoded for safe URL use.
        try:
            from django.conf import settings
        except ImportError:
            prefix = ''
        else:
            prefix = iri_to_uri(getattr(settings, name, ''))
        return prefix

    def render(self, context):
        prefix = self.handle_simple(self.name)
        if self.varname is None:
            return prefix
        # "as varname" form: stash the prefix and output nothing.
        context[self.varname] = prefix
        return ''
@register.tag
def get_static_prefix(parser, token):
    """
    Populates a template variable with the static prefix,
    ``settings.STATIC_URL``.

    Usage::

        {% get_static_prefix [as varname] %}

    Examples::

        {% get_static_prefix %}
        {% get_static_prefix as static_prefix %}
    """
    # PrefixNode does all the parsing, including the optional "as varname".
    setting_name = "STATIC_URL"
    return PrefixNode.handle_token(parser, token, setting_name)
@register.tag
def get_media_prefix(parser, token):
    """
    Populates a template variable with the media prefix,
    ``settings.MEDIA_URL``.

    Usage::

        {% get_media_prefix [as varname] %}

    Examples::

        {% get_media_prefix %}
        {% get_media_prefix as media_prefix %}
    """
    # PrefixNode does all the parsing, including the optional "as varname".
    setting_name = "MEDIA_URL"
    return PrefixNode.handle_token(parser, token, setting_name)
class StaticNode(Node):
    """Template node that resolves a path and joins it onto STATIC_URL.

    Renders the resulting URL inline, or stores it in a context variable
    when the tag used the "as varname" form.
    """

    def __init__(self, varname=None, path=None):
        # path is a compiled FilterExpression, resolved per-render.
        if path is None:
            raise template.TemplateSyntaxError(
                "Static template nodes must be given a path to return.")
        self.path = path
        self.varname = varname

    def url(self, context):
        # Resolve the (possibly variable/filtered) path against the context.
        path = self.path.resolve(context)
        return self.handle_simple(path)

    def render(self, context):
        url = self.url(context)
        if self.varname is None:
            return url
        # "as varname" form: stash the URL and output nothing.
        context[self.varname] = url
        return ''

    @classmethod
    def handle_simple(cls, path):
        return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)

    @classmethod
    def handle_token(cls, parser, token):
        """
        Class method to parse prefix node and return a Node.
        """
        bits = token.split_contents()
        if len(bits) < 2:
            raise template.TemplateSyntaxError(
                "'%s' takes at least one argument (path to file)" % bits[0])
        path = parser.compile_filter(bits[1])
        # NOTE(review): assumes the "as" form is exactly
        # {% static path as varname %} so the name sits at bits[3];
        # other arities with a trailing "as x" would misbehave -- confirm.
        if len(bits) >= 2 and bits[-2] == 'as':
            varname = bits[3]
        else:
            varname = None
        return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
    """
    Joins the given path with the STATIC_URL setting.

    Usage::

        {% static path [as varname] %}

    Examples::

        {% static "myapp/css/base.css" %}
        {% static variable_with_path %}
        {% static "myapp/css/base.css" as admin_base_css %}
        {% static variable_with_path as varname %}
    """
    # StaticNode owns all parsing, including the optional "as varname".
    node = StaticNode.handle_token(parser, token)
    return node
def static(path):
    """Python-level helper: return *path* joined onto the STATIC_URL prefix."""
    url = StaticNode.handle_simple(path)
    return url
| gpl-2.0 |
ksrajkumar/openerp-6.1 | openerp/addons/wiki/__init__.py | 9 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wiki
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
michaelkuty/django-oscar | src/oscar/apps/payment/migrations/0001_initial.py | 51 | 4813 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
from django.conf import settings
from decimal import Decimal
class Migration(migrations.Migration):
    """Initial schema for oscar.apps.payment.

    Creates Bankcard, Source, SourceType and Transaction, then links
    Source -> SourceType.  Migration files should not be edited by hand
    beyond comments.
    """

    dependencies = [
        ('order', '0001_initial'),
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL is.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # A (partially masked) payment card stored against a user.
        migrations.CreateModel(
            name='Bankcard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('card_type', models.CharField(max_length=128, verbose_name='Card Type')),
                ('name', models.CharField(max_length=255, verbose_name='Name', blank=True)),
                ('number', models.CharField(max_length=32, verbose_name='Number')),
                ('expiry_date', models.DateField(verbose_name='Expiry Date')),
                ('partner_reference', models.CharField(max_length=255, verbose_name='Partner Reference', blank=True)),
                ('user', models.ForeignKey(verbose_name='User', related_name='bankcards', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Bankcards',
                'verbose_name': 'Bankcard',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Money allocated/debited/refunded against an order from one source.
        migrations.CreateModel(
            name='Source',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('currency', models.CharField(default='GBP', max_length=12, verbose_name='Currency')),
                ('amount_allocated', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Allocated')),
                ('amount_debited', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Debited')),
                ('amount_refunded', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Refunded')),
                ('reference', models.CharField(max_length=128, verbose_name='Reference', blank=True)),
                ('label', models.CharField(max_length=128, verbose_name='Label', blank=True)),
                ('order', models.ForeignKey(verbose_name='Order', related_name='sources', to='order.Order')),
            ],
            options={
                'verbose_name_plural': 'Sources',
                'verbose_name': 'Source',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # The kind of payment source (e.g. a gateway), slugged by name.
        migrations.CreateModel(
            name='SourceType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, verbose_name='Name')),
                ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', editable=False, max_length=128, help_text='This is used within forms to identify this source type', blank=True)),
            ],
            options={
                'verbose_name_plural': 'Source Types',
                'verbose_name': 'Source Type',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # An individual auth/debit/refund event against a Source.
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('txn_type', models.CharField(max_length=128, verbose_name='Type', blank=True)),
                ('amount', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Amount')),
                ('reference', models.CharField(max_length=128, verbose_name='Reference', blank=True)),
                ('status', models.CharField(max_length=128, verbose_name='Status', blank=True)),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
                ('source', models.ForeignKey(verbose_name='Source', related_name='transactions', to='payment.Source')),
            ],
            options={
                'ordering': ['-date_created'],
                'verbose_name_plural': 'Transactions',
                'verbose_name': 'Transaction',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Added after both models exist to avoid a forward reference.
        migrations.AddField(
            model_name='source',
            name='source_type',
            field=models.ForeignKey(verbose_name='Source Type', related_name='sources', to='payment.SourceType'),
            preserve_default=True,
        ),
    ]
| bsd-3-clause |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/backup/test_backup_replay_vm.py | 2 | 9863 | import os
import time
import random
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
import zstackwoodpecker.zstack_test.zstack_test_volume as zstack_volume_header
import zstackwoodpecker.operations.scenario_operations as sce_ops
import zstackwoodpecker.header.host as host_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
# Path records the sequence of executed operations, split into segments;
# a new segment is started every time the `tag` operation runs (see record()).
Path = [[]]
# Index of the Path segment currently being appended to.
index = 0
# Operation name that starts a new Path segment.
tag = "VM_TEST_REBOOT"
# Most recent backup inventory produced by back_up(); consumed by compare().
backup = None
def record(fun):
    """Decorator: log every (vm, op) call into the module-global Path trace.

    Non-tag ops are appended to the current segment; the tag op starts a
    new segment and advances the segment index.
    """
    def recorder(vm, op):
        global index
        if op != tag:
            Path[index].append(op)
        elif op == tag:
            # NOTE(review): the tag op is appended both to a brand-new
            # segment and to the (still-current) old segment before the
            # index advances -- looks intentional for the printed trace,
            # but worth confirming.
            Path.append([op])
            Path[index].append(op)
            index += 1
        return fun(vm, op)
    return recorder
# Operations permitted while the VM is running.
# (sic: "RUNGGING" typo is kept because test() references this exact name)
VM_RUNGGING_OPS = [
    "VM_TEST_SNAPSHOT",
    "VM_TEST_CREATE_IMG",
    "VM_TEST_RESIZE_RVOL",
    "VM_TEST_NONE"
]
# Operations permitted while the VM is stopped.
VM_STOPPED_OPS = [
    "VM_TEST_SNAPSHOT",
    "VM_TEST_CREATE_IMG",
    "VM_TEST_RESIZE_RVOL",
    "VM_TEST_CHANGE_OS",
    "VM_TEST_RESET",
    "VM_TEST_NONE"
]
# Operations that change (or keep) the VM's power state.
VM_STATE_OPS = [
    "VM_TEST_STOP",
    "VM_TEST_REBOOT",
    "VM_TEST_NONE"
]
@record
def vm_op_test(vm, op):
    """Log the operation name and dispatch it against *vm*.

    The @record decorator also appends each op to the global Path trace.
    An unknown *op* raises KeyError, failing the test loudly.
    """
    test_util.test_logger(vm.vm.name + "-------" + op)
    handlers = {
        "VM_TEST_STOP": stop,
        "VM_TEST_REBOOT": reboot,
        "VM_TEST_NONE": do_nothing,
        "VM_TEST_MIGRATE": migrate,
        "VM_TEST_SNAPSHOT": create_snapshot,
        "VM_TEST_CREATE_IMG": create_image,
        "VM_TEST_RESIZE_RVOL": resize_rvol,
        "VM_TEST_CHANGE_OS": change_os,
        "VM_TEST_RESET": reset,
        "VM_TEST_BACKUP": back_up,
    }
    handlers[op](vm)
def stop(vm):
    """Power the VM off via the test-VM wrapper."""
    vm.stop()
def reboot(vm):
    """Reboot the VM via the test-VM wrapper."""
    vm.reboot()
def do_nothing(vm):
    """No-op handler for the VM_TEST_NONE operation."""
    pass
def reset(vm):
    """Reinitialize (factory-reset) the VM via the test-VM wrapper."""
    vm.reinit()
def migrate(vm_obj):
    """Move the VM to another random host.

    Shared storage (Ceph/SMP/NFS/SharedBlock): migrate the VM directly.
    Local storage: cold-migrate the root volume (caller must have stopped
    the VM first), then start the VM and wait for ssh to answer.
    """
    ps = test_lib.lib_get_primary_storage_by_vm(vm_obj.get_vm())
    if ps.type in [inventory.CEPH_PRIMARY_STORAGE_TYPE, 'SharedMountPoint', inventory.NFS_PRIMARY_STORAGE_TYPE,
                   'SharedBlock']:
        target_host = test_lib.lib_find_random_host(vm_obj.vm)
        vm_obj.migrate(target_host.uuid)
    elif ps.type in [inventory.LOCAL_STORAGE_TYPE]:
        vm_obj.check()
        target_host = test_lib.lib_find_random_host(vm_obj.vm)
        # Local storage: move the root volume itself, then boot the VM.
        vol_ops.migrate_volume(vm_obj.get_vm().allVolumes[0].uuid, target_host.uuid)
        vm_obj.start()
        # Wait up to 300s for ssh (port 22) on the guest's first NIC.
        test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
    else:
        test_util.test_fail("FOUND NEW STORAGTE TYPE. FAILED")
def create_snapshot(vm_obj):
    """Snapshot the VM's root volume and verify the snapshot tree."""
    vol_obj = zstack_volume_header.ZstackTestVolume()
    vol_obj.set_volume(test_lib.lib_get_root_volume(vm_obj.get_vm()))
    snapshots_root = zstack_sp_header.ZstackVolumeSnapshot()
    snapshots_root.set_utility_vm(vm_obj)
    snapshots_root.set_target_volume(vol_obj)
    snapshots_root.create_snapshot('create_data_snapshot1')
    snapshots_root.check()
    sp1 = snapshots_root.get_current_snapshot()
    # Revert-to-snapshot flow kept for reference but intentionally disabled:
    #vm_obj.stop()
    #vm_obj.check()
    #snapshots_root.use_snapshot(sp1)
    #vm_obj.start()
    #test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
def create_image(vm_obj):
    """Build a root-volume template from the VM, verify it, then delete it."""
    volume_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
    bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm_obj.vm)
    image_option = test_util.ImageOption()
    image_option.set_root_volume_uuid(volume_uuid)
    image_option.set_name('image_resize_template')
    # Store the template on the VM's first backup storage only.
    image_option.set_backup_storage_uuid_list([bs_list[0].uuid])
    image = img_ops.create_root_volume_template(image_option)
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.set_image(image)
    new_image.check()
    # clean() removes the image so repeated ops don't accumulate templates.
    new_image.clean()
def resize_rvol(vm_obj):
    """Grow the VM's root volume by 1 GiB and wait for the guest to respond."""
    vol_size = test_lib.lib_get_root_volume(vm_obj.get_vm()).size
    volume_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid
    # Target size: current size + 1 GiB.
    set_size = 1024 * 1024 * 1024 + int(vol_size)
    vol_ops.resize_volume(volume_uuid, set_size)
    vm_obj.update()
    # NOTE(review): the size verification below is disabled, so
    # vol_size_after is currently unused -- confirm whether the check
    # should be restored.
    vol_size_after = test_lib.lib_get_root_volume(vm_obj.get_vm()).size
    # if set_size != vol_size_after:
    #     test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)
    # vm_obj.check()
    test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
def change_os(vm_obj):
    """Re-image the VM with a random Linux template and sanity-check it.

    After the change the VM must boot, keep its L3 network configuration
    and stay on the same primary storage.
    """
    vm_uuid = vm_obj.get_vm().uuid
    # Remember network and storage so we can verify they are unchanged.
    last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm_obj.get_vm())
    last_ps_uuid = test_lib.lib_get_root_volume(vm_obj.get_vm()).primaryStorageUuid
    cond = res_ops.gen_query_conditions("system", '=', "false")
    cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
    cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
    image_uuid = random.choice(res_ops.query_resource(res_ops.IMAGE, cond)).uuid
    vm_ops.change_vm_image(vm_uuid, image_uuid)
    vm_obj.start()
    vm_obj.update()
    # check whether the vm is running successfully
    test_lib.lib_wait_target_up(vm_obj.get_vm().vmNics[0].ip, 22, 300)
    # check whether the network config has changed
    l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm_obj.get_vm())
    if l3network_uuid_after != last_l3network_uuid:
        test_util.test_fail('Change VM Image Failed.The Network config has changed.')
    # check whether primarystorage has changed
    ps_uuid_after = test_lib.lib_get_root_volume(vm_obj.get_vm()).primaryStorageUuid
    if ps_uuid_after != last_ps_uuid:
        test_util.test_fail('Change VM Image Failed.Primarystorage has changed.')
def back_up(vm_obj):
    """Back up the VM's root volume to the first backup storage.

    The resulting backup inventory is stored in the module-global
    ``backup`` so compare() can use it afterwards.
    """
    global backup
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
    backup_option = test_util.BackupOption()
    backup_option.set_name("test_compare")
    backup_option.set_volume_uuid(test_lib.lib_get_root_volume(vm_obj.get_vm()).uuid)
    backup_option.set_backupStorage_uuid(bs.uuid)
    backup = vol_ops.create_backup(backup_option)
def print_path(Path):
    """Pretty-print the recorded operation trace, one segment per line."""
    banner = "=" * 43 + "PATH" + "=" * 43
    print(banner)
    for segment in Path:
        # Each segment is a list of operation names.
        print(" --> ".join(segment))
    print("=" * 90)
def test():
    """Drive a fixed operation path on a fresh VM, backing up twice and
    verifying each backup bit-for-bit against the live root volume."""
    global test_obj_dict, VM_RUNGGING_OPS, VM_STOPPED_OPS, VM_STATE_OPS, backup
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
    vm_name = "test_vm"
    # Pick any non-system Linux root-volume template as the VM image.
    cond = res_ops.gen_query_conditions("system", '=', "false")
    cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
    cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
    img_name = res_ops.query_resource(res_ops.IMAGE, cond)[0].name
    cond = res_ops.gen_query_conditions("category", '=', "Private")
    l3_name = res_ops.query_resource(res_ops.L3_NETWORK,cond)[0].name
    vm = test_stub.create_vm(vm_name, img_name, l3_name)
    # The scripted scenario to execute, in order.
    path = "VM_TEST_REBOOT --> VM_TEST_MIGRATE --> VM_TEST_BACKUP --> VM_TEST_NONE --> VM_TEST_CREATE_IMG --> VM_TEST_BACKUP"
    path_array = path.split(" --> ")
    for i in path_array:
        if i == "VM_TEST_MIGRATE" and ps.type == inventory.LOCAL_STORAGE_TYPE:
            # Local storage requires a cold migration: stop first.
            vm.stop()
            vm_op_test(vm, i)
            continue
        if vm.state == "Stopped":
            vm.start()
        if i == "VM_TEST_BACKUP":
            if test_lib.lib_is_vm_l3_has_vr(vm.vm):
                test_lib.TestHarness = test_lib.TestHarnessVR
            # Give the guest time to settle, write a unique file so the
            # disk content differs between backups, then pause the VM so
            # the volume is quiescent for the bit-level comparison.
            time.sleep(60)
            cmd = "echo 111 > /root/" + str(int(time.time()))
            test_lib.lib_execute_command_in_vm(vm.vm,cmd)
            vm.suspend()
            # create_snapshot/backup
            vm_op_test(vm, "VM_TEST_BACKUP")
            # compare vm & image created by backup
            compare(ps, vm, backup)
            vm.resume()
        else:
            vm_op_test(vm, i)
    test_util.test_pass("path: " + path + " test pass")
def error_cleanup():
    """On failure, dump the recorded operation path to aid debugging."""
    global test_obj_dict
    print_path(Path)
def compare(ps, vm, backup):
    """Pull the backup image onto the VM's host and qemu-img compare it
    against the live root volume; fail the test if they differ."""
    test_util.test_logger("-----------------compare----------------")
    # find vm_host
    host = test_lib.lib_find_host_by_vm(vm.vm)
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
    root_volume = test_lib.lib_get_root_volume(vm.get_vm())
    vm_path = root_volume.installPath
    if ps.type == "SharedBlock":
        # SharedBlock install paths are rewritten to the /dev/<vg>/<lv> form.
        vm_path = "/dev/" + root_volume.installPath.split("/")[2] + "/" + root_volume.installPath.split("/")[3]
        test_util.test_logger(vm_path)
    # Backup install path looks like <proto>://<name>/<id>; split out parts.
    name = backup.backupStorageRefs[0].installPath.split("/")[2]
    id = backup.backupStorageRefs[0].installPath.split("/")[3]
    # compare vm_root_volume & image
    cmd = "mkdir /root/%s;" \
          "/usr/local/zstack/imagestore/bin/zstcli " \
          "-rootca=/var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem " \
          "-url=%s:8000 " \
          "pull -installpath /root/%s/old.qcow2 %s:%s;" \
          "qemu-img compare %s /root/%s/old.qcow2;" % (id, bs.hostname, id, name, id, vm_path, id)
    # clean image
    result = test_lib.lib_execute_ssh_cmd(host.managementIp, "root", "password", cmd, timeout=300)
    # qemu-img prints exactly this line when the images are bit-identical.
    if result != "Images are identical.\n":
        test_util.test_fail("compare vm_root_volume & image created by backup")
| apache-2.0 |
matthewoliver/swift | test/unit/common/middleware/test_memcache.py | 3 | 15123 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from textwrap import dedent
import unittest
import mock
from six.moves.configparser import NoSectionError, NoOptionError
from swift.common.middleware import memcache
from swift.common.memcached import MemcacheRing
from swift.common.swob import Request
from swift.common.wsgi import loadapp
from test.unit import with_tempdir, patch_policies
class FakeApp(object):
    """Minimal WSGI app: hands back the environ so tests can inspect what
    upstream middleware injected into it."""

    def __call__(self, env, start_response):
        return env
class ExcConfigParser(object):
    """Config-parser double whose read() always raises, which lets tests
    prove that a particular config path was (or wasn't) read."""

    def read(self, path):
        raise Exception('read called with %r' % path)
class EmptyConfigParser(object):
    """Config-parser double that behaves as if no config file existed."""

    def read(self, path):
        return False
def get_config_parser(memcache_servers='1.2.3.4:5',
                      memcache_serialization_support='1',
                      memcache_max_connections='4',
                      section='memcache'):
    """Build a fake ConfigParser class pre-loaded with the given values.

    get() raises NoSectionError for any other section, NoOptionError for
    unknown options, and treats the literal value 'error' as "option not
    present" (raising NoOptionError).
    """
    # Capture the arguments under stable names for the closure below.
    servers_value = memcache_servers
    serialization_value = memcache_serialization_support
    max_conn_value = memcache_max_connections
    section_value = section

    class SetConfigParser(object):

        def items(self, section_name):
            if section_name != section:
                raise NoSectionError(section_name)
            return {
                'memcache_servers': memcache_servers,
                'memcache_serialization_support':
                memcache_serialization_support,
                'memcache_max_connections': memcache_max_connections,
            }

        def read(self, path):
            return True

        def get(self, section, option):
            # Guard clause: only the configured section exists.
            if section_value != section:
                raise NoSectionError(option)
            if option == 'memcache_servers':
                if servers_value == 'error':
                    raise NoOptionError(option, section)
                return servers_value
            if option == 'memcache_serialization_support':
                if serialization_value == 'error':
                    raise NoOptionError(option, section)
                return serialization_value
            if option in ('memcache_max_connections',
                          'max_connections'):
                if max_conn_value == 'error':
                    raise NoOptionError(option, section)
                return max_conn_value
            raise NoOptionError(option, section)

    return SetConfigParser
def start_response(*args):
    """No-op WSGI start_response stub: accept anything, do nothing."""
    return None
class TestCacheMiddleware(unittest.TestCase):
    """Exercise MemcacheMiddleware configuration loading and the
    swift.cache environ injection."""

    def setUp(self):
        # Middleware wrapping the echo app, with empty proxy-config options.
        self.app = memcache.MemcacheMiddleware(FakeApp(), {})

    def test_cache_middleware(self):
        """A MemcacheRing is injected into the WSGI environ as swift.cache."""
        req = Request.blank('/something', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertTrue('swift.cache' in resp)
        self.assertTrue(isinstance(resp['swift.cache'], MemcacheRing))

    def test_conf_default_read(self):
        """With an incomplete filter config, /etc/swift/memcache.conf is
        consulted (ExcConfigParser raising proves read() was called)."""
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            # Every combination that leaves at least one option unset.
            for d in ({},
                      {'memcache_servers': '6.7.8.9:10'},
                      {'memcache_serialization_support': '0'},
                      {'memcache_max_connections': '30'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_serialization_support': '0'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_max_connections': '30'},
                      {'memcache_serialization_support': '0',
                       'memcache_max_connections': '30'}
                      ):
                with self.assertRaises(Exception) as catcher:
                    memcache.MemcacheMiddleware(FakeApp(), d)
                self.assertEqual(
                    str(catcher.exception),
                    "read called with '/etc/swift/memcache.conf'")

    def test_conf_set_no_read(self):
        """When all options are set inline, the config file is never read."""
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            exc = None
            try:
                memcache.MemcacheMiddleware(
                    FakeApp(), {'memcache_servers': '1.2.3.4:5',
                                'memcache_serialization_support': '2',
                                'memcache_max_connections': '30'})
            except Exception as err:
                exc = err
        self.assertIsNone(exc)

    def test_conf_default(self):
        """No config anywhere: localhost server, pickle off, 2 connections."""
        with mock.patch.object(memcache, 'ConfigParser', EmptyConfigParser):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_inline(self):
        """Inline filter options win over the memcache.conf contents."""
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'memcache_max_connections': '5'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 5)
def test_conf_extra_no_section(self):
with mock.patch.object(memcache, 'ConfigParser',
get_config_parser(section='foobar')):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, False)
self.assertEqual(
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_extra_no_option(self):
replacement_parser = get_config_parser(
memcache_servers='error', memcache_serialization_support='error',
memcache_max_connections='error')
with mock.patch.object(memcache, 'ConfigParser', replacement_parser):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, False)
self.assertEqual(
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_inline_other_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0',
'max_connections': '5'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 5)
def test_conf_inline_bad_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0',
'max_connections': 'bad42'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['1.2.3.4:5'].max_size, 4)
def test_conf_from_extra_conf_bad_max_conn(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser(
memcache_max_connections='bad42')):
app = memcache.MemcacheMiddleware(FakeApp(), {})
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['1.2.3.4:5'].max_size, 2)
def test_conf_from_inline_and_maxc_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_serialization_support': '0'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, True)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_from_inline_and_sers_from_extra_conf(self):
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
app = memcache.MemcacheMiddleware(
FakeApp(),
{'memcache_servers': '6.7.8.9:10',
'memcache_max_connections': '42'})
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
self.assertEqual(app.memcache._allow_pickle, False)
self.assertEqual(app.memcache._allow_unpickle, True)
self.assertEqual(
app.memcache._client_cache['6.7.8.9:10'].max_size, 42)
def test_filter_factory(self):
factory = memcache.filter_factory({'max_connections': '3'},
memcache_servers='10.10.10.10:10',
memcache_serialization_support='1')
thefilter = factory('myapp')
self.assertEqual(thefilter.app, 'myapp')
self.assertEqual(thefilter.memcache_servers, '10.10.10.10:10')
self.assertEqual(thefilter.memcache._allow_pickle, False)
self.assertEqual(thefilter.memcache._allow_unpickle, True)
self.assertEqual(
thefilter.memcache._client_cache['10.10.10.10:10'].max_size, 3)
@patch_policies
def _loadapp(self, proxy_config_path):
"""
Load a proxy from an app.conf to get the memcache_ring
:returns: the memcache_ring of the memcache middleware filter
"""
with mock.patch('swift.proxy.server.Ring'):
app = loadapp(proxy_config_path)
memcache_ring = None
while True:
memcache_ring = getattr(app, 'memcache', None)
if memcache_ring:
break
app = app.app
return memcache_ring
@with_tempdir
def test_real_config(self, tempdir):
config = """
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
"""
config_path = os.path.join(tempdir, 'test.conf')
with open(config_path, 'w') as f:
f.write(dedent(config))
memcache_ring = self._loadapp(config_path)
# only one server by default
self.assertEqual(memcache_ring._client_cache.keys(),
['127.0.0.1:11211'])
# extra options
self.assertEqual(memcache_ring._connect_timeout, 0.3)
self.assertEqual(memcache_ring._pool_timeout, 1.0)
# tries is limited to server count
self.assertEqual(memcache_ring._tries, 1)
self.assertEqual(memcache_ring._io_timeout, 2.0)
@with_tempdir
def test_real_config_with_options(self, tempdir):
config = """
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
10.0.0.4:11211
connect_timeout = 1.0
pool_timeout = 0.5
tries = 4
io_timeout = 1.0
"""
config_path = os.path.join(tempdir, 'test.conf')
with open(config_path, 'w') as f:
f.write(dedent(config))
memcache_ring = self._loadapp(config_path)
self.assertEqual(sorted(memcache_ring._client_cache.keys()),
['10.0.0.%d:11211' % i for i in range(1, 5)])
# extra options
self.assertEqual(memcache_ring._connect_timeout, 1.0)
self.assertEqual(memcache_ring._pool_timeout, 0.5)
# tries is limited to server count
self.assertEqual(memcache_ring._tries, 4)
self.assertEqual(memcache_ring._io_timeout, 1.0)
@with_tempdir
def test_real_memcache_config(self, tempdir):
proxy_config = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
connect_timeout = 1.0
""" % tempdir
proxy_config_path = os.path.join(tempdir, 'test.conf')
with open(proxy_config_path, 'w') as f:
f.write(dedent(proxy_config))
memcache_config = """
[memcache]
memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
10.0.0.4:11211
connect_timeout = 0.5
io_timeout = 1.0
"""
memcache_config_path = os.path.join(tempdir, 'memcache.conf')
with open(memcache_config_path, 'w') as f:
f.write(dedent(memcache_config))
memcache_ring = self._loadapp(proxy_config_path)
self.assertEqual(sorted(memcache_ring._client_cache.keys()),
['10.0.0.%d:11211' % i for i in range(1, 5)])
# proxy option takes precedence
self.assertEqual(memcache_ring._connect_timeout, 1.0)
# default tries are not limited by servers
self.assertEqual(memcache_ring._tries, 3)
# memcache conf options are defaults
self.assertEqual(memcache_ring._io_timeout, 1.0)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
factorlibre/openerp-server-6.1 | openerp/pychart/afm/Utopia_BoldItalic.py | 15 | 1518 | # -*- coding: utf-8 -*-
# AFM font Utopia-BoldItalic (path: /usr/share/fonts/afms/adobe/putbi8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
dir.afm["Utopia-BoldItalic"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 210, 285, 455, 560, 560, 896, 752, 246, 350, 350, 500, 600, 280, 392, 280, 260, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 280, 280, 600, 600, 600, 454, 828, 634, 680, 672, 774, 622, 585, 726, 800, 386, 388, 688, 586, 921, 741, 761, 660, 761, 681, 551, 616, 776, 630, 920, 630, 622, 618, 350, 460, 350, 600, 500, 246, 596, 586, 456, 609, 476, 348, 522, 629, 339, 333, 570, 327, 914, 635, 562, 606, 584, 440, 417, 359, 634, 518, 795, 516, 489, 466, 340, 265, 340, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 285, 560, 560, 100, 560, 560, 568, 560, 246, 455, 560, 360, 360, 651, 652, 500, 500, 514, 490, 280, 500, 580, 465, 246, 455, 455, 560, 1000, 1297, 500, 454, 500, 400, 400, 400, 400, 400, 400, 402, 400, 500, 400, 400, 500, 400, 350, 400, 1000, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 890, 500, 444, 500, 500, 500, 500, 592, 761, 1016, 412, 500, 500, 500, 500, 500, 789, 500, 500, 500, 339, 500, 500, 339, 562, 811, 628, )
| agpl-3.0 |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/tracemalloc.py | 83 | 15651 | from collections import Sequence, Iterable
from functools import total_ordering
import fnmatch
import linecache
import os.path
import pickle
# Import types and functions implemented in C
from _tracemalloc import *
from _tracemalloc import _get_object_traceback, _get_traces
def _format_size(size, sign):
for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
if abs(size) < 100 and unit != 'B':
# 3 digits (xx.x UNIT)
if sign:
return "%+.1f %s" % (size, unit)
else:
return "%.1f %s" % (size, unit)
if abs(size) < 10 * 1024 or unit == 'TiB':
# 4 or 5 digits (xxxx UNIT)
if sign:
return "%+.0f %s" % (size, unit)
else:
return "%.0f %s" % (size, unit)
size /= 1024
class Statistic:
"""
Statistic difference on memory allocations between two Snapshot instance.
"""
__slots__ = ('traceback', 'size', 'count')
def __init__(self, traceback, size, count):
self.traceback = traceback
self.size = size
self.count = count
def __hash__(self):
return hash((self.traceback, self.size, self.count))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.count == other.count)
def __str__(self):
text = ("%s: size=%s, count=%i"
% (self.traceback,
_format_size(self.size, False),
self.count))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<Statistic traceback=%r size=%i count=%i>'
% (self.traceback, self.size, self.count))
def _sort_key(self):
return (self.size, self.count, self.traceback)
class StatisticDiff:
"""
Statistic difference on memory allocations between an old and a new
Snapshot instance.
"""
__slots__ = ('traceback', 'size', 'size_diff', 'count', 'count_diff')
def __init__(self, traceback, size, size_diff, count, count_diff):
self.traceback = traceback
self.size = size
self.size_diff = size_diff
self.count = count
self.count_diff = count_diff
def __hash__(self):
return hash((self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.size_diff == other.size_diff
and self.count == other.count
and self.count_diff == other.count_diff)
def __str__(self):
text = ("%s: size=%s (%s), count=%i (%+i)"
% (self.traceback,
_format_size(self.size, False),
_format_size(self.size_diff, True),
self.count,
self.count_diff))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<StatisticDiff traceback=%r size=%i (%+i) count=%i (%+i)>'
% (self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def _sort_key(self):
return (abs(self.size_diff), self.size,
abs(self.count_diff), self.count,
self.traceback)
def _compare_grouped_stats(old_group, new_group):
statistics = []
for traceback, stat in new_group.items():
previous = old_group.pop(traceback, None)
if previous is not None:
stat = StatisticDiff(traceback,
stat.size, stat.size - previous.size,
stat.count, stat.count - previous.count)
else:
stat = StatisticDiff(traceback,
stat.size, stat.size,
stat.count, stat.count)
statistics.append(stat)
for traceback, stat in old_group.items():
stat = StatisticDiff(traceback, 0, -stat.size, 0, -stat.count)
statistics.append(stat)
return statistics
@total_ordering
class Frame:
"""
Frame of a traceback.
"""
__slots__ = ("_frame",)
def __init__(self, frame):
# frame is a tuple: (filename: str, lineno: int)
self._frame = frame
@property
def filename(self):
return self._frame[0]
@property
def lineno(self):
return self._frame[1]
def __eq__(self, other):
return (self._frame == other._frame)
def __lt__(self, other):
return (self._frame < other._frame)
def __hash__(self):
return hash(self._frame)
def __str__(self):
return "%s:%s" % (self.filename, self.lineno)
def __repr__(self):
return "<Frame filename=%r lineno=%r>" % (self.filename, self.lineno)
@total_ordering
class Traceback(Sequence):
"""
Sequence of Frame instances sorted from the most recent frame
to the oldest frame.
"""
__slots__ = ("_frames",)
def __init__(self, frames):
Sequence.__init__(self)
# frames is a tuple of frame tuples: see Frame constructor for the
# format of a frame tuple
self._frames = frames
def __len__(self):
return len(self._frames)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(Frame(trace) for trace in self._frames[index])
else:
return Frame(self._frames[index])
def __contains__(self, frame):
return frame._frame in self._frames
def __hash__(self):
return hash(self._frames)
def __eq__(self, other):
return (self._frames == other._frames)
def __lt__(self, other):
return (self._frames < other._frames)
def __str__(self):
return str(self[0])
def __repr__(self):
return "<Traceback %r>" % (tuple(self),)
def format(self, limit=None):
lines = []
if limit is not None and limit < 0:
return lines
for frame in self[:limit]:
lines.append(' File "%s", line %s'
% (frame.filename, frame.lineno))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
lines.append(' %s' % line)
return lines
def get_object_traceback(obj):
"""
Get the traceback where the Python object *obj* was allocated.
Return a Traceback instance.
Return None if the tracemalloc module is not tracing memory allocations or
did not trace the allocation of the object.
"""
frames = _get_object_traceback(obj)
if frames is not None:
return Traceback(frames)
else:
return None
class Trace:
"""
Trace of a memory block.
"""
__slots__ = ("_trace",)
def __init__(self, trace):
# trace is a tuple: (size, traceback), see Traceback constructor
# for the format of the traceback tuple
self._trace = trace
@property
def size(self):
return self._trace[0]
@property
def traceback(self):
return Traceback(self._trace[1])
def __eq__(self, other):
return (self._trace == other._trace)
def __hash__(self):
return hash(self._trace)
def __str__(self):
return "%s: %s" % (self.traceback, _format_size(self.size, False))
def __repr__(self):
return ("<Trace size=%s, traceback=%r>"
% (_format_size(self.size, False), self.traceback))
class _Traces(Sequence):
def __init__(self, traces):
Sequence.__init__(self)
# traces is a tuple of trace tuples: see Trace constructor
self._traces = traces
def __len__(self):
return len(self._traces)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(Trace(trace) for trace in self._traces[index])
else:
return Trace(self._traces[index])
def __contains__(self, trace):
return trace._trace in self._traces
def __eq__(self, other):
return (self._traces == other._traces)
def __repr__(self):
return "<Traces len=%s>" % len(self)
def _normalize_filename(filename):
filename = os.path.normcase(filename)
if filename.endswith(('.pyc', '.pyo')):
filename = filename[:-1]
return filename
class Filter:
def __init__(self, inclusive, filename_pattern,
lineno=None, all_frames=False):
self.inclusive = inclusive
self._filename_pattern = _normalize_filename(filename_pattern)
self.lineno = lineno
self.all_frames = all_frames
@property
def filename_pattern(self):
return self._filename_pattern
def __match_frame(self, filename, lineno):
filename = _normalize_filename(filename)
if not fnmatch.fnmatch(filename, self._filename_pattern):
return False
if self.lineno is None:
return True
else:
return (lineno == self.lineno)
def _match_frame(self, filename, lineno):
return self.__match_frame(filename, lineno) ^ (not self.inclusive)
def _match_traceback(self, traceback):
if self.all_frames:
if any(self.__match_frame(filename, lineno)
for filename, lineno in traceback):
return self.inclusive
else:
return (not self.inclusive)
else:
filename, lineno = traceback[0]
return self._match_frame(filename, lineno)
class Snapshot:
"""
Snapshot of traces of memory blocks allocated by Python.
"""
def __init__(self, traces, traceback_limit):
# traces is a tuple of trace tuples: see _Traces constructor for
# the exact format
self.traces = _Traces(traces)
self.traceback_limit = traceback_limit
def dump(self, filename):
"""
Write the snapshot into a file.
"""
with open(filename, "wb") as fp:
pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(filename):
"""
Load a snapshot from a file.
"""
with open(filename, "rb") as fp:
return pickle.load(fp)
def _filter_trace(self, include_filters, exclude_filters, trace):
traceback = trace[1]
if include_filters:
if not any(trace_filter._match_traceback(traceback)
for trace_filter in include_filters):
return False
if exclude_filters:
if any(not trace_filter._match_traceback(traceback)
for trace_filter in exclude_filters):
return False
return True
def filter_traces(self, filters):
"""
Create a new Snapshot instance with a filtered traces sequence, filters
is a list of Filter instances. If filters is an empty list, return a
new Snapshot instance with a copy of the traces.
"""
if not isinstance(filters, Iterable):
raise TypeError("filters must be a list of filters, not %s"
% type(filters).__name__)
if filters:
include_filters = []
exclude_filters = []
for trace_filter in filters:
if trace_filter.inclusive:
include_filters.append(trace_filter)
else:
exclude_filters.append(trace_filter)
new_traces = [trace for trace in self.traces._traces
if self._filter_trace(include_filters,
exclude_filters,
trace)]
else:
new_traces = self.traces._traces.copy()
return Snapshot(new_traces, self.traceback_limit)
def _group_by(self, key_type, cumulative):
if key_type not in ('traceback', 'filename', 'lineno'):
raise ValueError("unknown key_type: %r" % (key_type,))
if cumulative and key_type not in ('lineno', 'filename'):
raise ValueError("cumulative mode cannot by used "
"with key type %r" % key_type)
stats = {}
tracebacks = {}
if not cumulative:
for trace in self.traces._traces:
size, trace_traceback = trace
try:
traceback = tracebacks[trace_traceback]
except KeyError:
if key_type == 'traceback':
frames = trace_traceback
elif key_type == 'lineno':
frames = trace_traceback[:1]
else: # key_type == 'filename':
frames = ((trace_traceback[0][0], 0),)
traceback = Traceback(frames)
tracebacks[trace_traceback] = traceback
try:
stat = stats[traceback]
stat.size += size
stat.count += 1
except KeyError:
stats[traceback] = Statistic(traceback, size, 1)
else:
# cumulative statistics
for trace in self.traces._traces:
size, trace_traceback = trace
for frame in trace_traceback:
try:
traceback = tracebacks[frame]
except KeyError:
if key_type == 'lineno':
frames = (frame,)
else: # key_type == 'filename':
frames = ((frame[0], 0),)
traceback = Traceback(frames)
tracebacks[frame] = traceback
try:
stat = stats[traceback]
stat.size += size
stat.count += 1
except KeyError:
stats[traceback] = Statistic(traceback, size, 1)
return stats
def statistics(self, key_type, cumulative=False):
"""
Group statistics by key_type. Return a sorted list of Statistic
instances.
"""
grouped = self._group_by(key_type, cumulative)
statistics = list(grouped.values())
statistics.sort(reverse=True, key=Statistic._sort_key)
return statistics
def compare_to(self, old_snapshot, key_type, cumulative=False):
"""
Compute the differences with an old snapshot old_snapshot. Get
statistics as a sorted list of StatisticDiff instances, grouped by
group_by.
"""
new_group = self._group_by(key_type, cumulative)
old_group = old_snapshot._group_by(key_type, cumulative)
statistics = _compare_grouped_stats(old_group, new_group)
statistics.sort(reverse=True, key=StatisticDiff._sort_key)
return statistics
def take_snapshot():
"""
Take a snapshot of traces of memory blocks allocated by Python.
"""
if not is_tracing():
raise RuntimeError("the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
traces = _get_traces()
traceback_limit = get_traceback_limit()
return Snapshot(traces, traceback_limit)
| lgpl-3.0 |
abhikumar22/MYBLOG | blg/Lib/token.py | 63 | 3075 | """Token constants (from "token.h")."""
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
ATEQUAL = 50
RARROW = 51
ELLIPSIS = 52
OP = 53
AWAIT = 54
ASYNC = 55
ERRORTOKEN = 56
N_TOKENS = 57
NT_OFFSET = 256
#--end constants--
tok_name = {value: name
for name, value in globals().items()
if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
def _main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except OSError as err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
with fp:
lines = fp.read().split("\n")
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = sorted(tokens.keys())
# load the output skeleton from the target:
try:
fp = open(outFileName)
except OSError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
with fp:
format = fp.read().split("\n")
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except OSError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
with fp:
fp.write("\n".join(format))
if __name__ == "__main__":
_main()
| gpl-3.0 |
Rubisk/mcedit2 | src/mcedit2/widgets/inventory.py | 2 | 17159 | """
inventory
"""
from __future__ import absolute_import, division, print_function
import contextlib
import logging
from mcedit2.command import SimpleRevisionCommand
from mceditlib import nbt
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
import itertools
from mcedit2.widgets.itemtype_list import ItemTypeListModel, ItemTypeIcon, ICON_SIZE
from mcedit2.widgets.layout import Row, Column
from mcedit2.widgets.nbttree.nbteditor import NBTEditorWidget
from mceditlib.blocktypes import VERSION_1_7, VERSION_1_8
log = logging.getLogger(__name__)
class InventoryItemModel(QtCore.QAbstractItemModel):
ItemIDRole = Qt.UserRole
ItemRawIDRole = ItemIDRole + 1
ItemIconRole = ItemRawIDRole + 1
ItemDamageRole = ItemIconRole + 1
ItemCountRole = ItemDamageRole + 1
ItemDisplayNameRole = ItemCountRole + 1
def __init__(self, itemListRef, editorSession):
super(InventoryItemModel, self).__init__()
assert editorSession is not None
self.editorSession = editorSession
self.itemListRef = itemListRef
self.textureCache = {}
def index(self, slot, parentIndex=QtCore.QModelIndex()):
if parentIndex.isValid():
return QtCore.QModelIndex()
return self.createIndex(slot, 0)
def rowCount(self, parent):
# slot numbers are defined by the view's slotLayout
# maybe that should be the model's slotLayout instead
return 0
def data(self, index, role):
if not index.isValid():
return 0
slot = index.row()
itemStack = self.itemListRef.getItemInSlot(slot)
if itemStack is None:
return None
if role in (self.ItemIconRole, self.ItemDisplayNameRole):
try:
itemType = itemStack.itemType
except ValueError as e: # itemType not mapped
return None
except KeyError as e: # missing NBT tag?
log.exception("Error while reading item data: %r", e)
return None
if role == self.ItemIconRole:
return ItemTypeIcon(itemType, self.editorSession, itemStack)
if role == self.ItemDisplayNameRole:
return itemType.displayName
if role == self.ItemIDRole:
return itemStack.id
if role == self.ItemRawIDRole:
return itemStack.raw_id
if role == self.ItemCountRole:
return itemStack.Count
if role == self.ItemDamageRole:
return itemStack.Damage
return None
def setData(self, index, value, role):
if not index.isValid():
return 0
slot = index.row()
itemStack = self.itemListRef.getItemInSlot(slot)
if itemStack is None:
return
if role == self.ItemIDRole:
itemStack.id = value
elif role == self.ItemRawIDRole:
itemStack.raw_id = int(value)
elif role == self.ItemCountRole:
itemStack.Count = value
elif role == self.ItemDamageRole:
itemStack.Damage = value
else:
return
self.dataChanged.emit(index, index)
class InventoryItemWidget(QtGui.QPushButton):
BLANK = None
def __init__(self, inventoryView, slotNumber):
super(InventoryItemWidget, self).__init__()
self.inventoryView = inventoryView
self.slotNumber = slotNumber
self.countText = None
self.setIconSize(QtCore.QSize(ICON_SIZE, ICON_SIZE))
self.setCheckable(True)
if InventoryItemWidget.BLANK is None:
pm = QtGui.QPixmap(ICON_SIZE, ICON_SIZE)
pm.fill(Qt.transparent)
InventoryItemWidget.BLANK = QtGui.QIcon(pm)
self.setIcon(InventoryItemWidget.BLANK)
self.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
def setCount(self, val):
if val == 1:
self.countText = None
else:
self.countText = str(val)
def paintEvent(self, event):
super(InventoryItemWidget, self).paintEvent(event)
if self.countText is None:
return
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
painter.setRenderHint(QtGui.QPainter.TextAntialiasing, True)
font = QtGui.QFont("Arial", 12, 75)
outlinePen = QtGui.QPen(Qt.black)
outlinePen.setWidth(3.0)
fillBrush = QtGui.QBrush(Qt.white)
# painter.setFont(font)
x, y = 0, 0
path = QtGui.QPainterPath()
path.addText(x, y, font, self.countText)
rect = path.boundingRect()
rect.moveBottomRight(QtCore.QPointF(ICON_SIZE + 3, ICON_SIZE + rect.height()))
path.translate(rect.topLeft())
painter.setPen(outlinePen)
painter.drawPath(path)
painter.setBrush(fillBrush)
painter.setPen(None)
painter.drawPath(path)
# outlinePen = QtGui.QPen(color=Qt.black, width=4.0)
# painter.strokePath(path, outlinePen)
#painter.fillPath(path, fillBrush)
class InventoryView(QtGui.QWidget):
def __init__(self, slotLayout, rows=None, columns=None):
"""
slotLayout should be a list of (x, y, slotNumber) tuples.
rows and columns are optional. Pass them if you need the grid to be larger than the slotLayout.
:param slotLayout:
:type slotLayout: list[tuple(int, int, int)]
:type rows: int | None
:type columns: int | None
:return:
:rtype:
"""
super(InventoryView, self).__init__()
self.slotWidgets = {}
gridLayout = QtGui.QGridLayout()
self.setLayout(gridLayout)
# Add placeholders to stretch grid - QGridLayout has no setRow/ColumnCount
if rows:
gridLayout.addWidget(QtGui.QWidget(), rows-1, 0)
if columns:
gridLayout.addWidget(QtGui.QWidget(), 0, columns-1)
def _makeClicked(slot):
def _clicked():
self.slotClicked.emit(slot)
return _clicked
self.slots = []
self.buttonGroup = QtGui.QButtonGroup()
for (x, y, slotNumber) in slotLayout:
itemWidget = InventoryItemWidget(self, slotNumber)
itemWidget._clicked = _makeClicked(slotNumber)
self.slotWidgets[slotNumber] = itemWidget
gridLayout.addWidget(itemWidget, y, x)
itemWidget.clicked.connect(itemWidget._clicked)
self.slots.append(slotNumber)
self.buttonGroup.addButton(itemWidget)
self.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
self.model = None
self.slotClicked.connect(self.slotWasClicked)
slotClicked = QtCore.Signal(int)
def slotWasClicked(self, slotNumber):
self.slotWidgets[slotNumber].setChecked(True)
def setModel(self, model):
assert isinstance(model, InventoryItemModel)
self.model = model
self.model.dataChanged.connect(self.dataChanged)
self.updateItems()
def dataChanged(self, topLeft, bottomRight):
self.updateSlot(topLeft)
def updateItems(self):
for slot in self.slots:
index = self.model.index(slot)
self.updateSlot(index)
def updateSlot(self, index):
slot = index.row()
icon = index.data(InventoryItemModel.ItemIconRole)
slotWidget = self.slotWidgets[slot]
if icon is not None:
slotWidget.setIcon(icon)
else:
slotWidget.setIcon(InventoryItemWidget.BLANK)
count = index.data(InventoryItemModel.ItemCountRole)
if count is None:
return
slotWidget.setCount(count)
class InventoryEditor(QtGui.QWidget):
    """Composite widget for editing a container inventory.

    Combines an :class:`InventoryView` slot grid, a searchable item-type
    list, per-item fields (internal name, raw ID, damage, count) and an NBT
    editor. All edits are wrapped in :class:`InventoryEditCommand` objects
    and pushed onto the editor session's undo stack.
    """

    def __init__(self, slotLayout, rows=None, columns=None):
        """
        slotLayout should be a list of (x, y, slotNumber) tuples.
        rows and columns are optional. Pass them if you need the grid to be larger than the slotLayout.

        :param slotLayout:
        :type slotLayout: list[tuple(int, int, int)]
        :type rows: int | None
        :type columns: int | None
        :return:
        :rtype:
        """
        super(InventoryEditor, self).__init__()
        self.inventoryView = InventoryView(slotLayout, rows, columns)
        self.inventoryView.slotClicked.connect(self.slotWasClicked)
        self.itemList = QtGui.QListView()
        self.itemList.setMinimumWidth(200)
        self.itemList.clicked.connect(self.itemTypeChanged)
        self.itemListModel = None
        self.itemListSearchBox = QtGui.QComboBox()
        self.itemListSearchBox.editTextChanged.connect(self.searchTextChanged)
        self.itemListSearchBox.setEditable(True)
        self.inventoryModel = None
        self.internalNameField = QtGui.QLineEdit()
        self.internalNameField.textChanged.connect(self.internalNameChanged)
        # Spinbox ranges match a signed 16-bit short, the NBT storage type.
        self.rawIDInput = QtGui.QSpinBox(minimum=-32768, maximum=32767)
        self.rawIDInput.setMaximumWidth(100)
        self.rawIDInput.valueChanged.connect(self.rawIDChanged)
        self.damageInput = QtGui.QSpinBox(minimum=-32768, maximum=32767)
        self.damageInput.valueChanged.connect(self.damageChanged)
        self.countInput = QtGui.QSpinBox(minimum=-32768, maximum=32767)
        self.countInput.valueChanged.connect(self.countChanged)
        self.rawIDCheckbox = QtGui.QCheckBox("Edit raw ID")
        self.rawIDCheckbox.toggled.connect(self.rawIDInput.setEnabled)
        self.itemNBTEditor = NBTEditorWidget()
        self.itemNBTEditor.tagValueChanged.connect(self.tagValueDidChange)
        # Model index of the currently selected inventory slot, or None.
        self.currentIndex = None
        self.itemNameLabel = QtGui.QLabel()
        self.setLayout(Column(Row(self.inventoryView,
                                  Column(self.itemListSearchBox, self.itemList)),
                              Row(QtGui.QLabel("Selected item:"), self.itemNameLabel, None),
                              Row(QtGui.QLabel("Internal Name"), self.internalNameField,
                                  self.rawIDCheckbox, self.rawIDInput,
                                  QtGui.QLabel("Damage"), self.damageInput,
                                  QtGui.QLabel("Count"), self.countInput),
                              (self.itemNBTEditor, 1)))
        # Fields stay disabled until a slot with an item is selected.
        self.enableFields(False)

    def enableFields(self, enabled):
        """Enable or disable every per-item editing field at once."""
        self.internalNameField.setEnabled(enabled)
        self.rawIDInput.setEnabled(enabled)
        self.rawIDCheckbox.setEnabled(enabled)
        self.damageInput.setEnabled(enabled)
        self.countInput.setEnabled(enabled)
        self.itemNBTEditor.setEnabled(enabled)

    def showFieldsForVersion(self, version):
        """Show the raw-ID widgets only for the 1.7 item-stack format."""
        oneSeven = version == VERSION_1_7
        self.rawIDCheckbox.setVisible(oneSeven)
        self.rawIDInput.setVisible(oneSeven)

    # Re-entrancy guard: True while updateFields() is programmatically
    # writing widget values, so the resulting change signals are ignored.
    editsDisabled = False

    @contextlib.contextmanager
    def disableEdits(self):
        """Context manager that suppresses edit handlers while active."""
        self.editsDisabled = True
        yield
        self.editsDisabled = False

    def slotWasClicked(self, slotNumber):
        """Select the clicked slot and refresh the editing fields."""
        self.currentIndex = self.inventoryModel.index(slotNumber)
        self.updateFields()

    def tagValueDidChange(self, tagPath):
        # NBT edits may change name/damage/count; re-read everything.
        self.updateFields()

    def updateFields(self):
        """Refresh all fields from the selected slot without echoing edits."""
        with self.disableEdits():
            self._updateFields()

    def _updateFields(self):
        # Internal worker for updateFields(); assumes edits are disabled.
        index = self.currentIndex
        version = self._itemListRef.blockTypes.itemStackVersion
        self.showFieldsForVersion(version)
        internalName = index.data(InventoryItemModel.ItemIDRole)
        if internalName is None:
            # Empty slot: blank and disable everything.
            self.enableFields(False)
            self.internalNameField.setText("")
            self.rawIDInput.setValue(0)
            self.damageInput.setValue(0)
            self.countInput.setValue(0)
            return
        else:
            self.enableFields(True)
            if isinstance(internalName, basestring):
                self.internalNameField.setText(internalName)
            else:
                # Numeric-only ID: no textual name, force raw-ID editing on.
                self.internalNameField.setText("")
                self.rawIDCheckbox.setChecked(True)
            if self.inventoryRef.blockTypes.itemStackVersion == VERSION_1_7:
                rawID = index.data(InventoryItemModel.ItemRawIDRole)
                self.rawIDCheckbox.setEnabled(True)
                self.rawIDInput.setEnabled(self.rawIDCheckbox.isChecked())
                self.rawIDInput.setValue(rawID)
            else:
                self.rawIDCheckbox.setEnabled(False)
                self.rawIDInput.setEnabled(False)
            damage = index.data(InventoryItemModel.ItemDamageRole)
            self.damageInput.setValue(damage)
            count = index.data(InventoryItemModel.ItemCountRole)
            self.countInput.setValue(count)
            tagRef = self._itemListRef.getItemInSlot(index.row())
            self.itemNBTEditor.setRootTagRef(tagRef)
            displayName = index.data(InventoryItemModel.ItemDisplayNameRole)
            self.itemNameLabel.setText(displayName)

    def searchTextChanged(self, value):
        """Filter the item-type list by the search box text.

        A fresh case-insensitive proxy model is built on each keystroke.
        """
        self.proxyModel = QtGui.QSortFilterProxyModel()
        self.proxyModel.setSourceModel(self.itemListModel)
        self.proxyModel.setFilterFixedString(value)
        self.proxyModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
        self.itemList.setModel(self.proxyModel)

    def itemTypeChanged(self, index):
        """Apply an item type chosen from the list as an undoable command."""
        if self.currentIndex is None or self.itemListModel is None:
            return
        if self.editsDisabled:
            return
        internalName = index.data(ItemTypeListModel.InternalNameRole)
        damage = index.data(ItemTypeListModel.DamageRole)
        command = InventoryEditCommand(self.editorSession, self.tr("Change item type"))
        with command.begin():
            self.inventoryModel.setData(self.currentIndex, internalName, InventoryItemModel.ItemIDRole)
            if damage is not None:
                self.inventoryModel.setData(self.currentIndex, damage, InventoryItemModel.ItemDamageRole)
        self.editorSession.pushCommand(command)

    def internalNameChanged(self, value):
        """Apply a hand-typed internal name as an undoable command."""
        if self.currentIndex is None:
            return
        if self.editsDisabled:
            return
        if self.inventoryRef.blockTypes.itemStackVersion == VERSION_1_7:
            # 1.7 stacks store numeric IDs; only accept names that resolve.
            try:
                itemType = self.editorSession.worldEditor.blocktypes.itemTypes[value]
            except KeyError:
                return
        command = InventoryEditCommand(self.editorSession, self.tr("Change item type"))
        with command.begin():
            self.inventoryModel.setData(self.currentIndex, value, InventoryItemModel.ItemIDRole)
        self.editorSession.pushCommand(command)

    def rawIDChanged(self, value):
        """Apply a raw numeric ID edit as an undoable command."""
        if self.currentIndex is None:
            return
        if self.editsDisabled:
            return
        command = InventoryEditCommand(self.editorSession, self.tr("Change item's raw ID"))
        with command.begin():
            self.inventoryModel.setData(self.currentIndex, value, InventoryItemModel.ItemRawIDRole)
        self.editorSession.pushCommand(command)

    def damageChanged(self, value):
        """Apply a damage-value edit as an undoable command."""
        if self.currentIndex is None:
            return
        if self.editsDisabled:
            return
        command = InventoryEditCommand(self.editorSession, self.tr("Change item damage"))
        with command.begin():
            self.inventoryModel.setData(self.currentIndex, value, InventoryItemModel.ItemDamageRole)
        self.editorSession.pushCommand(command)

    def countChanged(self, value):
        """Apply a stack-count edit as an undoable command."""
        if self.currentIndex is None:
            return
        if self.editsDisabled:
            return
        command = InventoryEditCommand(self.editorSession, self.tr("Change item count"))
        with command.begin():
            self.inventoryModel.setData(self.currentIndex, value, InventoryItemModel.ItemCountRole)
        self.editorSession.pushCommand(command)

    _editorSession = None

    @property
    def editorSession(self):
        """The active editor session; setting it rebuilds both models."""
        return self._editorSession

    @editorSession.setter
    def editorSession(self, value):
        self._editorSession = value
        self.updateModels()

    _itemListRef = None

    @property
    def inventoryRef(self):
        """Reference to the inventory being edited; setting rebuilds models."""
        return self._itemListRef

    @inventoryRef.setter
    def inventoryRef(self, value):
        self._itemListRef = value
        self.updateModels()

    def updateModels(self):
        """(Re)create the inventory and item-type models.

        No-op until both the editor session and the inventory ref are set.
        """
        if self._editorSession is None or self._itemListRef is None:
            return
        self.currentIndex = None
        self.enableFields(False)
        self.inventoryModel = InventoryItemModel(self._itemListRef, self._editorSession)
        self.inventoryView.setModel(self.inventoryModel)
        self.inventoryModel.dataChanged.connect(self.dataWasChanged)
        self.itemListModel = ItemTypeListModel(self._editorSession)
        self.itemList.setModel(self.itemListModel)
        self.itemNBTEditor.editorSession = self._editorSession

    def dataWasChanged(self, topLeft, bottomRight):
        # Refresh the fields only when the changed slot is the selected one.
        slot = topLeft.row()
        if slot == self.currentIndex.row():
            self.updateFields()
class InventoryEditCommand(SimpleRevisionCommand):
    """Undo command wrapping a single inventory edit; no extra behavior."""
    pass
| bsd-3-clause |
Multiscale-Genomics/mg-process-fastq | tests/test_bwa_indexer.py | 1 | 4426 | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from basic_modules.metadata import Metadata
from tool.bwa_indexer import bwaIndexerTool
@pytest.mark.genome
@pytest.mark.bwa
@pytest.mark.wgbs
def test_bwa_indexer_bwa():
    """
    Test case to ensure that the BWA indexer works.

    Runs the indexer over the WGBS mouse test genome and checks that a
    non-empty ``.bwa.tar.gz`` index archive is produced next to it.
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")
    # Build paths once with os.path.join instead of repeating string
    # concatenations of the same literals throughout the test.
    genome_fa = os.path.join(resource_path, "bsSeeker.Mouse.GRCm38.fasta")
    index_tar = genome_fa + ".bwa.tar.gz"
    input_files = {
        "genome": genome_fa
    }
    output_files = {
        "index": index_tar
    }
    metadata = {
        "genome": Metadata(
            "Assembly", "fasta", genome_fa, None,
            {'assembly': 'test'}),
    }
    print(input_files, output_files)
    bwa_it = bwaIndexerTool({"execution": resource_path})
    bwa_it.run(input_files, metadata, output_files)
    # Assert truthiness directly rather than comparing with ``is True``.
    assert os.path.isfile(index_tar)
    assert os.path.getsize(index_tar) > 0
@pytest.mark.chipseq
@pytest.mark.genome
@pytest.mark.bwa
def test_bwa_indexer_chipseq():
    """
    Test case to ensure that the BWA indexer works.

    Runs the indexer over the ChIP-seq human test genome and checks that a
    non-empty ``.bwa.tar.gz`` index archive is produced next to it.
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")
    # Build paths once with os.path.join instead of repeating string
    # concatenations of the same literals throughout the test.
    genome_fa = os.path.join(resource_path, "macs2.Human.GCA_000001405.22.fasta")
    index_tar = genome_fa + ".bwa.tar.gz"
    input_files = {
        "genome": genome_fa
    }
    output_files = {
        "index": index_tar
    }
    metadata = {
        "genome": Metadata(
            "Assembly", "fasta", genome_fa, None,
            {'assembly': 'test'}),
    }
    print(input_files, output_files)
    bwa_it = bwaIndexerTool({"execution": resource_path})
    bwa_it.run(input_files, metadata, output_files)
    # Assert truthiness directly rather than comparing with ``is True``.
    assert os.path.isfile(index_tar)
    assert os.path.getsize(index_tar) > 0
@pytest.mark.idamidseq
@pytest.mark.genome
@pytest.mark.bwa
def test_bwa_indexer_idear():
    """
    Test case to ensure that the BWA indexer works.

    Runs the indexer over the iDamID-seq human test genome and checks that
    a non-empty ``.bwa.tar.gz`` index archive is produced next to it.
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")
    # Build paths once with os.path.join instead of repeating string
    # concatenations of the same literals throughout the test.
    genome_fa = os.path.join(resource_path, "idear.Human.GCA_000001405.22.fasta")
    index_tar = genome_fa + ".bwa.tar.gz"
    input_files = {
        "genome": genome_fa
    }
    output_files = {
        "index": index_tar
    }
    metadata = {
        "genome": Metadata(
            "Assembly", "fasta", genome_fa, None,
            {'assembly': 'test'}),
    }
    print(input_files, output_files)
    bwa_it = bwaIndexerTool({"execution": resource_path})
    bwa_it.run(input_files, metadata, output_files)
    # Assert truthiness directly rather than comparing with ``is True``.
    assert os.path.isfile(index_tar)
    assert os.path.getsize(index_tar) > 0
@pytest.mark.mnaseseq
@pytest.mark.genome
@pytest.mark.bwa
def test_bwa_indexer_mnaseseq():
    """
    Test case to ensure that the BWA indexer works.

    Runs the indexer over the MNase-seq mouse test genome and checks that a
    non-empty ``.bwa.tar.gz`` index archive is produced next to it.
    """
    resource_path = os.path.join(os.path.dirname(__file__), "data/")
    # Build paths once with os.path.join instead of repeating string
    # concatenations of the same literals throughout the test.
    genome_fa = os.path.join(resource_path, "inps.Mouse.GRCm38.fasta")
    index_tar = genome_fa + ".bwa.tar.gz"
    input_files = {
        "genome": genome_fa
    }
    output_files = {
        "index": index_tar
    }
    metadata = {
        "genome": Metadata(
            "Assembly", "fasta", genome_fa, None,
            {'assembly': 'test'}),
    }
    print(input_files, output_files)
    bwa_it = bwaIndexerTool({"execution": resource_path})
    bwa_it.run(input_files, metadata, output_files)
    # Assert truthiness directly rather than comparing with ``is True``.
    assert os.path.isfile(index_tar)
    assert os.path.getsize(index_tar) > 0
| apache-2.0 |
yongshengwang/hue | desktop/core/ext-py/Paste-2.0.1/paste/evalexception/evalcontext.py | 50 | 2155 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
from six.moves import cStringIO as StringIO
import traceback
import threading
import pdb
import six
import sys
exec_lock = threading.Lock()
class EvalContext(object):
    """
    Class that represents an interactive interface. It has its own
    namespace. Use eval_context.exec_expr(expr) to run commands; the
    output of those commands is returned, as are print statements.

    This is essentially what doctest does, and is taken directly from
    doctest.
    """

    def __init__(self, namespace, globs):
        # ``namespace`` receives locals for executed code; ``globs`` is the
        # globals dict used for each execution.
        self.namespace = namespace
        self.globs = globs

    def exec_expr(self, s):
        """Compile and execute source string *s* in this context.

        Returns everything written to stdout while the code ran, including
        a formatted traceback if the code raised (KeyboardInterrupt is
        re-raised instead of being captured).
        """
        out = StringIO()
        # Using the lock as a context manager (instead of the previous
        # manual acquire()/release() pair) guarantees the lock is released
        # even if code between acquisition and the try block raises.
        with exec_lock:
            save_stdout = sys.stdout
            try:
                debugger = _OutputRedirectingPdb(save_stdout)
                debugger.reset()
                # Route pdb.set_trace() calls in user code to our debugger.
                pdb.set_trace = debugger.set_trace
                sys.stdout = out
                try:
                    # "single" mode mimics the interactive prompt: expression
                    # results are printed automatically.
                    code = compile(s, '<web>', "single", 0, 1)
                    six.exec_(code, self.globs, self.namespace)
                    debugger.set_continue()
                except KeyboardInterrupt:
                    raise
                except:
                    traceback.print_exc(file=out)
                    debugger.set_continue()
            finally:
                sys.stdout = save_stdout
        return out.getvalue()
# From doctest
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """

    def __init__(self, out):
        # ``out`` is the stream that receives the debugger's own output.
        self.__out = out
        pdb.Pdb.__init__(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method; restore stdout even if it raises.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = save_stdout
EmbodiedCognition/pagoda | pagoda/cooper.py | 1 | 27980 | # -*- coding: utf-8 -*-
'''This module contains a Python implementation of a forward-dynamics solver.
For detailed information about the solver, its raison d'être, and how it works,
please see the documentation for the :class:`World` class.
Further comments and documentation are available in this source file. Eventually
I hope to integrate these comments into some sort of online documentation for
the package as a whole.
'''
from __future__ import division, print_function, absolute_import
import logging
import numpy as np
import ode
import re
from . import physics
from . import skeleton
class Markers:
    '''Manage a set of motion-capture marker bodies in a physics world.

    Each marker is represented by a small kinematic sphere. Markers can be
    attached to skeleton bodies with soft ball joints (see :func:`attach`)
    whose CFM/ERP parameters let the constraint slip like a spring.
    '''

    # Default constraint-force-mixing (softness) and error-reduction
    # parameters for the marker ball joints.
    DEFAULT_CFM = 1e-6
    DEFAULT_ERP = 0.3

    def __init__(self, world):
        self.world = world
        self.jointgroup = ode.JointGroup()
        self.bodies = {}      # marker label -> kinematic sphere body
        self.joints = {}      # marker label -> active ball joint
        self.targets = {}     # marker label -> skeleton body it attaches to
        self.offsets = {}     # marker label -> body-relative anchor offset
        self.channels = {}    # marker label -> column index in self.data
        self.data = None      # (frames, markers, 4) array: x, y, z, visibility
        self.cfms = None
        self.erp = Markers.DEFAULT_ERP
        # these arrays are derived from the data array.
        self.visibility = None
        self.positions = None
        self.velocities = None
        self._frame_no = -1

    @property
    def num_frames(self):
        '''Return the number of frames of marker data.'''
        return self.data.shape[0]

    @property
    def num_markers(self):
        '''Return the number of markers in each frame of data.'''
        return self.data.shape[1]

    @property
    def labels(self):
        '''Return the names of our marker labels in canonical order.'''
        return sorted(self.channels, key=lambda c: self.channels[c])

    def __iter__(self):
        return iter(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def _map_labels_to_channels(self, labels):
        # Accept a whitespace-separated string, a sequence, or an existing
        # mapping; always return a label -> column-index dict.
        if isinstance(labels, str):
            labels = labels.strip().split()
        if isinstance(labels, (tuple, list)):
            return dict((c, i) for i, c in enumerate(labels))
        return labels or {}

    def load_csv(self, filename, start_frame=10, max_frames=int(1e300)):
        '''Load marker data from a CSV file.

        The file will be imported using Pandas, which must be installed to use
        this method. (``pip install pandas``)

        The first line of the CSV file will be used for header information. The
        "time" column will be used as the index for the data frame. There must
        be columns named 'markerAB-foo-x','markerAB-foo-y','markerAB-foo-z', and
        'markerAB-foo-c' for marker 'foo' to be included in the model.

        Parameters
        ----------
        filename : str
            Name of the CSV file to load.
        '''
        # NOTE(review): the ``max_frames`` parameter is accepted but never
        # applied in this method -- confirm whether truncation is intended.
        import pandas as pd
        compression = None
        if filename.endswith('.gz'):
            compression = 'gzip'
        # Missing samples become -1, which marks them invisible downstream.
        df = pd.read_csv(filename, compression=compression).set_index('time').fillna(-1)
        # make sure the data frame's time index matches our world.
        assert self.world.dt == pd.Series(df.index).diff().mean()
        markers = []
        for c in df.columns:
            m = re.match(r'^marker\d\d-(.*)-c$', c)
            if m:
                markers.append(m.group(1))
        self.channels = self._map_labels_to_channels(markers)
        cols = [c for c in df.columns if re.match(r'^marker\d\d-.*-[xyzc]$', c)]
        self.data = df[cols].values.reshape((len(df), len(markers), 4))[start_frame:]
        # Swap the Y and Z columns (coordinate-convention change).
        self.data[:, :, [1, 2]] = self.data[:, :, [2, 1]]
        logging.info('%s: loaded marker data %s', filename, self.data.shape)
        self.process_data()
        self.create_bodies()

    def load_c3d(self, filename, start_frame=0, max_frames=int(1e300)):
        '''Load marker data from a C3D file.

        The file will be imported using the c3d module, which must be installed
        to use this method. (``pip install c3d``)

        Parameters
        ----------
        filename : str
            Name of the C3D file to load.
        start_frame : int, optional
            Discard the first N frames. Defaults to 0.
        max_frames : int, optional
            Maximum number of frames to load. Defaults to loading all frames.
        '''
        import c3d
        with open(filename, 'rb') as handle:
            reader = c3d.Reader(handle)
            logging.info('world frame rate %s, marker frame rate %s',
                         1 / self.world.dt, reader.point_rate)
            # set up a map from marker label to index in the data stream.
            self.channels = self._map_labels_to_channels([
                s.strip() for s in reader.point_labels])
            # read the actual c3d data into a numpy array.
            data = []
            for i, (_, frame, _) in enumerate(reader.read_frames()):
                if i >= start_frame:
                    # Keep x, y, z and the visibility/confidence channel.
                    data.append(frame[:, [0, 1, 2, 4]])
                    if len(data) > max_frames:
                        break
            self.data = np.array(data)
            # scale the data to meters -- mm is a very common C3D unit.
            if reader.get('POINT:UNITS').string_value.strip().lower() == 'mm':
                logging.info('scaling point data from mm to m')
                self.data[:, :, :3] /= 1000.
        logging.info('%s: loaded marker data %s', filename, self.data.shape)
        self.process_data()
        self.create_bodies()

    def process_data(self):
        '''Process data to produce velocity and dropout information.'''
        self.visibility = self.data[:, :, 3]
        self.positions = self.data[:, :, :3]
        # A large sentinel velocity (1000) flags frames where velocity could
        # not be estimated; attach() skips markers whose velocity norm > 10.
        self.velocities = np.zeros_like(self.positions) + 1000
        for frame_no in range(1, len(self.data) - 1):
            prev = self.data[frame_no - 1]
            next = self.data[frame_no + 1]
            for c in range(self.num_markers):
                # Central difference, only when both neighbors are visible.
                if -1 < prev[c, 3] < 100 and -1 < next[c, 3] < 100:
                    self.velocities[frame_no, c] = (
                        next[c, :3] - prev[c, :3]) / (2 * self.world.dt)
        self.cfms = np.zeros_like(self.visibility) + self.DEFAULT_CFM

    def create_bodies(self):
        '''Create physics bodies corresponding to each marker in our data.'''
        self.bodies = {}
        for label in self.channels:
            body = self.world.create_body(
                'sphere', name='marker:{}'.format(label), radius=0.02)
            # Kinematic: markers are positioned directly from the data and
            # are not affected by simulated forces.
            body.is_kinematic = True
            body.color = 0.9, 0.1, 0.1, 0.5
            self.bodies[label] = body

    def load_attachments(self, source, skeleton):
        '''Load attachment configuration from the given text source.

        The attachment configuration file has a simple format. After discarding
        Unix-style comments (any part of a line that starts with the pound (#)
        character), each line in the file is then expected to have the following
        format::

            marker-name body-name X Y Z

        The marker name must correspond to an existing "channel" in our marker
        data. The body name must correspond to a rigid body in the skeleton. The
        X, Y, and Z coordinates specify the body-relative offsets where the
        marker should be attached: 0 corresponds to the center of the body along
        the given axis, while -1 and 1 correspond to the minimal (maximal,
        respectively) extent of the body's bounding box along the corresponding
        dimension.

        Parameters
        ----------
        source : str or file-like
            A filename or file-like object that we can use to obtain text
            configuration that describes how markers are attached to skeleton
            bodies.
        skeleton : :class:`pagoda.skeleton.Skeleton`
            The skeleton to attach our marker data to.
        '''
        self.targets = {}
        self.offsets = {}
        filename = source
        if isinstance(source, str):
            source = open(source)
        else:
            filename = '(file-{})'.format(id(source))
        for i, line in enumerate(source):
            # Strip comments and skip blank lines.
            tokens = line.split('#')[0].strip().split()
            if not tokens:
                continue
            label = tokens.pop(0)
            if label not in self.channels:
                logging.info('%s:%d: unknown marker %s', filename, i, label)
                continue
            if not tokens:
                continue
            name = tokens.pop(0)
            bodies = [b for b in skeleton.bodies if b.name == name]
            if len(bodies) != 1:
                logging.info('%s:%d: %d skeleton bodies match %s',
                             filename, i, len(bodies), name)
                continue
            b = self.targets[label] = bodies[0]
            # Scale the [-1, 1] offsets to half the body's dimensions.
            o = self.offsets[label] = \
                np.array(list(map(float, tokens))) * b.dimensions / 2
            logging.info('%s <--> %s, offset %s', label, b.name, o)

    def detach(self):
        '''Detach all marker bodies from their associated skeleton bodies.'''
        self.jointgroup.empty()
        self.joints = {}

    def attach(self, frame_no):
        '''Attach marker bodies to the corresponding skeleton bodies.

        Attachments are only made for markers that are not in a dropout state in
        the given frame.

        Parameters
        ----------
        frame_no : int
            The frame of data we will use for attaching marker bodies.
        '''
        assert not self.joints
        for label, j in self.channels.items():
            target = self.targets.get(label)
            if target is None:
                continue
            # Skip invisible markers and markers with implausible velocity
            # (the 1000 sentinel set in process_data() trips this check).
            if self.visibility[frame_no, j] < 0:
                continue
            if np.linalg.norm(self.velocities[frame_no, j]) > 10:
                continue
            joint = ode.BallJoint(self.world.ode_world, self.jointgroup)
            joint.attach(self.bodies[label].ode_body, target.ode_body)
            joint.setAnchor1Rel([0, 0, 0])
            joint.setAnchor2Rel(self.offsets[label])
            # Soft constraint: nonzero CFM lets the joint slip like a spring.
            joint.setParam(ode.ParamCFM, self.cfms[frame_no, j])
            joint.setParam(ode.ParamERP, self.erp)
            joint.name = label
            self.joints[label] = joint
        self._frame_no = frame_no

    def reposition(self, frame_no):
        '''Reposition markers to a specific frame of data.

        Parameters
        ----------
        frame_no : int
            The frame of data where we should reposition marker bodies. Markers
            will be positioned in the appropriate places in world coordinates.
            In addition, linear velocities of the markers will be set according
            to the data as long as there are no dropouts in neighboring frames.
        '''
        for label, j in self.channels.items():
            body = self.bodies[label]
            body.position = self.positions[frame_no, j]
            body.linear_velocity = self.velocities[frame_no, j]

    def distances(self):
        '''Get a list of the distances between markers and their attachments.

        Returns
        -------
        distances : ndarray of shape (num-markers, 3)
            Array of distances for each marker joint in our attachment setup. If
            a marker does not currently have an associated joint (e.g. because
            it is not currently visible) this will contain NaN for that row.
        '''
        distances = []
        for label in self.labels:
            joint = self.joints.get(label)
            distances.append([np.nan, np.nan, np.nan] if joint is None else
                             np.array(joint.getAnchor()) - joint.getAnchor2())
        return np.array(distances)

    def forces(self, dx_tm1=None):
        '''Return an array of the forces exerted by marker springs.

        Notes
        -----
        The forces exerted by the marker springs can be approximated by::

            F = kp * dx

        where ``dx`` is the current array of marker distances. An even more
        accurate value is computed by approximating the velocity of the spring
        displacement::

            F = kp * dx + kd * (dx - dx_tm1) / dt

        where ``dx_tm1`` is an array of distances from the previous time step.

        Parameters
        ----------
        dx_tm1 : ndarray
            An array of distances from markers to their attachment targets,
            measured at the previous time step.

        Returns
        -------
        F : ndarray
            An array of forces that the markers are exerting on the skeleton.
        '''
        # Convert ERP/CFM joint parameters into equivalent spring constants.
        cfm = self.cfms[self._frame_no][:, None]
        kp = self.erp / (cfm * self.world.dt)
        kd = (1 - self.erp) / cfm
        dx = self.distances()
        F = kp * dx
        if dx_tm1 is not None:
            # Only add the damping term where both distances are defined.
            bad = np.isnan(dx) | np.isnan(dx_tm1)
            F[~bad] += (kd * (dx - dx_tm1) / self.world.dt)[~bad]
        return F
class World(physics.World):
'''Simulate a physics world that includes an articulated skeleton model.
The "cooper" method, originally described by Cooper & Ballard (2012 Proc.
Motion in Games), uses a forward physics simulator (here, the Open Dynamics
Engine; ODE) to compute inverse motion quantities like angles and torques
using motion-capture data and a structured, articulated model of the human
skeleton. The prerequisites for this method are:
- Record some motion-capture data from a human. This is expected to result
in the locations, in world coordinates, of several motion-capture markers
at regularly-spaced intervals over time.
- Construct a simulated skeleton that matches the size and shape of the
human to some reasonable degree of accuracy. The more accurate the
skeleton, the more accurate the resulting measurements.
In broad strokes, the cooper method proceeds in two stages:
1. :func:`Inverse Kinematics <inverse_kinematics>`. The motion-capture data
are attached to the simulated skeleton using ball joints. These ball
joints are configured so that their constraints (namely, placing both
anchor points of the joint at the same location in space) are allowed to
slip; ODE implements this slippage using a spring dynamics, which
provides a natural mechanism for the articulated skeleton to interpolate
the marker data as well as possible.
At each frame during the first pass, the motion-capture markers are
placed at the appropriate location in space, and the attached articulated
skeleton "snaps" toward the markers using its inertia (from the motion in
preceding frames) as well as the spring constraints provided by the
marker joint slippage.
At each frame of this process, the articulated skeleton can be queried to
obtain joint angles for each degree of freedom. In addition, the markers
can be queried to find their constraint slippage.
2. :func:`Inverse Dynamics <inverse_dynamics>`. The marker constraints are
removed, and the joint angles computed in the first pass are used to
constrain the skeleton's movements.
At each frame during the second pass, the joints in the skeleton attempt
to follow the angles computed in the first pass; a PID controller is used
to convert the angular error value into a target angular velocity for
each joint.
The torques that ODE computes to solve this forward angle-following
problem are returned as a result of the second pass.
In general, the cooper model is a useful way of getting a physics simulator,
a model of a human skeleton, and some motion-capture data to interact
smoothly. Particularly useful for almost any simulations of human motion are
the :func:`settle_to_markers` and :func:`follow_markers` methods.
'''
    def load_skeleton(self, filename, pid_params=None):
        '''Create and configure a skeleton in our model.

        Parameters
        ----------
        filename : str
            The name of a file containing skeleton configuration data.
        pid_params : dict, optional
            If given, use this dictionary to set the PID controller
            parameters on each joint in the skeleton. See
            :func:`pagoda.skeleton.pid` for more information.
        '''
        self.skeleton = skeleton.Skeleton(self)
        self.skeleton.load(filename, color=(0.3, 0.5, 0.9, 0.8))
        if pid_params:
            self.skeleton.set_pid_params(**pid_params)
        # Low ERP keeps joint-error correction gentle; zero CFM keeps the
        # skeleton's own joint constraints hard (no slippage).
        self.skeleton.erp = 0.1
        self.skeleton.cfm = 0
def load_markers(self, filename, attachments, max_frames=1e100):
'''Load marker data and attachment preferences into the model.
Parameters
----------
filename : str
The name of a file containing marker data. This currently needs to
be either a .C3D or a .CSV file. CSV files must adhere to a fairly
strict column naming convention; see :func:`Markers.load_csv` for
more information.
attachments : str
The name of a text file specifying how markers are attached to
skeleton bodies.
max_frames : number, optional
Only read in this many frames of marker data. By default, the entire
data file is read into memory.
Returns
-------
markers : :class:`Markers`
Returns a markers object containing loaded marker data as well as
skeleton attachment configuration.
'''
self.markers = Markers(self)
fn = filename.lower()
if fn.endswith('.c3d'):
self.markers.load_c3d(filename, max_frames=max_frames)
elif fn.endswith('.csv') or fn.endswith('.csv.gz'):
self.markers.load_csv(filename, max_frames=max_frames)
else:
logging.fatal('%s: not sure how to load markers!', filename)
self.markers.load_attachments(attachments, self.skeleton)
def step(self, substeps=2):
'''Advance the physics world by one step.
Typically this is called as part of a :class:`pagoda.viewer.Viewer`, but
it can also be called manually (or some other stepping mechanism
entirely can be used).
'''
# by default we step by following our loaded marker data.
self.frame_no += 1
try:
next(self.follower)
except (AttributeError, StopIteration) as err:
self.reset()
    def reset(self):
        '''Reset the automatic process that gets called by :func:`step`.

        By default this follows whatever marker data is loaded into our model.

        Provide an override for this method to customize the default behavior of
        the :func:`step` method.
        '''
        # follow_markers() returns a generator; step() pulls one frame at a
        # time from it via next().
        self.follower = self.follow_markers()
    def settle_to_markers(self, frame_no=0, max_distance=0.05, max_iters=300,
                          states=None):
        '''Settle the skeleton to our marker data at a specific frame.

        Parameters
        ----------
        frame_no : int, optional
            Settle the skeleton to marker data at this frame. Defaults to 0.
        max_distance : float, optional
            The settling process will stop when the mean marker distance falls
            below this threshold. Defaults to 0.05m (5cm). Setting this too
            small prevents the settling process from finishing (it will loop
            indefinitely), and setting it too large prevents the skeleton from
            settling to a stable state near the markers.
        max_iters : int, optional
            Attempt to settle markers for at most this many iterations. Defaults
            to 300.
        states : list of body states, optional
            If given, set the bodies in our skeleton to these kinematic states
            before starting the settling process.
        '''
        if states is not None:
            self.skeleton.set_body_states(states)
        dist = None
        for _ in range(max_iters):
            # Run one full simulation step toward the target marker frame.
            for _ in self._step_to_marker_frame(frame_no):
                pass
            dist = np.nanmean(abs(self.markers.distances()))
            logging.info('settling to frame %d: marker distance %.3f', frame_no, dist)
            if dist < max_distance:
                return self.skeleton.get_body_states()
            # Zero velocities so momentum does not accumulate across
            # settling iterations.
            for b in self.skeleton.bodies:
                b.linear_velocity = 0, 0, 0
                b.angular_velocity = 0, 0, 0
        # NOTE(review): if settling never converges, the *input* states are
        # returned unchanged (possibly None) -- confirm callers expect this.
        return states
    def follow_markers(self, start=0, end=1e100, states=None):
        '''Iterate over a set of marker data, dragging its skeleton along.

        Parameters
        ----------
        start : int, optional
            Start following marker data after this frame. Defaults to 0.
        end : int, optional
            Stop following marker data after this frame. Defaults to the end of
            the marker data.
        states : list of body states, optional
            If given, set the states of the skeleton bodies to these values
            before starting to follow the marker data.
        '''
        if states is not None:
            self.skeleton.set_body_states(states)
        for frame_no, frame in enumerate(self.markers):
            if frame_no < start:
                continue
            if frame_no >= end:
                break
            # Delegate the actual physics update; re-yield its body states.
            for states in self._step_to_marker_frame(frame_no):
                yield states
    def _step_to_marker_frame(self, frame_no, dt=None):
        '''Update the simulator to a specific frame of marker data.

        This method returns a generator of body states for the skeleton! This
        generator must be exhausted (e.g., by consuming this call in a for loop)
        for the simulator to work properly.

        This process involves the following steps:

        - Move the markers to their new location:

          - Detach from the skeleton
          - Update marker locations
          - Reattach to the skeleton

        - Detect ODE collisions
        - Yield the states of the bodies in the skeleton
        - Advance the ODE world one step

        Parameters
        ----------
        frame_no : int
            Step to this frame of marker data.
        dt : float, optional
            Step with this time duration. Defaults to ``self.dt``.

        Returns
        -------
        states : sequence of state tuples
            A generator of a sequence of one body state for the skeleton. This
            generator must be exhausted for the simulation to work properly.
        '''
        # update the positions and velocities of the markers.
        self.markers.detach()
        self.markers.reposition(frame_no)
        self.markers.attach(frame_no)
        # detect collisions.
        self.ode_space.collide(None, self.on_collision)
        # record the state of each skeleton body.
        states = self.skeleton.get_body_states()
        # NOTE(review): the states are immediately written back -- presumably
        # to re-sync the skeleton before stepping; confirm intent.
        self.skeleton.set_body_states(states)
        # yield the current simulation state to our caller.
        yield states
        # update the ode world.
        self.ode_world.step(dt or self.dt)
        # clear out contact joints to prepare for the next frame.
        self.ode_contactgroup.empty()
def inverse_kinematics(self, start=0, end=1e100, states=None, max_force=20):
'''Follow a set of marker data, yielding kinematic joint angles.
Parameters
----------
start : int, optional
Start following marker data after this frame. Defaults to 0.
end : int, optional
Stop following marker data after this frame. Defaults to the end of
the marker data.
states : list of body states, optional
If given, set the states of the skeleton bodies to these values
before starting to follow the marker data.
max_force : float, optional
Allow each degree of freedom in the skeleton to exert at most this
force when attempting to maintain its equilibrium position. This
defaults to 20N. Set this value higher to simulate a stiff skeleton
while following marker data.
Returns
-------
angles : sequence of angle frames
Returns a generator of joint angle data for the skeleton. One set of
joint angles will be generated for each frame of marker data between
`start` and `end`.
'''
zeros = None
if max_force > 0:
self.skeleton.enable_motors(max_force)
zeros = np.zeros(self.skeleton.num_dofs)
for _ in self.follow_markers(start, end, states):
if zeros is not None:
self.skeleton.set_target_angles(zeros)
yield self.skeleton.joint_angles
    def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100):
        '''Follow a set of angle data, yielding dynamic joint torques.
        Parameters
        ----------
        angles : ndarray (num-frames x num-dofs)
            Follow angle data provided by this array of angle values.
        start : int, optional
            Start following angle data after this frame. Defaults to the start
            of the angle data.
        end : int, optional
            Stop following angle data after this frame. Defaults to the end of
            the angle data.
        states : list of body states, optional
            If given, set the states of the skeleton bodies to these values
            before starting to follow the marker data.
        max_force : float, optional
            Allow each degree of freedom in the skeleton to exert at most this
            force when attempting to follow the given joint angles. Defaults to
            100N. Setting this value to be large results in more accurate
            following but can cause oscillations in the PID controllers,
            resulting in noisy torques.
        Returns
        -------
        torques : sequence of torque frames
            Returns a generator of joint torque data for the skeleton. One set
            of joint torques will be generated for each frame of angle data
            between `start` and `end`.
        '''
        if states is not None:
            self.skeleton.set_body_states(states)
        for frame_no, frame in enumerate(angles):
            if frame_no < start:
                continue
            if frame_no >= end:
                break
            self.ode_space.collide(None, self.on_collision)
            # snapshot the body states so we can rewind after the probe step.
            states = self.skeleton.get_body_states()
            self.skeleton.set_body_states(states)
            # joseph's stability fix: step to compute torques, then reset the
            # skeleton to the start of the step, and then step using computed
            # torques. thus any numerical errors between the body states after
            # stepping using angle constraints will be removed, because we
            # will be stepping the model using the computed torques.
            self.skeleton.enable_motors(max_force)
            self.skeleton.set_target_angles(angles[frame_no])
            # probe step: joints chase the target angles; read off the torques
            # the motors had to apply to get there.
            self.ode_world.step(self.dt)
            torques = self.skeleton.joint_torques
            self.skeleton.disable_motors()
            # rewind to the pre-step snapshot and replay with explicit torques.
            self.skeleton.set_body_states(states)
            self.skeleton.add_torques(torques)
            yield torques
            self.ode_world.step(self.dt)
            self.ode_contactgroup.empty()
def forward_dynamics(self, torques, start=0, states=None):
'''Move the body according to a set of torque data.'''
if states is not None:
self.skeleton.set_body_states(states)
for frame_no, torque in enumerate(torques):
if frame_no < start:
continue
if frame_no >= end:
break
self.ode_space.collide(None, self.on_collision)
self.skeleton.add_torques(torque)
self.ode_world.step(self.dt)
yield
self.ode_contactgroup.empty()
| mit |
prutseltje/ansible | lib/ansible/modules/packaging/os/redhat_subscription.py | 6 | 28269 | #!/usr/bin/python
# James Laska (jlaska@redhat.com)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
description:
- Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
version_added: "1.2"
author: "Barnaby Court (@barnabycourt)"
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
- Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
config file and default to None.
requirements:
- subscription-manager
options:
state:
description:
- whether to register and subscribe (C(present)), or unregister (C(absent)) a system
choices: [ "present", "absent" ]
default: "present"
username:
description:
- access.redhat.com or Sat6 username
password:
description:
- access.redhat.com or Sat6 password
server_hostname:
description:
- Specify an alternative Red Hat Subscription Management or Sat6 server
server_insecure:
description:
- Enable or disable https server certificate verification when connecting to C(server_hostname)
rhsm_baseurl:
description:
- Specify CDN baseurl
server_proxy_hostname:
description:
- Specify a HTTP proxy hostname
version_added: "2.4"
server_proxy_port:
description:
- Specify a HTTP proxy port
version_added: "2.4"
server_proxy_user:
description:
- Specify a user for HTTP proxy with basic authentication
version_added: "2.4"
server_proxy_password:
description:
- Specify a password for HTTP proxy with basic authentication
version_added: "2.4"
auto_attach:
description:
- Upon successful registration, auto-consume available subscriptions
- Added in favor of deprecated autosubscribe in 2.5.
type: bool
default: 'no'
version_added: "2.5"
aliases: [autosubscribe]
activationkey:
description:
- supply an activation key for use with registration
org_id:
description:
- Organization ID to use in conjunction with activationkey
version_added: "2.0"
environment:
description:
- Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
version_added: "2.2"
pool:
description:
- |
Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
possible, as it is much faster. Mutually exclusive with I(pool_ids).
default: '^$'
pool_ids:
description:
- |
Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple
entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
default: []
version_added: "2.4"
consumer_type:
description:
- The type of unit to register, defaults to system
version_added: "2.1"
consumer_name:
description:
- Name of the system to register, defaults to the hostname
version_added: "2.1"
consumer_id:
description:
- |
References an existing consumer ID to resume using a previous registration
for this system. If the system's identity certificate is lost or corrupted,
this option allows it to resume using its previous identity and subscriptions.
The default is to not specify a consumer ID so a new ID is created.
version_added: "2.1"
force_register:
description:
- Register the system even if it is already registered
type: bool
default: 'no'
version_added: "2.2"
'''
EXAMPLES = '''
- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
redhat_subscription:
state: present
username: joe_user
password: somepass
auto_attach: true
- name: Same as above but subscribe to a specific pool by ID.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids: 0123456789abcdef0123456789abcdef
- name: Register and subscribe to multiple pools.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef
- 1123456789abcdef0123456789abcdef
- name: Same as above but consume multiple entitlements.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef: 2
- 1123456789abcdef0123456789abcdef: 4
- name: Register and pull existing system data.
redhat_subscription:
state: present
username: joe_user
password: somepass
consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^Red Hat Enterprise Server$'
- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
redhat_subscription:
state: present
username: joe_user
password: somepass
environment: Library
auto_attach: true
'''
RETURN = '''
subscribed_pool_ids:
description: List of pool IDs to which system is now subscribed
returned: success
type: complex
contains: {
"8a85f9815ab905d3015ab928c7005de4": "1"
}
'''
import os
import re
import shutil
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves import configparser
SUBMAN_CMD = None
class RegistrationBase(object):
    """Abstract base for registration backends.

    Subclasses implement the actual registration/subscription commands;
    this base only stores credentials and provides yum plugin helpers.
    """

    def __init__(self, module, username=None, password=None):
        self.module = module
        self.username = username
        self.password = password

    def configure(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def enable(self):
        # Remove any existing redhat.repo
        redhat_repo = '/etc/yum.repos.d/redhat.repo'
        if os.path.isfile(redhat_repo):
            os.unlink(redhat_repo)

    def register(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unregister(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unsubscribe(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def update_plugin_conf(self, plugin, enabled=True):
        """Toggle the 'enabled' flag of a yum plugin config file, if present.

        The file is edited on a temp copy and moved into place atomically.
        """
        plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
        if os.path.isfile(plugin_conf):
            tmpfd, tmpfile = tempfile.mkstemp()
            # close the raw descriptor right away; we only need the path.
            # (it was previously leaked on every call.)
            os.close(tmpfd)
            shutil.copy2(plugin_conf, tmpfile)
            cfg = configparser.ConfigParser()
            cfg.read([tmpfile])
            # configparser on Python 3 requires option values to be strings;
            # passing the ints 1/0 raised TypeError there (py2 wrote the same
            # text either way, so this is behavior-preserving on py2).
            if enabled:
                cfg.set('main', 'enabled', '1')
            else:
                cfg.set('main', 'enabled', '0')
            with open(tmpfile, 'w+') as fd:
                cfg.write(fd)
            self.module.atomic_move(tmpfile, plugin_conf)

    def subscribe(self, **kwargs):
        raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
    """Registration backend driving the C(subscription-manager) CLI.

    NOTE: several methods previously unpacked ``run_command`` results as
    ``rc, stderr, stdout`` -- AnsibleModule.run_command returns
    ``(rc, stdout, stderr)``, so the names were swapped. The values were
    unused, so only the (misleading) names are corrected here.
    """

    def __init__(self, module, username=None, password=None):
        RegistrationBase.__init__(self, module, username, password)
        self.module = module

    def enable(self):
        '''
        Enable the system to receive updates from subscription-manager.
        This involves updating affected yum plugins and removing any
        conflicting yum repositories.
        '''
        RegistrationBase.enable(self)
        self.update_plugin_conf('rhnplugin', False)
        self.update_plugin_conf('subscription-manager', True)

    def configure(self, **kwargs):
        '''
        Configure the system as directed for registration with RHSM

        Raises:
          * Exception - if error occurs while running command
        '''
        args = [SUBMAN_CMD, 'config']
        # Pass supplied **kwargs as parameters to subscription-manager. Ignore
        # non-configuration parameters and replace '_' with '.'. For example,
        # 'server_hostname' becomes '--server.hostname'.
        for k, v in kwargs.items():
            if re.search(r'^(server|rhsm)_', k) and v is not None:
                args.append('--%s=%s' % (k.replace('_', '.', 1), v))
        self.module.run_command(args, check_rc=True)

    @property
    def is_registered(self):
        '''
        Determine whether the current system is registered.

        Returns:
          * Boolean - whether the current system is currently registered to
                      RHSM.
        '''
        # 'subscription-manager identity' exits 0 only on a registered system.
        args = [SUBMAN_CMD, 'identity']
        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
        return rc == 0

    def register(self, username, password, auto_attach, activationkey, org_id,
                 consumer_type, consumer_name, consumer_id, force_register, environment,
                 rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
                 server_proxy_port, server_proxy_user, server_proxy_password):
        '''
        Register the current system to the provided RHSM or Sat6 server

        Raises:
          * Exception - if error occurs while running command
        '''
        args = [SUBMAN_CMD, 'register']
        # Generate command arguments
        if force_register:
            args.extend(['--force'])
        if rhsm_baseurl:
            args.extend(['--baseurl', rhsm_baseurl])
        if server_insecure:
            args.extend(['--insecure'])
        if server_hostname:
            args.extend(['--serverurl', server_hostname])
        if org_id:
            args.extend(['--org', org_id])
        # username/password and auto-attach only apply when no activation key
        # is used; with a key, credentials come from the key itself.
        if activationkey:
            args.extend(['--activationkey', activationkey])
        else:
            if auto_attach:
                args.append('--auto-attach')
            if username:
                args.extend(['--username', username])
            if password:
                args.extend(['--password', password])
        if consumer_type:
            args.extend(['--type', consumer_type])
        if consumer_name:
            args.extend(['--name', consumer_name])
        if consumer_id:
            args.extend(['--consumerid', consumer_id])
        if environment:
            args.extend(['--environment', environment])
        if server_proxy_hostname and server_proxy_port:
            args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
        if server_proxy_user:
            args.extend(['--proxyuser', server_proxy_user])
        if server_proxy_password:
            args.extend(['--proxypassword', server_proxy_password])
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)

    def unsubscribe(self, serials=None):
        '''
        Unsubscribe a system from subscribed channels

        Args:
          serials(list or None): list of serials to unsubscribe. If
                                 serials is none or an empty list, then
                                 all subscribed channels will be removed.
        Raises:
          * Exception - if error occurs while running command
        '''
        items = []
        if serials is not None and serials:
            items = ["--serial=%s" % s for s in serials]
        if serials is None:
            items = ["--all"]
        # an explicitly empty serials list leaves items empty: nothing is
        # removed, and the command is skipped entirely.
        if items:
            args = [SUBMAN_CMD, 'unsubscribe'] + items
            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
        return serials

    def unregister(self):
        '''
        Unregister a currently registered system

        Raises:
          * Exception - if error occurs while running command
        '''
        args = [SUBMAN_CMD, 'unregister']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
        self.update_plugin_conf('rhnplugin', False)
        self.update_plugin_conf('subscription-manager', False)

    def subscribe(self, regexp):
        '''
        Subscribe current system to available pools matching the specified
        regular expression. It matches regexp against available pool ids first.
        If any pool ids match, subscribe to those pools and return.

        If no pool ids match, then match regexp against available pool product
        names. Note this can still easily match many many pools. Then subscribe
        to those pools.

        Since a pool id is a more specific match, we only fallback to matching
        against names if we didn't match pool ids.

        Raises:
          * Exception - if error occurs while running command
        '''
        # See https://github.com/ansible/ansible/issues/19466

        # subscribe to pools whose pool id matches regexp (and only the pool id)
        subscribed_pool_ids = self.subscribe_pool(regexp)

        # If we found any matches, we are done
        # Don't attempt to match pools by product name
        if subscribed_pool_ids:
            return subscribed_pool_ids

        # We didn't match any pool ids.
        # Now try subscribing to pools based on product name match
        # Note: This can match lots of product names.
        subscribed_by_product_pool_ids = self.subscribe_product(regexp)
        if subscribed_by_product_pool_ids:
            return subscribed_by_product_pool_ids

        # no matches
        return []

    def subscribe_by_pool_ids(self, pool_ids):
        """Attach the given pools; pool_ids maps pool id -> quantity string."""
        for pool_id, quantity in pool_ids.items():
            args = [SUBMAN_CMD, 'attach', '--pool', pool_id, '--quantity', quantity]
            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
        return pool_ids

    def subscribe_pool(self, regexp):
        '''
        Subscribe current system to available pools matching the specified
        regular expression

        Raises:
          * Exception - if error occurs while running command
        '''
        # Available pools ready for subscription
        available_pools = RhsmPools(self.module)

        subscribed_pool_ids = []
        for pool in available_pools.filter_pools(regexp):
            pool.subscribe()
            subscribed_pool_ids.append(pool.get_pool_id())
        return subscribed_pool_ids

    def subscribe_product(self, regexp):
        '''
        Subscribe current system to available pools whose product name matches
        the specified regular expression

        Raises:
          * Exception - if error occurs while running command
        '''
        # Available pools ready for subscription
        available_pools = RhsmPools(self.module)

        subscribed_pool_ids = []
        for pool in available_pools.filter_products(regexp):
            pool.subscribe()
            subscribed_pool_ids.append(pool.get_pool_id())
        return subscribed_pool_ids

    def update_subscriptions(self, regexp):
        """Reconcile consumed pools against regexp: drop non-matching serials,
        then (re)subscribe to matching pools. Returns a change summary dict."""
        changed = False
        consumed_pools = RhsmPools(self.module, consumed=True)
        pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
        pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])

        serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
        serials = self.unsubscribe(serials=serials_to_remove)

        subscribed_pool_ids = self.subscribe(regexp)

        if subscribed_pool_ids or serials:
            changed = True
        return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
                'unsubscribed_serials': serials}

    def update_subscriptions_by_pool_ids(self, pool_ids):
        """Reconcile consumed pools against a {pool_id: quantity} mapping.

        NOTE(review): quantities are compared as the strings produced by
        main() against subscription-manager's QuantityUsed output -- confirm
        both sides always use the same string form.
        """
        changed = False
        consumed_pools = RhsmPools(self.module, consumed=True)

        existing_pools = {}
        for p in consumed_pools:
            existing_pools[p.get_pool_id()] = p.QuantityUsed

        serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
        serials = self.unsubscribe(serials=serials_to_remove)

        missing_pools = {}
        for pool_id, quantity in pool_ids.items():
            if existing_pools.get(pool_id, 0) != quantity:
                missing_pools[pool_id] = quantity

        self.subscribe_by_pool_ids(missing_pools)

        if missing_pools or serials:
            changed = True
        return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
                'unsubscribed_serials': serials}
class RhsmPool(object):
    '''
    Convenience class for housing subscription information
    '''

    def __init__(self, module, **kwargs):
        self.module = module
        # subscription-manager output fields become attributes verbatim
        # (e.g. PoolId, Serial, QuantityUsed).
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __str__(self):
        return str(self.__getattribute__('_name'))

    def get_pool_id(self):
        """Return the pool id under either of its two CLI spellings.

        'subscription-manager list --available' labels the column 'PoolId'
        while '--consumed' uses 'PoolID'. The previous implementation,
        getattr(self, 'PoolId', getattr(self, 'PoolID')), evaluated the
        fallback eagerly and therefore raised AttributeError whenever
        'PoolID' was absent -- even when 'PoolId' was present.
        """
        try:
            return self.PoolId
        except AttributeError:
            return self.PoolID

    def subscribe(self):
        """Attach this pool; returns True when the command succeeded."""
        args = "subscription-manager subscribe --pool %s" % self.get_pool_id()
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
        return rc == 0
class RhsmPools(object):
    """Collection of subscription pools reported by subscription-manager."""

    def __init__(self, module, consumed=False):
        self.module = module
        self.products = self._load_product_list(consumed)

    def __iter__(self):
        return iter(self.products)

    def _load_product_list(self, consumed=False):
        """Parse 'subscription-manager list' output into RhsmPool objects.

        Args:
            consumed(bool): if True list consumed pools, else list available
                            pools (default False)
        """
        args = "subscription-manager list"
        args += " --consumed" if consumed else " --available"
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)

        pools = []
        for raw_line in stdout.split('\n'):
            stripped = raw_line.strip()
            # skip group separators and any line that is not a 'Key: value' pair
            if not stripped or ':' not in stripped:
                continue
            key, _sep, value = stripped.partition(':')
            key = key.strip().replace(" ", "")  # To unify
            value = value.strip()
            if key in ('ProductName', 'SubscriptionName'):
                # a name line starts a new pool record
                pools.append(RhsmPool(self.module, _name=value, key=value))
            elif pools:
                # every other pair belongs to the most recent pool record
                setattr(pools[-1], key, value)
        return pools

    def filter_pools(self, regexp='^$'):
        '''
        Return a generator of RhsmPools whose pool id matches the provided
        regular expression
        '''
        matcher = re.compile(regexp)
        return (pool for pool in self.products if matcher.search(pool.get_pool_id()))

    def filter_products(self, regexp='^$'):
        '''
        Return a generator of RhsmPools whose product name matches the provided
        regular expression
        '''
        matcher = re.compile(regexp)
        return (pool for pool in self.products if matcher.search(pool._name))
def main():
    """Module entry point: register/unregister the system with RHSM and
    reconcile its consumed subscriptions with the requested pool/pool_ids."""

    # Load RHSM configuration from file
    rhsm = Rhsm(None)

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present',
                       choices=['present', 'absent']),
            username=dict(default=None,
                          required=False),
            password=dict(default=None,
                          required=False,
                          no_log=True),
            server_hostname=dict(default=None,
                                 required=False),
            server_insecure=dict(default=None,
                                 required=False),
            rhsm_baseurl=dict(default=None,
                              required=False),
            auto_attach=dict(aliases=['autosubscribe'], default=False, type='bool'),
            activationkey=dict(default=None,
                               required=False,
                               no_log=True),
            org_id=dict(default=None,
                        required=False),
            environment=dict(default=None,
                             required=False, type='str'),
            pool=dict(default='^$',
                      required=False,
                      type='str'),
            pool_ids=dict(default=[],
                          required=False,
                          type='list'),
            consumer_type=dict(default=None,
                               required=False),
            consumer_name=dict(default=None,
                               required=False),
            consumer_id=dict(default=None,
                             required=False),
            force_register=dict(default=False,
                                type='bool'),
            server_proxy_hostname=dict(default=None,
                                       required=False),
            server_proxy_port=dict(default=None,
                                   required=False),
            server_proxy_user=dict(default=None,
                                   required=False),
            server_proxy_password=dict(default=None,
                                       required=False,
                                       no_log=True),
        ),
        required_together=[['username', 'password'],
                           ['server_proxy_hostname', 'server_proxy_port'],
                           ['server_proxy_user', 'server_proxy_password']],
        # NOTE(review): 'autosubscribe' (an alias of auto_attach) and 'force'
        # (there is no such parameter; it is called force_register) are not
        # canonical option names, so AnsibleModule never actually enforces
        # those two exclusions. Left unchanged here because enforcing them now
        # would reject previously-accepted task parameter combinations.
        mutually_exclusive=[['activationkey', 'username'],
                            ['activationkey', 'consumer_id'],
                            ['activationkey', 'environment'],
                            ['activationkey', 'autosubscribe'],
                            ['force', 'consumer_id'],
                            ['pool', 'pool_ids']],
        required_if=[['state', 'present', ['username', 'activationkey'], True]],
    )

    rhsm.module = module
    state = module.params['state']
    username = module.params['username']
    password = module.params['password']
    server_hostname = module.params['server_hostname']
    server_insecure = module.params['server_insecure']
    rhsm_baseurl = module.params['rhsm_baseurl']
    auto_attach = module.params['auto_attach']
    activationkey = module.params['activationkey']
    org_id = module.params['org_id']
    if activationkey and not org_id:
        module.fail_json(msg='org_id is required when using activationkey')
    environment = module.params['environment']
    pool = module.params['pool']

    # Normalize pool_ids into a {pool_id: quantity-as-string} mapping; list
    # entries may be plain pool ids or single-entry {pool_id: quantity} dicts.
    pool_ids = {}
    for value in module.params['pool_ids']:
        if isinstance(value, dict):
            if len(value) != 1:
                module.fail_json(msg='Unable to parse pool_ids option.')
            # list() is required on Python 3, where dict.items() returns a
            # view that is not indexable (value.items()[0] raised TypeError).
            pool_id, quantity = list(value.items())[0]
        else:
            pool_id, quantity = value, 1
        pool_ids[pool_id] = str(quantity)
    consumer_type = module.params["consumer_type"]
    consumer_name = module.params["consumer_name"]
    consumer_id = module.params["consumer_id"]
    force_register = module.params["force_register"]
    server_proxy_hostname = module.params['server_proxy_hostname']
    server_proxy_port = module.params['server_proxy_port']
    server_proxy_user = module.params['server_proxy_user']
    server_proxy_password = module.params['server_proxy_password']

    global SUBMAN_CMD
    SUBMAN_CMD = module.get_bin_path('subscription-manager', True)

    # Ensure system is registered
    if state == 'present':

        # Register system
        if rhsm.is_registered and not force_register:
            # Already registered: only reconcile subscriptions if requested.
            if pool != '^$' or pool_ids:
                try:
                    if pool_ids:
                        result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
                    else:
                        result = rhsm.update_subscriptions(pool)
                except Exception as e:
                    module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
                else:
                    module.exit_json(**result)
            else:
                module.exit_json(changed=False, msg="System already registered.")
        else:
            try:
                rhsm.enable()
                rhsm.configure(**module.params)
                rhsm.register(username, password, auto_attach, activationkey, org_id,
                              consumer_type, consumer_name, consumer_id, force_register,
                              environment, rhsm_baseurl, server_insecure, server_hostname,
                              server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password)
                if pool_ids:
                    subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
                else:
                    subscribed_pool_ids = rhsm.subscribe(pool)
            except Exception as e:
                module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
            else:
                module.exit_json(changed=True,
                                 msg="System successfully registered to '%s'." % server_hostname,
                                 subscribed_pool_ids=subscribed_pool_ids)

    # Ensure system is *not* registered
    if state == 'absent':
        if not rhsm.is_registered:
            module.exit_json(changed=False, msg="System already unregistered.")
        else:
            try:
                rhsm.unsubscribe()
                rhsm.unregister()
            except Exception as e:
                module.fail_json(msg="Failed to unregister: %s" % to_native(e))
            else:
                module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
| gpl-3.0 |
janocat/odoo | addons/account/project/report/analytic_balance.py | 358 | 7060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_analytic_balance(report_sxw.rml_parse):
    """RML parser for the analytic balance report.

    Exposes helpers to the report template for walking the analytic account
    tree and aggregating debit/credit/quantity per account and per general
    account over a date range.
    """

    def __init__(self, cr, uid, name, context):
        super(account_analytic_balance, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_objects': self._get_objects,
            'lines_g': self._lines_g,
            'move_sum': self._move_sum,
            'sum_all': self._sum_all,
            'sum_balance': self._sum_balance,
            'move_sum_balance': self._move_sum_balance,
        })
        self.acc_ids = []
        self.read_data = []
        self.empty_acc = False
        self.acc_data_dict = {}  # maintains a relation with an account with its successors.
        self.acc_sum_list = []  # maintains a list of all ids

    def get_children(self, ids):
        """Recursively collect analytic accounts into self.acc_ids/read_data,
        skipping zero-balance accounts unless self.empty_acc is set."""
        read_data = self.pool.get('account.analytic.account').read(
            self.cr, self.uid, ids, ['child_ids', 'code', 'complete_name', 'balance'])
        for data in read_data:
            if data['id'] not in self.acc_ids:
                include_empty = True
                if (not self.empty_acc) and data['balance'] == 0.00:
                    include_empty = False
                if include_empty:
                    self.acc_ids.append(data['id'])
                    self.read_data.append(data)
                if data['child_ids']:
                    self.get_children(data['child_ids'])
        return True

    def _get_objects(self, empty_acc):
        """Return the flattened account records, computing them on first call."""
        if self.read_data:
            return self.read_data
        self.empty_acc = empty_acc
        self.read_data = []
        self.get_children(self.ids)
        return self.read_data

    def _lines_g(self, account_id, date1, date2):
        """Aggregate analytic lines per general account for account_id and its
        children between date1 and date2."""
        account_analytic_obj = self.pool.get('account.analytic.account')
        ids = account_analytic_obj.search(self.cr, self.uid,
                                          [('parent_id', 'child_of', [account_id])])
        self.cr.execute("SELECT aa.name AS name, aa.code AS code, \
                sum(aal.amount) AS balance, sum(aal.unit_amount) AS quantity \
                FROM account_analytic_line AS aal, account_account AS aa \
                WHERE (aal.general_account_id=aa.id) \
                    AND (aal.account_id IN %s)\
                    AND (date>=%s) AND (date<=%s) AND aa.active \
                GROUP BY aal.general_account_id, aa.name, aa.code, aal.code \
                ORDER BY aal.code", (tuple(ids), date1, date2))
        res = self.cr.dictfetchall()
        # Split the signed balance into debit (positive) and credit (negative).
        for r in res:
            if r['balance'] > 0:
                r['debit'] = r['balance']
                r['credit'] = 0.0
            elif r['balance'] < 0:
                r['debit'] = 0.0
                r['credit'] = -r['balance']
            else:
                # balance is exactly zero. (A stray no-op comparison
                # ``r['balance'] == 0`` used to sit here; removed.)
                r['debit'] = 0.0
                r['credit'] = 0.0
        return res

    def _move_sum(self, account_id, date1, date2, option):
        """Sum 'debit', 'credit' or 'quantity' over account_id and its children
        between date1 and date2. Child ids are cached per account."""
        if account_id not in self.acc_data_dict:
            account_analytic_obj = self.pool.get('account.analytic.account')
            ids = account_analytic_obj.search(self.cr, self.uid, [('parent_id', 'child_of', [account_id])])
            self.acc_data_dict[account_id] = ids
        else:
            ids = self.acc_data_dict[account_id]

        query_params = (tuple(ids), date1, date2)
        if option == "credit":
            self.cr.execute("SELECT COALESCE(-sum(amount),0.0) FROM account_analytic_line \
                    WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", query_params)
        elif option == "debit":
            self.cr.execute("SELECT COALESCE(sum(amount),0.0) FROM account_analytic_line \
                    WHERE account_id IN %s\
                    AND date>=%s AND date<=%s AND amount>0", query_params)
        elif option == "quantity":
            self.cr.execute("SELECT COALESCE(sum(unit_amount),0.0) FROM account_analytic_line \
                    WHERE account_id IN %s\
                    AND date>=%s AND date<=%s", query_params)
        return self.cr.fetchone()[0] or 0.0

    def _move_sum_balance(self, account_id, date1, date2):
        """Net balance (debit - credit) for one account subtree."""
        debit = self._move_sum(account_id, date1, date2, 'debit')
        credit = self._move_sum(account_id, date1, date2, 'credit')
        return (debit - credit)

    def _sum_all(self, accounts, date1, date2, option):
        """Sum 'debit', 'credit' or 'quantity' over all given accounts and
        their children; the combined child-id list is cached."""
        account_analytic_obj = self.pool.get('account.analytic.account')
        ids = [account['id'] for account in accounts]
        if not ids:
            return 0.0

        if not self.acc_sum_list:
            ids2 = account_analytic_obj.search(self.cr, self.uid, [('parent_id', 'child_of', ids)])
            self.acc_sum_list = ids2
        else:
            ids2 = self.acc_sum_list

        query_params = (tuple(ids2), date1, date2)
        if option == "debit":
            self.cr.execute("SELECT COALESCE(sum(amount),0.0) FROM account_analytic_line \
                    WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0", query_params)
        elif option == "credit":
            self.cr.execute("SELECT COALESCE(-sum(amount),0.0) FROM account_analytic_line \
                    WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", query_params)
        elif option == "quantity":
            self.cr.execute("SELECT COALESCE(sum(unit_amount),0.0) FROM account_analytic_line \
                    WHERE account_id IN %s AND date>=%s AND date<=%s", query_params)
        return self.cr.fetchone()[0] or 0.0

    def _sum_balance(self, accounts, date1, date2):
        """Net balance (debit - credit) over all given account subtrees."""
        debit = self._sum_all(accounts, date1, date2, 'debit') or 0.0
        credit = self._sum_all(accounts, date1, date2, 'credit') or 0.0
        return (debit - credit)
class report_analyticbalance(osv.AbstractModel):
    # Declarative QWeb report registration: renders the
    # 'account.report_analyticbalance' template using the
    # account_analytic_balance parser defined above for its helper context.
    _name = 'report.account.report_analyticbalance'
    _inherit = 'report.abstract_report'
    _template = 'account.report_analyticbalance'
    _wrapped_report_class = account_analytic_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sexroute/commandergenius | project/jni/python/src/Lib/test/sample_doctest.py | 228 | 1037 | """This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
def foo():
    # The first example intentionally fails (2+2 is not 5): test_doctest
    # asserts the exact pass/fail counts, so do not "fix" these examples.
    """
    >>> 2+2
    5
    >>> 2+2
    4
    """
def bar():
    # Passing example; the expected output must stay exactly as written.
    """
    >>> 2+2
    4
    """
def test_silly_setup():
    # Passes only when test_doctest's setUp has injected
    # test.test_doctest.sillySetup; that dependency is the point of the test.
    """
    >>> import test.test_doctest
    >>> test.test_doctest.sillySetup
    True
    """
def w_blank():
    # Demonstrates the <BLANKLINE> marker: the bare `print` emits an
    # empty line, which must be matched explicitly in expected output.
    """
    >>> if 1:
    ...    print 'a'
    ...    print
    ...    print 'b'
    a
    <BLANKLINE>
    b
    """
# Module-level name referenced by the doctest below.
x = 1

def x_is_one():
    # Shows that module globals are visible inside doctests.
    """
    >>> x
    1
    """
def y_is_one():
    # Intentionally failing doctest: `y` is never defined, so this
    # raises NameError; the failure is counted by test_doctest.
    """
    >>> y
    1
    """
# Extra doctests discovered through the __test__ mapping: one passing
# ('good') and one intentionally failing ('bad'); both counts are
# asserted by test_doctest, so leave the expected values alone.
__test__ = {'good': """
>>> 42
42
""",
            'bad': """
>>> 42
666
""",
            }
def test_suite():
    # Expose this module's doctests as a unittest-compatible suite.
    import doctest
    return doctest.DocTestSuite()
| lgpl-2.1 |
wang1352083/pythontool | python-2.7.12-lib/lib2to3/btm_matcher.py | 326 | 6834 | """A bottom-up tree matching algorithm implementation meant to speed
up 2to3's matching process. After the tree patterns are reduced to
their rarest linear path, a linear Aho-Corasick automaton is
created. The linear automaton traverses the linear paths from the
leaves to the root of the AST and returns a set of nodes for further
matching. This reduces significantly the number of candidate nodes."""
__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"
import logging
import itertools
from collections import defaultdict
from . import pytree
from .btm_utils import reduce_tree
class BMNode(object):
    """A single state of the Aho-Corasick matching automaton.

    Holds the outgoing transitions, the fixers that fire when this
    state is reached, a unique id, and a content string used only by
    the debugging printer.
    """

    # Shared counter handing every node a unique id in creation order.
    count = itertools.count()

    def __init__(self):
        self.id = next(BMNode.count)
        self.fixers = []             # fixers matching at this state
        self.transition_table = {}   # token/type -> successor BMNode
        self.content = ''
class BottomMatcher(object):
    """The main matcher class. After instantiating the patterns should
    be added using the add_fixer method"""

    def __init__(self):
        self.match = set()
        self.root = BMNode()          # start state of the automaton
        self.nodes = [self.root]
        self.fixers = []
        self.logger = logging.getLogger("RefactoringTool")

    def add_fixer(self, fixer):
        """Reduces a fixer's pattern tree to a linear path and adds it
        to the matcher(a common Aho-Corasick automaton). The fixer is
        appended on the matching states and called when they are
        reached"""
        self.fixers.append(fixer)
        tree = reduce_tree(fixer.pattern_tree)
        linear = tree.get_linear_subpattern()
        match_nodes = self.add(linear, start=self.root)
        for match_node in match_nodes:
            match_node.fixers.append(fixer)

    def add(self, pattern, start):
        "Recursively adds a linear pattern to the AC automaton"
        if not pattern:
            # Empty pattern: the start state itself is the match state.
            return [start]
        if isinstance(pattern[0], tuple):
            # Alternatives: add each one, then append the rest of the
            # pattern to every resulting end node.
            match_nodes = []
            for alternative in pattern[0]:
                end_nodes = self.add(alternative, start=start)
                for end in end_nodes:
                    match_nodes.extend(self.add(pattern[1:], end))
            return match_nodes
        else:
            # Single token: create or follow the transition for it.
            if pattern[0] not in start.transition_table:
                next_node = BMNode()
                start.transition_table[pattern[0]] = next_node
            else:
                next_node = start.transition_table[pattern[0]]
            if pattern[1:]:
                end_nodes = self.add(pattern[1:], start=next_node)
            else:
                end_nodes = [next_node]
            return end_nodes

    def run(self, leaves):
        """The main interface with the bottom matcher. The tree is
        traversed from the bottom using the constructed
        automaton. Nodes are only checked once as the tree is
        retraversed. When the automaton fails, we give it one more
        shot(in case the above tree matches as a whole with the
        rejected leaf), then we break for the next leaf. There is the
        special case of multiple arguments(see code comments) where we
        recheck the nodes

        Args:
            The leaves of the AST tree to be matched

        Returns:
            A dictionary of node matches with fixers as the keys
        """
        current_ac_node = self.root
        # defaultdict(list) creates the per-fixer list on first append,
        # so no explicit "results[fixer] = []" initialisation is needed
        # (the old code also did an O(n) membership test via .keys()).
        results = defaultdict(list)
        for leaf in leaves:
            current_ast_node = leaf
            while current_ast_node:
                current_ast_node.was_checked = True
                for child in current_ast_node.children:
                    # multiple statements, recheck
                    if isinstance(child, pytree.Leaf) and child.value == u";":
                        current_ast_node.was_checked = False
                        break
                if current_ast_node.type == 1:
                    # type 1 is a NAME token; match on its value
                    node_token = current_ast_node.value
                else:
                    node_token = current_ast_node.type

                if node_token in current_ac_node.transition_table:
                    # token matches: advance and record any fixers that
                    # fire at the new state
                    current_ac_node = current_ac_node.transition_table[node_token]
                    for fixer in current_ac_node.fixers:
                        results[fixer].append(current_ast_node)
                else:
                    # matching failed, reset automaton
                    current_ac_node = self.root
                    if (current_ast_node.parent is not None
                            and current_ast_node.parent.was_checked):
                        # the rest of the tree upwards has been checked,
                        # next leaf
                        break

                    # recheck the rejected node once from the root
                    if node_token in current_ac_node.transition_table:
                        current_ac_node = current_ac_node.transition_table[node_token]
                        for fixer in current_ac_node.fixers:
                            results[fixer].append(current_ast_node)

                current_ast_node = current_ast_node.parent
        return results

    def print_ac(self):
        "Prints a graphviz diagram of the BM automaton(for debugging)"
        print("digraph g{")

        def print_node(node):
            # Depth-first dump of every transition edge.
            for subnode_key, subnode in node.transition_table.items():
                print("%d -> %d [label=%s] //%s" %
                      (node.id, subnode.id, type_repr(subnode_key),
                       str(subnode.fixers)))
                if subnode_key == 1:
                    print(subnode.content)
                print_node(subnode)

        print_node(self.root)
        print("}")
# taken from pytree.py for debugging; only used by print_ac
_type_reprs = {}


def type_repr(type_num):
    """Return the symbol name for *type_num*, or the number itself.

    The symbol-number -> name table is built lazily on first use.
    """
    global _type_reprs
    if not _type_reprs:
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        _type_reprs = {num: name
                       for name, num in python_symbols.__dict__.items()
                       if type(num) == int}
    return _type_reprs.setdefault(type_num, type_num)
| mit |
Snergster/virl-salt | openstack/nova/files/kilo/manager.py | 2 | 322679 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import contextlib
import functools
import socket
import sys
import time
import traceback
import uuid
from cinderclient import exceptions as cinder_exception
import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
from keystoneclient import exceptions as keystone_exception
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import build_results
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import loopingcall
from nova.openstack.common import periodic_task
from nova import paths
from nova import rpc
from nova import safe_utils
from nova.scheduler import client as scheduler_client
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
from nova.volume import encryptors
# Core compute options: console access, instance storage location,
# build concurrency and allocation retry counts.
compute_opts = [
    cfg.StrOpt('console_host',
               default=socket.gethostname(),
               help='Console proxy host to use to connect '
                    'to instances on this host.'),
    cfg.StrOpt('default_access_ip_network_name',
               help='Name of network to use to set access IPs for instances'),
    cfg.BoolOpt('defer_iptables_apply',
                default=False,
                help='Whether to batch up the application of IPTables rules'
                     ' during a host restart and apply all at the end of the'
                     ' init phase'),
    cfg.StrOpt('instances_path',
               default=paths.state_path_def('instances'),
               help='Where instances are stored on disk'),
    cfg.BoolOpt('instance_usage_audit',
                default=False,
                help="Generate periodic compute.instance.exists"
                     " notifications"),
    cfg.IntOpt('live_migration_retry_count',
               default=30,
               help="Number of 1 second retries needed in live_migration"),
    cfg.BoolOpt('resume_guests_state_on_host_boot',
                default=False,
                help='Whether to start guests that were running before the '
                     'host rebooted'),
    cfg.IntOpt('network_allocate_retries',
               default=0,
               help="Number of times to retry network allocation on failures"),
    cfg.IntOpt('max_concurrent_builds',
               default=10,
               help='Maximum number of instance builds to run concurrently'),
    cfg.IntOpt('block_device_allocate_retries',
               default=60,
               help='Number of times to retry block device'
                    ' allocation on failures')
]

# Periodic-task intervals (seconds); by convention -1 disables a task
# and 0 means "run at the default rate".
interval_opts = [
    cfg.IntOpt('bandwidth_poll_interval',
               default=600,
               help='Interval to pull network bandwidth usage info. Not '
                    'supported on all hypervisors. Set to -1 to disable. '
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt('sync_power_state_interval',
               default=600,
               help='Interval to sync power states between the database and '
                    'the hypervisor. Set to -1 to disable. '
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt("heal_instance_info_cache_interval",
               default=60,
               help="Number of seconds between instance network information "
                    "cache updates"),
    cfg.IntOpt('reclaim_instance_interval',
               default=0,
               help='Interval in seconds for reclaiming deleted instances'),
    cfg.IntOpt('volume_usage_poll_interval',
               default=0,
               help='Interval in seconds for gathering volume usages'),
    cfg.IntOpt('shelved_poll_interval',
               default=3600,
               help='Interval in seconds for polling shelved instances to '
                    'offload. Set to -1 to disable.'
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt('shelved_offload_time',
               default=0,
               help='Time in seconds before a shelved instance is eligible '
                    'for removing from a host. -1 never offload, 0 offload '
                    'immediately when shelved'),
    cfg.IntOpt('instance_delete_interval',
               default=300,
               help='Interval in seconds for retrying failed instance file '
                    'deletes. Set to -1 to disable. '
                    'Setting this to 0 will run at the default rate.'),
    cfg.IntOpt('block_device_allocate_retries_interval',
               default=3,
               help='Waiting time interval (seconds) between block'
                    ' device allocation retries on failures'),
    cfg.IntOpt('scheduler_instance_sync_interval',
               default=120,
               help='Waiting time interval (seconds) between sending the '
                    'scheduler a list of current instance UUIDs to verify '
                    'that its view of instances is in sync with nova. If the '
                    'CONF option `scheduler_tracks_instance_changes` is '
                    'False, changing this option will have no effect.'),
]

# Timeouts for automatic state recovery; 0 disables each one.
timeout_opts = [
    cfg.IntOpt("reboot_timeout",
               default=0,
               help="Automatically hard reboot an instance if it has been "
                    "stuck in a rebooting state longer than N seconds. "
                    "Set to 0 to disable."),
    cfg.IntOpt("instance_build_timeout",
               default=0,
               help="Amount of time in seconds an instance can be in BUILD "
                    "before going into ERROR status. "
                    "Set to 0 to disable."),
    cfg.IntOpt("rescue_timeout",
               default=0,
               help="Automatically unrescue an instance after N seconds. "
                    "Set to 0 to disable."),
    cfg.IntOpt("resize_confirm_window",
               default=0,
               help="Automatically confirm resizes after N seconds. "
                    "Set to 0 to disable."),
    cfg.IntOpt("shutdown_timeout",
               default=60,
               help="Total amount of time to wait in seconds for an instance "
                    "to perform a clean shutdown."),
]

# Policy for instances that are still running but deleted in the DB.
running_deleted_opts = [
    cfg.StrOpt("running_deleted_instance_action",
               default="reap",
               help="Action to take if a running deleted instance is detected."
                    " Valid options are 'noop', 'log', 'shutdown', or 'reap'. "
                    "Set to 'noop' to take no action."),
    cfg.IntOpt("running_deleted_instance_poll_interval",
               default=1800,
               help="Number of seconds to wait between runs of the cleanup "
                    "task."),
    cfg.IntOpt("running_deleted_instance_timeout",
               default=0,
               help="Number of seconds after being deleted when a running "
                    "instance should be considered eligible for cleanup."),
]

instance_cleaning_opts = [
    cfg.IntOpt('maximum_instance_delete_attempts',
               default=5,
               help='The number of times to attempt to reap an instance\'s '
                    'files.'),
]

# Register everything above and pull in options owned by other modules
# that this manager reads.
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.rdp', group='rdp')
CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp')
CONF.import_opt('enabled', 'nova.console.serial', group='serial_console')
CONF.import_opt('base_url', 'nova.console.serial', group='serial_console')
CONF.import_opt('destroy_after_evacuate', 'nova.utils', group='workarounds')
CONF.import_opt('scheduler_tracks_instance_changes',
                'nova.scheduler.host_manager')

LOG = logging.getLogger(__name__)

# Notifier and exception-wrapper helpers pre-bound to the 'compute' service.
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
                                   get_notifier=get_notifier)
@utils.expects_func_args('migration')
def errors_out_migration(function):
    """Decorator to error out migration on failure.

    On any exception the wrapped method's 'migration' argument is set
    to status 'error' (best effort) and the original exception is
    re-raised.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except Exception as ex:
            # save_and_reraise_exception re-raises the original error
            # once this block completes.
            with excutils.save_and_reraise_exception():
                wrapped_func = utils.get_wrapped_function(function)
                keyed_args = safe_utils.getcallargs(wrapped_func, context,
                                                    *args, **kwargs)
                migration = keyed_args['migration']

                # NOTE(rajesht): If InstanceNotFound error is thrown from
                # decorated function, migration status should be set to
                # 'error', without checking current migration status.
                if not isinstance(ex, exception.InstanceNotFound):
                    status = migration.status
                    if status not in ['migrating', 'post-migrating']:
                        return

                migration.status = 'error'
                try:
                    with migration.obj_as_admin():
                        migration.save()
                except Exception:
                    # Failing to persist the error status must not mask
                    # the original exception.
                    LOG.debug('Error setting migration status '
                              'for instance %s.',
                              migration.instance_uuid, exc_info=True)

    return decorated_function
@utils.expects_func_args('instance')
def reverts_task_state(function):
    """Decorator to revert task_state on failure.

    Clears the instance's task_state when the wrapped method raises,
    except when the task was preempted (UnexpectedTaskStateError).
    The original exception is always re-raised.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except exception.UnexpectedTaskStateError as e:
            # Note(maoy): unexpected task state means the current
            # task is preempted. Do not clear task state in this
            # case.
            with excutils.save_and_reraise_exception():
                LOG.info(_LI("Task possibly preempted: %s"),
                         e.format_message())
        except Exception:
            with excutils.save_and_reraise_exception():
                wrapped_func = utils.get_wrapped_function(function)
                keyed_args = safe_utils.getcallargs(wrapped_func, context,
                                                    *args, **kwargs)
                # NOTE(mriedem): 'instance' must be in keyed_args because we
                # have utils.expects_func_args('instance') decorating this
                # method.
                instance_uuid = keyed_args['instance']['uuid']
                try:
                    self._instance_update(context,
                                          instance_uuid,
                                          task_state=None)
                except exception.InstanceNotFound:
                    # We might delete an instance that failed to build shortly
                    # after it errored out this is an expected case and we
                    # should not trace on it.
                    pass
                except Exception as e:
                    msg = _LW("Failed to revert task state for instance. "
                              "Error: %s")
                    LOG.warning(msg, e, instance_uuid=instance_uuid)

    return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_fault(function):
    """Wraps a method to catch exceptions related to instances.

    This decorator wraps a method to catch any exceptions having to do with
    an instance that may get thrown. It then logs an instance fault in the db.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except exception.InstanceNotFound:
            # A vanished instance cannot have a fault recorded; re-raise.
            raise
        except Exception as e:
            # NOTE(gtt): If argument 'instance' is in args rather than kwargs,
            # we will get a KeyError exception which will cover up the real
            # exception. So, we update kwargs with the values from args first.
            # then, we can get 'instance' from kwargs easily.
            kwargs.update(dict(zip(function.func_code.co_varnames[2:], args)))
            with excutils.save_and_reraise_exception():
                compute_utils.add_instance_fault_from_exc(context,
                        kwargs['instance'], e, sys.exc_info())

    return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_event(function):
    """Wraps a method to log the event taken on the instance, and result.

    This decorator wraps a method to log the start and result of an event, as
    part of an action taken on an instance.
    """
    @functools.wraps(function)
    def _event_logged(self, context, *args, **kwargs):
        call_args = safe_utils.getcallargs(
            utils.get_wrapped_function(function), context, *args, **kwargs)
        uuid = call_args['instance']['uuid']
        event_name = 'compute_{0}'.format(function.func_name)
        # EventReporter records start/finish (and any error) of the action.
        with compute_utils.EventReporter(context, event_name, uuid):
            return function(self, context, *args, **kwargs)

    return _event_logged
@utils.expects_func_args('image_id', 'instance')
def delete_image_on_error(function):
    """Used for snapshot related method to ensure the image created in
    compute.api is deleted when an error occurs.
    """
    @functools.wraps(function)
    def decorated_function(self, context, image_id, instance,
                           *args, **kwargs):
        try:
            return function(self, context, image_id, instance,
                            *args, **kwargs)
        except Exception:
            # Best-effort cleanup; the original exception is re-raised
            # when this block exits.
            with excutils.save_and_reraise_exception():
                LOG.debug("Cleaning up image %s", image_id,
                          exc_info=True, instance=instance)
                try:
                    self.image_api.delete(context, image_id)
                except Exception:
                    # The cleanup itself failed; log it without masking
                    # the original error.
                    LOG.exception(_LE("Error while trying to clean up "
                                      "image %s"), image_id,
                                  instance=instance)

    return decorated_function
# TODO(danms): Remove me after Icehouse
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
    """Wraps a method that expects a new-world instance

    This provides compatibility for callers passing old-style dict
    instances.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        def _load_instance(instance_or_dict):
            # Convert a legacy dict into an objects.Instance; pass
            # real objects through untouched.
            if isinstance(instance_or_dict, dict):
                # try to get metadata and system_metadata for most cases but
                # only attempt to load those if the db instance already has
                # those fields joined
                metas = [meta for meta in ('metadata', 'system_metadata')
                         if meta in instance_or_dict]
                instance = objects.Instance._from_db_object(
                    context, objects.Instance(), instance_or_dict,
                    expected_attrs=metas)
                instance._context = context
                return instance
            return instance_or_dict

        try:
            kwargs['instance'] = _load_instance(kwargs['instance'])
        except KeyError:
            # 'instance' was passed positionally; convert the first
            # positional argument instead.
            args = (_load_instance(args[0]),) + args[1:]

        migration = kwargs.get('migration')
        if isinstance(migration, dict):
            migration = objects.Migration._from_db_object(
                context.elevated(), objects.Migration(),
                migration)
            kwargs['migration'] = migration

        return function(self, context, *args, **kwargs)
    return decorated_function
# TODO(danms): Remove me after Icehouse
def aggregate_object_compat(function):
    """Wraps a method that expects a new-world aggregate."""
    @functools.wraps(function)
    def wrapper(self, context, *args, **kwargs):
        agg = kwargs.get('aggregate')
        if isinstance(agg, dict):
            # Legacy dict caller: hydrate a real Aggregate object.
            kwargs['aggregate'] = objects.Aggregate._from_db_object(
                context.elevated(), objects.Aggregate(), agg)
        return function(self, context, *args, **kwargs)
    return wrapper
class InstanceEvents(object):
    """Tracks pending external events per instance.

    Maps instance uuid -> {event_name: eventlet.event.Event} so code
    waiting on an external event can block until it arrives.  Setting
    ``self._events`` to None marks the tracker as shut down; no new
    events may be scheduled after that.
    """
    def __init__(self):
        self._events = {}

    @staticmethod
    def _lock_name(instance):
        # One synchronization lock per instance guards its event dict.
        return '%s-%s' % (instance.uuid, 'events')

    def prepare_for_instance_event(self, instance, event_name):
        """Prepare to receive an event for an instance.

        This will register an event for the given instance that we will
        wait on later. This should be called before initiating whatever
        action will trigger the event. The resulting eventlet.event.Event
        object should be wait()'d on to ensure completion.

        :param instance: the instance for which the event will be generated
        :param event_name: the name of the event we're expecting
        :returns: an event object that should be wait()'d on
        """
        if self._events is None:
            # NOTE(danms): We really should have a more specific error
            # here, but this is what we use for our default error case
            raise exception.NovaException('In shutdown, no new events '
                                          'can be scheduled')

        @utils.synchronized(self._lock_name(instance))
        def _create_or_get_event():
            if instance.uuid not in self._events:
                self._events.setdefault(instance.uuid, {})
            return self._events[instance.uuid].setdefault(
                event_name, eventlet.event.Event())
        LOG.debug('Preparing to wait for external event %(event)s',
                  {'event': event_name}, instance=instance)
        return _create_or_get_event()

    def pop_instance_event(self, instance, event):
        """Remove a pending event from the wait list.

        This will remove a pending event from the wait list so that it
        can be used to signal the waiters to wake up.

        :param instance: the instance for which the event was generated
        :param event: the nova.objects.external_event.InstanceExternalEvent
                      that describes the event
        :returns: the eventlet.event.Event object on which the waiters
                  are blocked
        """
        # Sentinels distinguish "no events at all" from "no matching
        # event" once outside the synchronized helper.
        no_events_sentinel = object()
        no_matching_event_sentinel = object()

        @utils.synchronized(self._lock_name(instance))
        def _pop_event():
            if not self._events:
                LOG.debug('Unexpected attempt to pop events during shutdown',
                          instance=instance)
                return no_events_sentinel
            events = self._events.get(instance.uuid)
            if not events:
                return no_events_sentinel
            _event = events.pop(event.key, None)
            if not events:
                # Last pending event for this instance: drop its entry.
                del self._events[instance.uuid]
            if _event is None:
                return no_matching_event_sentinel
            return _event

        result = _pop_event()
        if result is no_events_sentinel:
            LOG.debug('No waiting events found dispatching %(event)s',
                      {'event': event.key},
                      instance=instance)
            return None
        elif result is no_matching_event_sentinel:
            LOG.debug('No event matching %(event)s in %(events)s',
                      {'event': event.key,
                       'events': self._events.get(instance.uuid, {}).keys()},
                      instance=instance)
            return None
        else:
            return result

    def clear_events_for_instance(self, instance):
        """Remove all pending events for an instance.

        This will remove all events currently pending for an instance
        and return them (indexed by event name).

        :param instance: the instance for which events should be purged
        :returns: a dictionary of {event_name: eventlet.event.Event}
        """
        @utils.synchronized(self._lock_name(instance))
        def _clear_events():
            if self._events is None:
                LOG.debug('Unexpected attempt to clear events during shutdown',
                          instance=instance)
                return dict()
            return self._events.pop(instance.uuid, {})
        return _clear_events()

    def cancel_all_events(self):
        # Snapshot the current events, then block all new registrations.
        our_events = self._events
        # NOTE(danms): Block new events
        self._events = None

        for instance_uuid, events in our_events.items():
            for event_name, eventlet_event in events.items():
                LOG.debug('Canceling in-flight event %(event)s for '
                          'instance %(instance_uuid)s',
                          {'event': event_name,
                           'instance_uuid': instance_uuid})
                # Event names are '<name>-<tag>'; wake waiters with a
                # synthetic 'failed' event.
                name, tag = event_name.split('-', 1)
                event = objects.InstanceExternalEvent(
                    instance_uuid=instance_uuid,
                    name=name, status='failed',
                    tag=tag, data={})
                eventlet_event.send(event)
class ComputeVirtAPI(virtapi.VirtAPI):
    """VirtAPI implementation backed by a ComputeManager.

    Gives the virt driver a narrow window back into the compute
    manager (conductor calls and external-event waiting).
    """
    def __init__(self, compute):
        super(ComputeVirtAPI, self).__init__()
        self._compute = compute

    def provider_fw_rule_get_all(self, context):
        # Straight pass-through to the conductor API.
        return self._compute.conductor_api.provider_fw_rule_get_all(context)

    def _default_error_callback(self, event_name, instance):
        # Used when wait_for_instance_event's caller supplied no
        # error_callback of its own.
        raise exception.NovaException(_('Instance event failed'))

    @contextlib.contextmanager
    def wait_for_instance_event(self, instance, event_names, deadline=300,
                                error_callback=None):
        """Plan to wait for some events, run some code, then wait.

        This context manager will first create plans to wait for the
        provided event_names, yield, and then wait for all the scheduled
        events to complete.

        Note that this uses an eventlet.timeout.Timeout to bound the
        operation, so callers should be prepared to catch that
        failure and handle that situation appropriately.

        If the event is not received by the specified timeout deadline,
        eventlet.timeout.Timeout is raised.

        If the event is received but did not have a 'completed'
        status, a NovaException is raised.  If an error_callback is
        provided, instead of raising an exception as detailed above
        for the failure case, the callback will be called with the
        event_name and instance, and can return True to continue
        waiting for the rest of the events, False to stop processing,
        or raise an exception which will bubble up to the waiter.

        :param instance: The instance for which an event is expected
        :param event_names: A list of event names. Each element can be a
                            string event name or tuple of strings to
                            indicate (name, tag).
        :param deadline: Maximum number of seconds we should wait for all
                         of the specified events to arrive.
        :param error_callback: A function to be called if an event arrives
        """
        if error_callback is None:
            error_callback = self._default_error_callback
        events = {}
        for event_name in event_names:
            if isinstance(event_name, tuple):
                # (name, tag) tuples are collapsed to the canonical key.
                name, tag = event_name
                event_name = objects.InstanceExternalEvent.make_key(
                    name, tag)
            try:
                events[event_name] = (
                    self._compute.instance_events.prepare_for_instance_event(
                        instance, event_name))
            except exception.NovaException:
                error_callback(event_name, instance)
                # NOTE(danms): Don't wait for any of the events. They
                # should all be canceled and fired immediately below,
                # but don't stick around if not.
                deadline = 0
        yield
        with eventlet.timeout.Timeout(deadline):
            for event_name, event in events.items():
                actual_event = event.wait()
                if actual_event.status == 'completed':
                    continue
                decision = error_callback(event_name, instance)
                if decision is False:
                    break
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='3.40')
# How long to wait in seconds before re-issuing a shutdown
# signal to a instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, compute_driver=None, *args, **kwargs):
    """Load configuration options and connect to the hypervisor.

    :param compute_driver: optional driver name override; when None the
                           configured default driver is loaded.
    """
    self.virtapi = ComputeVirtAPI(self)
    self.network_api = network.API()
    self.volume_api = volume.API()
    self.image_api = image.API()
    self._last_host_check = 0
    self._last_bw_usage_poll = 0
    self._bw_usage_supported = True
    self._last_bw_usage_cell_update = 0
    self.compute_api = compute.API()
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.conductor_api = conductor.API()
    self.compute_task_api = conductor.ComputeTaskAPI()
    self.is_neutron_security_groups = (
        openstack_driver.is_neutron_security_groups())
    self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
    self.cells_rpcapi = cells_rpcapi.CellsAPI()
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
    self.scheduler_client = scheduler_client.SchedulerClient()
    self._resource_tracker_dict = {}
    self.instance_events = InstanceEvents()
    self._sync_power_pool = eventlet.GreenPool()
    self._syncs_in_progress = {}
    self.send_instance_updates = CONF.scheduler_tracks_instance_changes
    # Bound the number of concurrent instance builds; 0 means unlimited.
    if CONF.max_concurrent_builds != 0:
        self._build_semaphore = eventlet.semaphore.Semaphore(
            CONF.max_concurrent_builds)
    else:
        self._build_semaphore = compute_utils.UnlimitedSemaphore()

    super(ComputeManager, self).__init__(service_name="compute",
                                         *args, **kwargs)
    self.additional_endpoints.append(_ComputeV4Proxy(self))

    # NOTE(russellb) Load the driver last.  It may call back into the
    # compute manager via the virtapi, so we want it to be fully
    # initialized before that happens.
    self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
    self.use_legacy_block_device_info = \
        self.driver.need_legacy_block_device_info
def _get_resource_tracker(self, nodename):
    """Return the ResourceTracker for *nodename*, creating and caching
    it on first use.

    :raises: exception.NovaException when the driver does not manage a
             node by that name.
    """
    tracker = self._resource_tracker_dict.get(nodename)
    if not tracker:
        if not self.driver.node_is_available(nodename):
            msg = _("%s is not a valid node managed by this "
                    "compute host.") % nodename
            raise exception.NovaException(msg)
        tracker = resource_tracker.ResourceTracker(
            self.host, self.driver, nodename)
        self._resource_tracker_dict[nodename] = tracker
    return tracker
def _update_resource_tracker(self, context, instance):
    """Let the resource tracker know that an instance has changed state."""
    if instance['host'] != self.host:
        return
    if self.driver.node_is_available(instance['node']):
        tracker = self._get_resource_tracker(instance.get('node'))
        tracker.update_usage(context, instance)
def _instance_update(self, context, instance_uuid, **kwargs):
    """Update an instance in the database using kwargs as value."""
    updated = self.conductor_api.instance_update(
        context, instance_uuid, **kwargs)
    # Keep the resource tracker in sync with the new DB state.
    self._update_resource_tracker(context, updated)
    return updated
def _set_instance_error_state(self, context, instance):
    """Best-effort flip of the instance's vm_state to ERROR in the DB."""
    instance_uuid = instance.uuid
    try:
        self._instance_update(
            context, instance_uuid, vm_state=vm_states.ERROR)
    except exception.InstanceNotFound:
        # The instance was deleted while we raced; nothing left to flag.
        LOG.debug('Instance has been destroyed from under us while '
                  'trying to set it to ERROR',
                  instance_uuid=instance_uuid)
def _set_instance_obj_error_state(self, context, instance):
    # Best-effort: the instance object may have been deleted under us,
    # in which case there is nothing left to mark as ERROR.
    try:
        instance.vm_state = vm_states.ERROR
        instance.save()
    except exception.InstanceNotFound:
        LOG.debug('Instance has been destroyed from under us while '
                  'trying to set it to ERROR', instance=instance)
def _get_instances_on_driver(self, context, filters=None):
    """Return a list of instance records for the instances found
    on the hypervisor which satisfy the specified filters. If filters=None
    return a list of instance records for all the instances found on the
    hypervisor.
    """
    if not filters:
        filters = {}
    try:
        # Fast path: drivers that can list uuids let us filter in one
        # DB query.
        driver_uuids = self.driver.list_instance_uuids()
        if len(driver_uuids) == 0:
            # Short circuit, don't waste a DB call
            return objects.InstanceList()
        filters['uuid'] = driver_uuids
        local_instances = objects.InstanceList.get_by_filters(
            context, filters, use_slave=True)
        return local_instances
    except NotImplementedError:
        pass

    # The driver doesn't support uuids listing, so we'll have
    # to brute force.
    driver_instances = self.driver.list_instances()
    instances = objects.InstanceList.get_by_filters(context, filters,
                                                    use_slave=True)
    name_map = {instance.name: instance for instance in instances}
    local_instances = []
    for driver_instance in driver_instances:
        # Driver names with no matching DB record are skipped.
        instance = name_map.get(driver_instance)
        if not instance:
            continue
        local_instances.append(instance)
    return local_instances
    def _destroy_evacuated_instances(self, context):
        """Destroys evacuated instances.
        While nova-compute was down, the instances running on it could be
        evacuated to another host. Check that the instances reported
        by the driver are still associated with this host. If they are
        not, destroy them, with the exception of instances which are in
        the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH
        task state or RESIZED vm state.
        """
        our_host = self.host
        filters = {'deleted': False}
        # Instances the hypervisor still knows about, regardless of which
        # host the DB currently assigns them to.
        local_instances = self._get_instances_on_driver(context, filters)
        for instance in local_instances:
            if instance.host != our_host:
                # Skip instances that are mid-migration/resize: their host
                # field legitimately points elsewhere while they still run
                # here, so destroying them would be destructive.
                if (instance.task_state in [task_states.MIGRATING,
                                            task_states.RESIZE_MIGRATING,
                                            task_states.RESIZE_MIGRATED,
                                            task_states.RESIZE_FINISH]
                    or instance.vm_state in [vm_states.RESIZED]):
                    LOG.debug('Will not delete instance as its host ('
                              '%(instance_host)s) is not equal to our '
                              'host (%(our_host)s) but its task state is '
                              '(%(task_state)s) and vm state is '
                              '(%(vm_state)s)',
                              {'instance_host': instance.host,
                               'our_host': our_host,
                               'task_state': instance.task_state,
                               'vm_state': instance.vm_state},
                              instance=instance)
                    continue
                # Operator opt-out: leave the local copy alone and only warn.
                if not CONF.workarounds.destroy_after_evacuate:
                    LOG.warning(_LW('Instance %(uuid)s appears to have been '
                                    'evacuated from this host to %(host)s. '
                                    'Not destroying it locally due to '
                                    'config setting '
                                    '"workarounds.destroy_after_evacuate". '
                                    'If this is not correct, enable that '
                                    'option and restart nova-compute.'),
                                {'uuid': instance.uuid,
                                 'host': instance.host})
                    continue
                LOG.info(_LI('Deleting instance as its host ('
                             '%(instance_host)s) is not equal to our '
                             'host (%(our_host)s).'),
                         {'instance_host': instance.host,
                          'our_host': our_host}, instance=instance)
                try:
                    network_info = self._get_instance_nw_info(context,
                                                              instance)
                    bdi = self._get_instance_block_device_info(context,
                                                               instance)
                    # Only wipe disks if they are not on shared storage, as
                    # the evacuated instance on the new host may be using
                    # the same backing files.
                    destroy_disks = not (self._is_instance_storage_shared(
                                                            context, instance))
                except exception.InstanceNotFound:
                    network_info = network_model.NetworkInfo()
                    bdi = {}
                    LOG.info(_LI('Instance has been marked deleted already, '
                                 'removing it from the hypervisor.'),
                             instance=instance)
                    # always destroy disks if the instance was deleted
                    destroy_disks = True
                self.driver.destroy(context, instance,
                                    network_info,
                                    bdi, destroy_disks)
def _is_instance_storage_shared(self, context, instance, host=None):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
instance, data, host=host))
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage'),
instance=instance)
shared_storage = False
except Exception:
LOG.exception(_LE('Failed to check if instance shared'),
instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
system_meta = instance.system_metadata
instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_instance(context,
instance)
quotas.reserve(project_id=project_id, user_id=user_id, instances=-1,
cores=-instance.vcpus, ram=-instance.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
if CONF.vnc_enabled or CONF.spice.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(context,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(context,
instance.uuid)
self._delete_scheduler_instance_info(context, instance.uuid)
def _create_reservations(self, context, instance, project_id, user_id):
vcpus = instance.vcpus
mem_mb = instance.memory_mb
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-vcpus,
ram=-mem_mb)
return quotas
    def _init_instance(self, context, instance):
        '''Initialize this instance during service init.

        Runs once per instance owned by this host when nova-compute starts.
        Each block below handles one interrupted-operation case (partial
        build, rebuild, snapshot, delete, reboot, stop/start, resize,
        live migration) and either repairs, retries, or errors out the
        instance. Order of the checks is significant.
        '''
        # NOTE(danms): If the instance appears to not be owned by this
        # host, it may have been evacuated away, but skipped by the
        # evacuation cleanup code due to configuration. Thus, if that
        # is a possibility, don't touch the instance in any way, but
        # log the concern. This will help avoid potential issues on
        # startup due to misconfiguration.
        if instance.host != self.host:
            LOG.warning(_LW('Instance %(uuid)s appears to not be owned '
                            'by this host, but by %(host)s. Startup '
                            'processing is being skipped.'),
                        {'uuid': instance.uuid,
                         'host': instance.host})
            return
        # Instances that are shut down, or in an error state can not be
        # initialized and are not attempted to be recovered. The exception
        # to this are instances that are in RESIZE_MIGRATING or DELETING,
        # which are dealt with further down.
        if (instance.vm_state == vm_states.SOFT_DELETED or
            (instance.vm_state == vm_states.ERROR and
            instance.task_state not in
                (task_states.RESIZE_MIGRATING, task_states.DELETING))):
            LOG.debug("Instance is in %s state.",
                      instance.vm_state, instance=instance)
            return
        # DB says DELETED but the record is not yet marked deleted: finish
        # the deletion bookkeeping.
        if instance.vm_state == vm_states.DELETED:
            try:
                self._complete_partial_deletion(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to complete a deletion')
                LOG.exception(msg, instance=instance)
            return
        if (instance.vm_state == vm_states.BUILDING or
            instance.task_state in [task_states.SCHEDULING,
                                    task_states.BLOCK_DEVICE_MAPPING,
                                    task_states.NETWORKING,
                                    task_states.SPAWNING]):
            # NOTE(dave-mcnally) compute stopped before instance was fully
            # spawned so set to ERROR state. This is safe to do as the state
            # may be set by the api but the host is not so if we get here the
            # instance has already been scheduled to this particular host.
            LOG.debug("Instance failed to spawn correctly, "
                      "setting to ERROR state", instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
        if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
            instance.task_state in [task_states.REBUILDING,
                                    task_states.REBUILD_BLOCK_DEVICE_MAPPING,
                                    task_states.REBUILD_SPAWNING]):
            # NOTE(jichenjc) compute stopped before instance was fully
            # spawned so set to ERROR state. This is consistent to BUILD
            LOG.debug("Instance failed to rebuild correctly, "
                      "setting to ERROR state", instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
        # Interrupted snapshot: clean up driver-side leftovers and clear
        # the task state so the instance becomes usable again.
        if (instance.vm_state != vm_states.ERROR and
            instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
                                    task_states.IMAGE_PENDING_UPLOAD,
                                    task_states.IMAGE_UPLOADING,
                                    task_states.IMAGE_SNAPSHOT]):
            LOG.debug("Instance in transitional state %s at start-up "
                      "clearing task state",
                      instance.task_state, instance=instance)
            try:
                self._post_interrupted_snapshot_cleanup(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to cleanup snapshot.')
                LOG.exception(msg, instance=instance)
            instance.task_state = None
            instance.save()
        # Resize never got past preparation: just clear the task state.
        if (instance.vm_state != vm_states.ERROR and
            instance.task_state in [task_states.RESIZE_PREP]):
            LOG.debug("Instance in transitional state %s at start-up "
                      "clearing task state",
                      instance['task_state'], instance=instance)
            instance.task_state = None
            instance.save()
        # Interrupted delete: restart it from scratch with fresh quota
        # reservations.
        if instance.task_state == task_states.DELETING:
            try:
                LOG.info(_LI('Service started deleting the instance during '
                             'the previous run, but did not finish. Restarting'
                             ' the deletion now.'), instance=instance)
                instance.obj_load_attr('metadata')
                instance.obj_load_attr('system_metadata')
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
                project_id, user_id = objects.quotas.ids_from_instance(
                    context, instance)
                quotas = self._create_reservations(context, instance,
                                                   project_id, user_id)
                self._delete_instance(context, instance, bdms, quotas)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to complete a deletion')
                LOG.exception(msg, instance=instance)
                self._set_instance_error_state(context, instance)
            return
        # Interrupted reboot handling: either re-issue the reboot or clear
        # the stale transitional task state depending on the power state.
        try_reboot, reboot_type = self._retry_reboot(context, instance)
        current_power_state = self._get_power_state(context, instance)
        if try_reboot:
            LOG.debug("Instance in transitional state (%(task_state)s) at "
                      "start-up and power state is (%(power_state)s), "
                      "triggering reboot",
                      {'task_state': instance.task_state,
                       'power_state': current_power_state},
                      instance=instance)
            self.compute_rpcapi.reboot_instance(context, instance,
                                                block_device_info=None,
                                                reboot_type=reboot_type)
            return
        elif (current_power_state == power_state.RUNNING and
              instance.task_state in [task_states.REBOOT_STARTED,
                                      task_states.REBOOT_STARTED_HARD,
                                      task_states.PAUSING,
                                      task_states.UNPAUSING]):
            LOG.warning(_LW("Instance in transitional state "
                            "(%(task_state)s) at start-up and power state "
                            "is (%(power_state)s), clearing task state"),
                        {'task_state': instance.task_state,
                         'power_state': current_power_state},
                        instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ACTIVE
            instance.save()
        elif (current_power_state == power_state.PAUSED and
              instance.task_state == task_states.UNPAUSING):
            LOG.warning(_LW("Instance in transitional state "
                            "(%(task_state)s) at start-up and power state "
                            "is (%(power_state)s), clearing task state "
                            "and unpausing the instance"),
                        {'task_state': instance.task_state,
                         'power_state': current_power_state},
                        instance=instance)
            try:
                self.unpause_instance(context, instance)
            except NotImplementedError:
                # Some virt driver didn't support pause and unpause
                pass
            except Exception:
                LOG.exception(_LE('Failed to unpause instance'),
                              instance=instance)
            return
        # Interrupted power-off: retry the stop request.
        if instance.task_state == task_states.POWERING_OFF:
            try:
                LOG.debug("Instance in transitional state %s at start-up "
                          "retrying stop request",
                          instance.task_state, instance=instance)
                self.stop_instance(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to stop instance')
                LOG.exception(msg, instance=instance)
            return
        # Interrupted power-on: retry the start request.
        if instance.task_state == task_states.POWERING_ON:
            try:
                LOG.debug("Instance in transitional state %s at start-up "
                          "retrying start request",
                          instance.task_state, instance=instance)
                self.start_instance(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to start instance')
                LOG.exception(msg, instance=instance)
            return
        # Re-plug VIFs for the instance using the cached network info.
        net_info = compute_utils.get_nw_info_for_instance(instance)
        try:
            self.driver.plug_vifs(instance, net_info)
        except NotImplementedError as e:
            LOG.debug(e, instance=instance)
        except exception.VirtualInterfacePlugException:
            # we don't want an exception to block the init_host
            LOG.exception(_LE("Vifs plug failed"), instance=instance)
            self._set_instance_error_state(context, instance)
            return
        if instance.task_state == task_states.RESIZE_MIGRATING:
            # We crashed during resize/migration, so roll back for safety
            try:
                # NOTE(mriedem): check old_vm_state for STOPPED here, if it's
                # not in system_metadata we default to True for backwards
                # compatibility
                power_on = (instance.system_metadata.get('old_vm_state') !=
                            vm_states.STOPPED)
                block_dev_info = self._get_instance_block_device_info(context,
                                                                      instance)
                self.driver.finish_revert_migration(context,
                    instance, net_info, block_dev_info, power_on)
            except Exception:
                LOG.exception(_LE('Failed to revert crashed migration'),
                              instance=instance)
            finally:
                LOG.info(_LI('Instance found in migrating state during '
                             'startup. Resetting task_state'),
                         instance=instance)
                instance.task_state = None
                instance.save()
        if instance.task_state == task_states.MIGRATING:
            # Live migration did not complete, but instance is on this
            # host, so reset the state.
            instance.task_state = None
            instance.save(expected_task_state=[task_states.MIGRATING])
        # Finally, reconcile the DB power state with what the hypervisor
        # actually reports and optionally resume guests.
        db_state = instance.power_state
        drv_state = self._get_power_state(context, instance)
        expect_running = (db_state == power_state.RUNNING and
                          drv_state != db_state)
        LOG.debug('Current state is %(drv_state)s, state in DB is '
                  '%(db_state)s.',
                  {'drv_state': drv_state, 'db_state': db_state},
                  instance=instance)
        if expect_running and CONF.resume_guests_state_on_host_boot:
            LOG.info(_LI('Rebooting instance after nova-compute restart.'),
                     instance=instance)
            block_device_info = \
                self._get_instance_block_device_info(context, instance)
            try:
                self.driver.resume_state_on_host_boot(
                    context, instance, net_info, block_device_info)
            except NotImplementedError:
                LOG.warning(_LW('Hypervisor driver does not support '
                                'resume guests'), instance=instance)
            except Exception:
                # NOTE(vish): The instance failed to resume, so we set the
                # instance to error and attempt to continue.
                LOG.warning(_LW('Failed to resume instance'),
                            instance=instance)
                self._set_instance_error_state(context, instance)
        elif drv_state == power_state.RUNNING:
            # VMwareAPI drivers will raise an exception
            try:
                self.driver.ensure_filtering_rules_for_instance(
                    instance, net_info)
            except NotImplementedError:
                LOG.warning(_LW('Hypervisor driver does not support '
                                'firewall rules'), instance=instance)
def _retry_reboot(self, context, instance):
current_power_state = self._get_power_state(context, instance)
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
current_power_state)
pending_soft = (current_task_state == task_states.REBOOT_PENDING and
instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
started_not_running = (current_task_state in
[task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD] and
current_power_state != power_state.RUNNING)
if pending_soft or pending_hard or started_not_running:
retry_reboot = True
return retry_reboot, reboot_type
def handle_lifecycle_event(self, event):
LOG.info(_LI("VM %(state)s (Lifecycle Event)"),
{'state': event.get_name()},
instance_uuid=event.get_instance_uuid())
context = nova.context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(context,
event.get_instance_uuid(),
expected_attrs=[])
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
else:
LOG.warning(_LW("Unexpected power state %d"),
event.get_transition())
if vm_power_state is not None:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, VM power_state: '
'%(vm_power_state)s',
dict(event=event.get_name(),
vm_state=instance.vm_state,
task_state=instance.task_state,
db_power_state=instance.power_state,
vm_power_state=vm_power_state),
instance_uuid=instance.uuid)
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug("Event %s arrived for non-existent instance. The "
"instance was probably deleted.", event)
else:
LOG.debug("Ignoring event %s", event)
def init_virt_events(self):
self.driver.register_event_listener(self.handle_events)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache', 'metadata'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
self._update_scheduler_instance_info(context, instances)
def cleanup_host(self):
self.driver.register_event_listener(None)
self.instance_events.cancel_all_events()
self.driver.cleanup_host(host=self.host)
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context())
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
return self.driver.get_info(instance).state
except exception.InstanceNotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
@wrap_exception()
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@wrap_exception()
def refresh_security_group_members(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@object_compat
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronise the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance.uuid)
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'security groups.'), instance=instance)
return _sync_refresh()
@wrap_exception()
def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
return self.network_api.get_instance_nw_info(context, instance)
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
start = time.time()
retries = CONF.block_device_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'block_device_retries' as 0."),
{'retries': retries})
# (1) treat negative config value as 0
# (2) the configured value is 0, one attempt should be made
# (3) the configured value is > 0, then the total number attempts
# is (retries + 1)
attempts = 1
if retries >= 1:
attempts = retries + 1
for attempt in range(1, attempts + 1):
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status == 'available':
return attempt
LOG.warning(_LW("Volume id: %(vol_id)s finished being "
"created but its status is %(vol_status)s."),
{'vol_id': vol_id,
'vol_status': volume_status})
break
greenthread.sleep(CONF.block_device_allocate_retries_interval)
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempt,
volume_status=volume_status)
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
try:
decoded = base64.b64decode(contents)
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
    def _run_instance(self, context, request_spec,
                      filter_properties, requested_networks, injected_files,
                      admin_password, is_first_time, node, instance,
                      legacy_bdm_in_spec):
        """Launch a new instance with specified options.

        Wraps _build_instance with create.start / create.end /
        create.error notifications and translates build outcomes into
        the appropriate notification payloads.
        """
        extra_usage_info = {}
        def notify(status, msg="", fault=None, **kwargs):
            """Send a create.{start,error,end} notification."""
            type_ = "create.%(status)s" % dict(status=status)
            info = extra_usage_info.copy()
            info['message'] = msg
            self._notify_about_instance_usage(context, instance, type_,
                    extra_usage_info=info, fault=fault, **kwargs)
        try:
            self._prebuild_instance(context, instance)
            if request_spec and request_spec.get('image'):
                image_meta = request_spec['image']
            else:
                image_meta = {}
            extra_usage_info = {"image_name": image_meta.get('name', '')}
            notify("start")  # notify that build is starting
            instance, network_info = self._build_instance(context,
                    request_spec, filter_properties, requested_networks,
                    injected_files, admin_password, is_first_time, node,
                    instance, image_meta, legacy_bdm_in_spec)
            notify("end", msg=_("Success"), network_info=network_info)
        except exception.RescheduledException as e:
            # Instance build encountered an error, and has been rescheduled.
            notify("error", fault=e)
        except exception.BuildAbortException as e:
            # Instance build aborted due to a non-failure
            LOG.info(e)
            notify("end", msg=e.format_message())  # notify that build is done
        except Exception as e:
            # Instance build encountered a non-recoverable error:
            with excutils.save_and_reraise_exception():
                self._set_instance_error_state(context, instance)
                notify("error", fault=e)  # notify that build failed
def _prebuild_instance(self, context, instance):
self._check_instance_exists(context, instance)
try:
self._start_building(context, instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
# Quickly bail out of here
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
def _validate_instance_group_policy(self, context, instance,
filter_properties):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# anti-affinity. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. This is a validation step to
# make sure that starting the instance here doesn't violate the policy.
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group')
if not group_hint:
return
@utils.synchronized(group_hint)
def _do_validation(context, instance, group_hint):
group = objects.InstanceGroup.get_by_hint(context, group_hint)
if 'anti-affinity' not in group.policies:
return
group_hosts = group.get_hosts(exclude=[instance.uuid])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
_do_validation(context, instance, group_hint)
    def _build_instance(self, context, request_spec, filter_properties,
            requested_networks, injected_files, admin_password, is_first_time,
            node, instance, image_meta, legacy_bdm_in_spec):
        """Claim resources, allocate networking/BDMs and spawn the instance.

        Returns (instance, network_info) on success. On failure the
        except clauses below decide between aborting the build,
        re-raising, or rescheduling to another host.
        """
        original_context = context
        context = context.elevated()
        # NOTE(danms): This method is deprecated, but could be called,
        # and if it is, it will have an old megatuple for requested_networks.
        if requested_networks is not None:
            requested_networks_obj = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])
        else:
            requested_networks_obj = None
        # If neutron security groups pass requested security
        # groups to allocate_for_instance()
        if request_spec and self.is_neutron_security_groups:
            security_groups = request_spec.get('security_group')
        else:
            security_groups = []
        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node)
        network_info = None
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        # b64 decode the files to inject:
        injected_files_orig = injected_files
        injected_files = self._decode_files(injected_files)
        rt = self._get_resource_tracker(node)
        try:
            limits = filter_properties.get('limits', {})
            with rt.instance_claim(context, instance, limits):
                # NOTE(russellb) It's important that this validation be done
                # *after* the resource tracker instance claim, as that is where
                # the host is set on the instance.
                self._validate_instance_group_policy(context, instance,
                        filter_properties)
                macs = self.driver.macs_for_instance(instance)
                dhcp_options = self.driver.dhcp_options_for_instance(instance)
                # Network allocation runs asynchronously; network_info must
                # be waited on in the error paths below.
                network_info = self._allocate_network(original_context,
                        instance, requested_networks_obj, macs,
                        security_groups, dhcp_options)
                # Verify that all the BDMs have a device_name set and assign a
                # default to the ones missing it with the help of the driver.
                self._default_block_device_names(context, instance, image_meta,
                                                 bdms)
                instance.vm_state = vm_states.BUILDING
                instance.task_state = task_states.BLOCK_DEVICE_MAPPING
                instance.save()
                block_device_info = self._prep_block_device(
                        context, instance, bdms)
                set_access_ip = (is_first_time and
                                 not instance.access_ip_v4 and
                                 not instance.access_ip_v6)
                instance = self._spawn(context, instance, image_meta,
                                       network_info, block_device_info,
                                       injected_files, admin_password,
                                       set_access_ip=set_access_ip)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            # the instance got deleted during the spawn
            # Make sure the async call finishes
            if network_info is not None:
                network_info.wait(do_raise=False)
            try:
                self._deallocate_network(context, instance)
            except Exception:
                msg = _LE('Failed to dealloc network '
                          'for deleted instance')
                LOG.exception(msg, instance=instance)
            raise exception.BuildAbortException(
                instance_uuid=instance.uuid,
                reason=_("Instance disappeared during build"))
        except (exception.UnexpectedTaskStateError,
                exception.VirtualInterfaceCreateException) as e:
            # Don't try to reschedule, just log and reraise.
            with excutils.save_and_reraise_exception():
                LOG.debug(e.format_message(), instance=instance)
                # Make sure the async call finishes
                if network_info is not None:
                    network_info.wait(do_raise=False)
        except exception.InvalidBDM:
            with excutils.save_and_reraise_exception():
                if network_info is not None:
                    network_info.wait(do_raise=False)
                try:
                    self._deallocate_network(context, instance)
                except Exception:
                    msg = _LE('Failed to dealloc network '
                              'for failed instance')
                    LOG.exception(msg, instance=instance)
        except Exception:
            exc_info = sys.exc_info()
            # try to re-schedule instance:
            # Make sure the async call finishes
            if network_info is not None:
                network_info.wait(do_raise=False)
            rescheduled = self._reschedule_or_error(original_context, instance,
                    exc_info, requested_networks, admin_password,
                    injected_files_orig, is_first_time, request_spec,
                    filter_properties, bdms, legacy_bdm_in_spec)
            if rescheduled:
                # log the original build error
                self._log_original_error(exc_info, instance.uuid)
                raise exception.RescheduledException(
                        instance_uuid=instance.uuid,
                        reason=six.text_type(exc_info[1]))
            else:
                # not re-scheduling, go to error:
                raise exc_info[0], exc_info[1], exc_info[2]
        # spawn success
        return instance, network_info
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
    def _reschedule_or_error(self, context, instance, exc_info,
            requested_networks, admin_password, injected_files, is_first_time,
            request_spec, filter_properties, bdms=None,
            legacy_bdm_in_spec=True):
        """Try to re-schedule the build or re-raise the original build error to
        error out the instance.

        Returns True when a reschedule request was issued, False otherwise.
        Records an instance fault and emits an instance.create.error
        notification either way.
        """
        original_context = context
        context = context.elevated()
        instance_uuid = instance.uuid
        rescheduled = False
        compute_utils.add_instance_fault_from_exc(context,
                instance, exc_info[1], exc_info=exc_info)
        self._notify_about_instance_usage(context, instance,
                'instance.create.error', fault=exc_info[1])
        try:
            LOG.debug("Clean up resource before rescheduling.",
                      instance=instance)
            if bdms is None:
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)
            self._shutdown_instance(context, instance,
                                    bdms, requested_networks)
            self._cleanup_volumes(context, instance.uuid, bdms)
        except Exception:
            # do not attempt retry if clean up failed:
            with excutils.save_and_reraise_exception():
                self._log_original_error(exc_info, instance_uuid)
        try:
            method_args = (request_spec, admin_password, injected_files,
                    requested_networks, is_first_time, filter_properties,
                    legacy_bdm_in_spec)
            task_state = task_states.SCHEDULING
            # _reschedule uses the non-elevated context for the retry RPC.
            rescheduled = self._reschedule(original_context, request_spec,
                    filter_properties, instance,
                    self.scheduler_rpcapi.run_instance, method_args,
                    task_state, exc_info)
        except Exception:
            rescheduled = False
            LOG.exception(_LE("Error trying to reschedule"),
                          instance_uuid=instance_uuid)
        return rescheduled
def _reschedule(self, context, request_spec, filter_properties,
instance, reschedule_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
instance_uuid = instance.uuid
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug("No request spec, will not reschedule",
instance_uuid=instance_uuid)
return
LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
{'method': reschedule_method.func_name,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception_only(exc_info[0],
exc_info[1])
reschedule_method(context, *method_args)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
self._set_instance_error_state(context, instance)
LOG.warning(_LW("Instance build timed out. Set to error "
"state."), instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance.uuid,
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
LOG.debug("Allocating IP information in the background.",
instance=instance)
retries = CONF.network_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."),
{'retries': retries})
retries = 0
attempts = retries + 1
retry_time = 1
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options)
LOG.debug('Instance network_info: |%s|', nwinfo,
instance=instance)
instance.system_metadata['network_allocated'] = 'True'
# NOTE(JoshNang) do not save the instance here, as it can cause
# races. The caller shares a reference to instance and waits
# for this async greenthread to finish before calling
# instance.save().
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_LE('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
raise exc_info[0], exc_info[1], exc_info[2]
LOG.warning(_LW('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
    def _build_networks_for_instance(self, context, instance,
            requested_networks, security_groups):
        """Allocate networking for an instance being built.

        On a reschedule where networking was already allocated
        ('network_allocated' in system_metadata), the existing allocation is
        re-attached on this host and its info returned; otherwise a new
        asynchronous allocation is started.  Also fills in the instance's
        access IPs when CONF.default_access_ip_network_name is configured
        and none are set yet.
        """

        # If we're here from a reschedule the network may already be allocated.
        if strutils.bool_from_string(
                instance.system_metadata.get('network_allocated', 'False')):
            # NOTE(alex_xu): The network_allocated is True means the network
            # resource already allocated at previous scheduling, and the
            # network setup is cleanup at previous. After rescheduling, the
            # network resource need setup on the new host.
            self.network_api.setup_instance_network_on_host(
                context, instance, instance.host)
            return self._get_instance_nw_info(context, instance)

        if not self.is_neutron_security_groups:
            # nova-network handles security groups itself; don't pass them.
            security_groups = []

        macs = self.driver.macs_for_instance(instance)
        dhcp_options = self.driver.dhcp_options_for_instance(instance)
        network_info = self._allocate_network(context, instance,
                requested_networks, macs, security_groups, dhcp_options)

        if not instance.access_ip_v4 and not instance.access_ip_v6:
            # If CONF.default_access_ip_network_name is set, grab the
            # corresponding network and set the access ip values accordingly.
            # Note that when there are multiple ips to choose from, an
            # arbitrary one will be chosen.
            network_name = CONF.default_access_ip_network_name
            if not network_name:
                return network_info

            # NOTE(review): network_info is a NetworkInfoAsyncWrapper;
            # iterating it here presumably blocks until the async
            # allocation finishes — confirm against network_model.
            for vif in network_info:
                if vif['network']['label'] == network_name:
                    for ip in vif.fixed_ips():
                        if ip['version'] == 4:
                            instance.access_ip_v4 = ip['address']
                        if ip['version'] == 6:
                            instance.access_ip_v6 = ip['address']
                    instance.save()
                    break

        return network_info
    def _allocate_network(self, context, instance, requested_networks, macs,
                          security_groups, dhcp_options):
        """Start network allocation asynchronously. Return an instance
        of NetworkInfoAsyncWrapper that can be used to retrieve the
        allocated networks when the operation has finished.

        Side effects: moves the instance to BUILDING/NETWORKING (saved),
        and updates the resource tracker before kicking off allocation.
        """
        # NOTE(comstud): Since we're allocating networks asynchronously,
        # this task state has little meaning, as we won't be in this
        # state for very long.
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.NETWORKING
        instance.save(expected_task_state=[None])
        self._update_resource_tracker(context, instance)

        is_vpn = pipelib.is_vpn_image(instance.image_ref)
        # The wrapper runs _allocate_network_async in the background;
        # callers must eventually wait() on (or iterate) the result.
        return network_model.NetworkInfoAsyncWrapper(
            self._allocate_network_async, context, instance,
            requested_networks, macs, security_groups, is_vpn,
            dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _default_block_device_names(self, context, instance,
image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
It also ensures that there is a root_device_name and is set to the
first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_root_bdm = False
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance.root_device_name = root_device_name
elif instance.root_device_name:
root_device_name = instance.root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance.root_device_name = root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
if update_root_bdm:
root_bdm.save()
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
swap = filter(block_device.new_format_is_swap,
block_devices)
block_device_mapping = filter(
driver_block_device.is_block_device_mapping, block_devices)
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
    def _prep_block_device(self, context, instance, bdms,
                           do_check_attach=True):
        """Set up the block device for an instance with error logging.

        Attaches volume/snapshot/image/blank block devices and returns the
        driver block_device_info dict (optionally in legacy format).  Any
        failure is translated into InvalidBDM; over-quota is logged as a
        warning, everything else with a full traceback.
        """
        try:
            block_device_info = {
                'root_device_name': instance.root_device_name,
                'swap': driver_block_device.convert_swap(bdms),
                'ephemerals': driver_block_device.convert_ephemerals(bdms),
                'block_device_mapping': (
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_volumes(bdms),
                        context, instance, self.volume_api,
                        self.driver, do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_snapshots(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_images(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_blanks(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach))
            }

            if self.use_legacy_block_device_info:
                for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
                    block_device_info[bdm_type] = \
                        driver_block_device.legacy_block_devices(
                            block_device_info[bdm_type])

            # Get swap out of the list
            block_device_info['swap'] = driver_block_device.get_swap(
                block_device_info['swap'])
            return block_device_info

        except exception.OverQuota:
            msg = _LW('Failed to create block device for instance due to '
                      'being over volume resource quota')
            LOG.warn(msg, instance=instance)
            raise exception.InvalidBDM()

        except Exception:
            LOG.exception(_LE('Instance failed block device setup'),
                          instance=instance)
            raise exception.InvalidBDM()
    def _update_instance_after_spawn(self, context, instance):
        """Mark the in-memory instance ACTIVE after a successful spawn.

        Refreshes power state, clears the task state, records launched_at
        and updates configdrive metadata.  Does NOT save the instance;
        callers persist the changes.
        """
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        configdrive.update_instance(instance)
    @object_compat
    def _spawn(self, context, instance, image_meta, network_info,
               block_device_info, injected_files, admin_password,
               set_access_ip=False):
        """Spawn an instance with error logging and update its power state.

        Moves the instance BUILDING/SPAWNING, calls the driver, then marks
        it ACTIVE.  When set_access_ip is True, fills in access IPs from
        the network named by CONF.default_access_ip_network_name before
        the final save.  Returns the instance.
        """
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.SPAWNING
        instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
        try:
            self.driver.spawn(context, instance, image_meta,
                              injected_files, admin_password,
                              network_info,
                              block_device_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Instance failed to spawn'),
                              instance=instance)

        self._update_instance_after_spawn(context, instance)

        def _set_access_ip_values():
            """Add access ip values for a given instance.

            If CONF.default_access_ip_network_name is set, this method will
            grab the corresponding network and set the access ip values
            accordingly. Note that when there are multiple ips to choose
            from, an arbitrary one will be chosen.
            """

            network_name = CONF.default_access_ip_network_name
            if not network_name:
                return

            for vif in network_info:
                if vif['network']['label'] == network_name:
                    for ip in vif.fixed_ips():
                        if ip['version'] == 4:
                            instance.access_ip_v4 = ip['address']
                        if ip['version'] == 6:
                            instance.access_ip_v6 = ip['address']
                    return

        if set_access_ip:
            _set_access_ip_values()

        # Surface any async network allocation failure here, before the
        # info cache is updated.
        network_info.wait(do_raise=True)
        instance.info_cache.network_info = network_info
        # NOTE(JoshNang) This also saves the changes to the instance from
        # _allocate_network_async, as they aren't saved in that function
        # to prevent races.
        instance.save(expected_task_state=task_states.SPAWNING)
        return instance
def _update_scheduler_instance_info(self, context, instance):
"""Sends an InstanceList with created or updated Instance objects to
the Scheduler client.
In the case of init_host, the value passed will already be an
InstanceList. Other calls will send individual Instance objects that
have been created or resized. In this case, we create an InstanceList
object containing that Instance.
"""
if not self.send_instance_updates:
return
if isinstance(instance, objects.Instance):
instance = objects.InstanceList(objects=[instance])
context = context.elevated()
self.scheduler_client.update_instance_info(context, self.host,
instance)
def _delete_scheduler_instance_info(self, context, instance_uuid):
"""Sends the uuid of the deleted Instance to the Scheduler client."""
if not self.send_instance_updates:
return
context = context.elevated()
self.scheduler_client.delete_instance_info(context, self.host,
instance_uuid)
@periodic_task.periodic_task(spacing=CONF.scheduler_instance_sync_interval)
def _sync_scheduler_instance_info(self, context):
if not self.send_instance_updates:
return
context = context.elevated()
instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
uuids = [instance.uuid for instance in instances]
self.scheduler_client.sync_instance_info(context, self.host, uuids)
    def _notify_about_instance_usage(self, context, instance, event_suffix,
                                     network_info=None, system_metadata=None,
                                     extra_usage_info=None, fault=None):
        """Emit a usage notification for this instance (thin wrapper around
        compute_utils.notify_about_instance_usage using our notifier).
        """
        compute_utils.notify_about_instance_usage(
            self.notifier, context, instance, event_suffix,
            network_info=network_info,
            system_metadata=system_metadata,
            extra_usage_info=extra_usage_info, fault=fault)
    def _deallocate_network(self, context, instance,
                            requested_networks=None):
        """Release the instance's network resources via the network API."""
        LOG.debug('Deallocating network for instance', instance=instance)
        self.network_api.deallocate_for_instance(
            context, instance, requested_networks=requested_networks)
    def _get_instance_block_device_info(self, context, instance,
                                        refresh_conn_info=False,
                                        bdms=None):
        """Transform block devices to the driver block_device format.

        :param refresh_conn_info: when True, re-fetch connection info for
            every mapping from the volume API; otherwise mappings without
            existing connection_info are dropped from the result.
        :param bdms: optional BlockDeviceMappingList; looked up from the
            database when not provided.
        :returns: dict with 'swap', 'root_device_name', 'ephemerals' and
            'block_device_mapping' keys (legacy format when
            use_legacy_block_device_info is set).
        """
        if not bdms:
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        swap = driver_block_device.convert_swap(bdms)
        ephemerals = driver_block_device.convert_ephemerals(bdms)
        block_device_mapping = (
            driver_block_device.convert_volumes(bdms) +
            driver_block_device.convert_snapshots(bdms) +
            driver_block_device.convert_images(bdms))

        if not refresh_conn_info:
            # if the block_device_mapping has no value in connection_info
            # (returned as None), don't include in the mapping
            block_device_mapping = [
                bdm for bdm in block_device_mapping
                if bdm.get('connection_info')]
        else:
            block_device_mapping = driver_block_device.refresh_conn_infos(
                block_device_mapping, context, instance, self.volume_api,
                self.driver)

        if self.use_legacy_block_device_info:
            swap = driver_block_device.legacy_block_devices(swap)
            ephemerals = driver_block_device.legacy_block_devices(ephemerals)
            block_device_mapping = driver_block_device.legacy_block_devices(
                block_device_mapping)

        # Get swap out of the list
        swap = driver_block_device.get_swap(swap)

        root_device_name = instance.get('root_device_name')

        return {'swap': swap,
                'root_device_name': root_device_name,
                'ephemerals': ephemerals,
                'block_device_mapping': block_device_mapping}
    # NOTE(mikal): No object_compat wrapper on this method because its
    # callers all pass objects already
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def build_and_run_instance(self, context, instance, image, request_spec,
                               filter_properties, admin_password=None,
                               injected_files=None, requested_networks=None,
                               security_groups=None, block_device_mapping=None,
                               node=None, limits=None):
        """RPC entry point for building an instance on this host.

        Normalizes legacy RPC arguments (network request tuples, flavor
        dicts, serialized NUMA limits), then spawns a greenthread that runs
        the build under a per-instance lock and the build semaphore.
        Returns immediately; the build itself is asynchronous.
        """

        # NOTE(danms): Remove this in v4.0 of the RPC API
        if (requested_networks and
                not isinstance(requested_networks,
                               objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])

        # NOTE(melwitt): Remove this in v4.0 of the RPC API
        flavor = filter_properties.get('instance_type')
        if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            flavor = objects.Flavor.get_by_id(context, flavor['id'])
            filter_properties = dict(filter_properties, instance_type=flavor)

        # NOTE(sahid): Remove this in v4.0 of the RPC API
        if (limits and 'numa_topology' in limits and
                isinstance(limits['numa_topology'], six.string_types)):
            db_obj = jsonutils.loads(limits['numa_topology'])
            limits['numa_topology'] = (
                objects.NUMATopologyLimits.obj_from_db_obj(db_obj))

        @utils.synchronized(instance.uuid)
        def _locked_do_build_and_run_instance(*args, **kwargs):
            # NOTE(danms): We grab the semaphore with the instance uuid
            # locked because we could wait in line to build this instance
            # for a while and we want to make sure that nothing else tries
            # to do anything with this instance while we wait.
            with self._build_semaphore:
                self._do_build_and_run_instance(*args, **kwargs)

        # NOTE(danms): We spawn here to return the RPC worker thread back to
        # the pool. Since what follows could take a really long time, we don't
        # want to tie up RPC workers.
        utils.spawn_n(_locked_do_build_and_run_instance,
                      context, instance, image, request_spec,
                      filter_properties, admin_password, injected_files,
                      requested_networks, security_groups,
                      block_device_mapping, node, limits)
    @hooks.add_hook('build_instance')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def _do_build_and_run_instance(self, context, instance, image,
            request_spec, filter_properties, admin_password, injected_files,
            requested_networks, security_groups, block_device_mapping,
            node=None, limits=None):
        """Run a single build attempt and classify its outcome.

        :returns: build_results.ACTIVE on success, RESCHEDULED when the
            build is handed back to the conductor for another host, or
            FAILED for terminal failures (instance set to ERROR or gone).
        """

        try:
            LOG.info(_LI('Starting instance...'), context=context,
                  instance=instance)
            instance.vm_state = vm_states.BUILDING
            instance.task_state = None
            instance.save(expected_task_state=
                    (task_states.SCHEDULING, None))
        except exception.InstanceNotFound:
            msg = 'Instance disappeared before build.'
            LOG.debug(msg, instance=instance)
            return build_results.FAILED
        except exception.UnexpectedTaskStateError as e:
            LOG.debug(e.format_message(), instance=instance)
            return build_results.FAILED

        # b64 decode the files to inject:
        decoded_files = self._decode_files(injected_files)

        if limits is None:
            limits = {}

        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug('No node specified, defaulting to %s', node,
                      instance=instance)

        try:
            self._build_and_run_instance(context, instance, image,
                    decoded_files, admin_password, requested_networks,
                    security_groups, block_device_mapping, node, limits,
                    filter_properties)
            return build_results.ACTIVE
        except exception.RescheduledException as e:
            retry = filter_properties.get('retry', None)
            if not retry:
                # no retry information, do not reschedule.
                LOG.debug("Retry info not present, will not reschedule",
                    instance=instance)
                self._cleanup_allocated_networks(context, instance,
                    requested_networks)
                compute_utils.add_instance_fault_from_exc(context,
                        instance, e, sys.exc_info())
                self._set_instance_error_state(context, instance)
                return build_results.FAILED
            LOG.debug(e.format_message(), instance=instance)
            # Record the failure so the scheduler can avoid this host.
            retry['exc'] = traceback.format_exception(*sys.exc_info())
            # NOTE(comstud): Deallocate networks if the driver wants
            # us to do so.
            if self.driver.deallocate_networks_on_reschedule(instance):
                self._cleanup_allocated_networks(context, instance,
                        requested_networks)
            else:
                # NOTE(alex_xu): Network already allocated and we don't
                # want to deallocate them before rescheduling. But we need
                # cleanup those network resource setup on this host before
                # rescheduling.
                self.network_api.cleanup_instance_network_on_host(
                    context, instance, self.host)

            instance.task_state = task_states.SCHEDULING
            instance.save()

            # Hand the build back to the conductor for another attempt.
            self.compute_task_api.build_instances(context, [instance],
                    image, filter_properties, admin_password,
                    injected_files, requested_networks, security_groups,
                    block_device_mapping)
            return build_results.RESCHEDULED
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            msg = 'Instance disappeared during build.'
            LOG.debug(msg, instance=instance)
            self._cleanup_allocated_networks(context, instance,
                    requested_networks)
            return build_results.FAILED
        except exception.BuildAbortException as e:
            LOG.exception(e.format_message(), instance=instance)
            self._cleanup_allocated_networks(context, instance,
                    requested_networks)
            self._cleanup_volumes(context, instance.uuid,
                    block_device_mapping, raise_exc=False)
            compute_utils.add_instance_fault_from_exc(context, instance,
                    e, sys.exc_info())
            self._set_instance_error_state(context, instance)
            return build_results.FAILED
        except Exception as e:
            # Should not reach here.
            msg = _LE('Unexpected build failure, not rescheduling build.')
            LOG.exception(msg, instance=instance)
            self._cleanup_allocated_networks(context, instance,
                    requested_networks)
            self._cleanup_volumes(context, instance.uuid,
                    block_device_mapping, raise_exc=False)
            compute_utils.add_instance_fault_from_exc(context, instance,
                    e, sys.exc_info())
            self._set_instance_error_state(context, instance)
            return build_results.FAILED
    def _build_and_run_instance(self, context, instance, image, injected_files,
            admin_password, requested_networks, security_groups,
            block_device_mapping, node, limits, filter_properties):
        """Claim resources, build the instance's resources and spawn it.

        Exceptions are classified: RescheduledException for failures that
        may succeed on another host (e.g. resource claim failure, unknown
        errors), BuildAbortException for terminal ones (network/flavor/
        image problems).  InstanceNotFound/UnexpectedDeletingTaskStateError
        propagate unchanged.
        """
        image_name = image.get('name')
        self._notify_about_instance_usage(context, instance, 'create.start',
                extra_usage_info={'image_name': image_name})
        try:
            rt = self._get_resource_tracker(node)
            with rt.instance_claim(context, instance, limits):
                # NOTE(russellb) It's important that this validation be done
                # *after* the resource tracker instance claim, as that is where
                # the host is set on the instance.
                self._validate_instance_group_policy(context, instance,
                        filter_properties)
                with self._build_resources(context, instance,
                        requested_networks, security_groups, image,
                        block_device_mapping) as resources:
                    instance.vm_state = vm_states.BUILDING
                    instance.task_state = task_states.SPAWNING
                    # NOTE(JoshNang) This also saves the changes to the
                    # instance from _allocate_network_async, as they aren't
                    # saved in that function to prevent races.
                    instance.save(expected_task_state=
                            task_states.BLOCK_DEVICE_MAPPING)
                    block_device_info = resources['block_device_info']
                    network_info = resources['network_info']
                    self.driver.spawn(context, instance, image,
                                      injected_files, admin_password,
                                      network_info=network_info,
                                      block_device_info=block_device_info)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError) as e:
            with excutils.save_and_reraise_exception():
                self._notify_about_instance_usage(context, instance,
                    'create.end', fault=e)
        except exception.ComputeResourcesUnavailable as e:
            LOG.debug(e.format_message(), instance=instance)
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            # The claim failed; another host may still have capacity.
            raise exception.RescheduledException(
                    instance_uuid=instance.uuid, reason=e.format_message())
        except exception.BuildAbortException as e:
            with excutils.save_and_reraise_exception():
                LOG.debug(e.format_message(), instance=instance)
                self._notify_about_instance_usage(context, instance,
                        'create.error', fault=e)
        except (exception.FixedIpLimitExceeded,
                exception.NoMoreNetworks, exception.NoMoreFixedIps) as e:
            LOG.warning(_LW('No more network or fixed IP to be allocated'),
                        instance=instance)
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            msg = _('Failed to allocate the network(s) with error %s, '
                    'not rescheduling.') % e.format_message()
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=msg)
        except (exception.VirtualInterfaceCreateException,
                exception.VirtualInterfaceMacAddressException) as e:
            LOG.exception(_LE('Failed to allocate network(s)'),
                          instance=instance)
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            msg = _('Failed to allocate the network(s), not rescheduling.')
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=msg)
        except (exception.FlavorDiskTooSmall,
                exception.FlavorMemoryTooSmall,
                exception.ImageNotActive,
                exception.ImageUnacceptable) as e:
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=e.format_message())
        except Exception as e:
            # Unknown failure: let the scheduler try another host.
            self._notify_about_instance_usage(context, instance,
                    'create.error', fault=e)
            raise exception.RescheduledException(
                    instance_uuid=instance.uuid, reason=six.text_type(e))

        # NOTE(alaski): This is only useful during reschedules, remove it now.
        instance.system_metadata.pop('network_allocated', None)

        self._update_instance_after_spawn(context, instance)

        try:
            instance.save(expected_task_state=task_states.SPAWNING)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError) as e:
            with excutils.save_and_reraise_exception():
                self._notify_about_instance_usage(context, instance,
                    'create.end', fault=e)

        self._update_scheduler_instance_info(context, instance)
        self._notify_about_instance_usage(context, instance, 'create.end',
                extra_usage_info={'message': _('Success')},
                network_info=network_info)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image, block_device_mapping):
resources = {}
network_info = None
try:
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
# Because this allocation is async any failures are likely to occur
# when the driver accesses network_info during spawn().
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image,
block_device_mapping)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
with excutils.save_and_reraise_exception() as ctxt:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except exception.UnexpectedTaskStateError as e:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_LE('Failure prepping block device'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
# if network_info is empty we're likely here because of
# network allocation failure. Since nothing can be reused on
# rescheduling it's better to deallocate network to eliminate
# the chance of orphaned ports in neutron
deallocate_networks = False if network_info else True
try:
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=deallocate_networks)
except Exception:
ctxt.reraise = False
msg = _('Could not clean up failed build,'
' not rescheduling')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE(alaski): It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
    @object_compat
    @messaging.expected_exceptions(exception.BuildAbortException,
                                   exception.UnexpectedTaskStateError,
                                   exception.VirtualInterfaceCreateException,
                                   exception.RescheduledException)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def run_instance(self, context, instance, request_spec,
                     filter_properties, requested_networks,
                     injected_files, admin_password,
                     is_first_time, node, legacy_bdm_in_spec):
        """Legacy RPC entry point to build an instance.

        Runs _run_instance under a per-instance lock; superseded by
        build_and_run_instance.
        """
        # NOTE(alaski) This method should be deprecated when the scheduler and
        # compute rpc interfaces are bumped to 4.x, and slated for removal in
        # 5.x as it is no longer used.
        if filter_properties is None:
            filter_properties = {}

        @utils.synchronized(instance.uuid)
        def do_run_instance():
            self._run_instance(context, request_spec,
                    filter_properties, requested_networks, injected_files,
                    admin_password, is_first_time, node, instance,
                    legacy_bdm_in_spec)
        do_run_instance()
    def _try_deallocate_network(self, context, instance,
                                requested_networks=None):
        """Deallocate networking; on failure set the instance to ERROR.

        The original exception is re-raised after the state change.
        """
        try:
            # tear down allocated network structure
            self._deallocate_network(context, instance, requested_networks)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to deallocate network for instance.'),
                          instance=instance)
                self._set_instance_error_state(context, instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
key='image_os_shutdown_timeout', type=int,
default=CONF.shutdown_timeout)
retry_interval = self.SHUTDOWN_RETRY_INTERVAL
else:
timeout = 0
retry_interval = 0
return timeout, retry_interval
def _power_off_instance(self, context, instance, clean_shutdown=True):
"""Power off an instance on this host."""
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
self.driver.power_off(instance, timeout, retry_interval)
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
try_deallocate_networks=True):
"""Shutdown an instance on this host.
:param:context: security context
:param:instance: a nova.objects.Instance object
:param:bdms: the block devices for the instance to be torn
down
:param:requested_networks: the networks on which the instance
has ports
:param:notify: true if a final usage notification should be
emitted
:param:try_deallocate_networks: false if we should avoid
trying to teardown networking
"""
context = context.elevated()
LOG.info(_LI('%(action_str)s instance') %
{'action_str': 'Terminating'},
context=context, instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
network_info = compute_utils.get_nw_info_for_instance(instance)
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
try:
self.driver.destroy(context, instance, network_info,
block_device_info)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
if try_deallocate_networks:
self._try_deallocate_network(context, instance,
requested_networks)
if try_deallocate_networks:
self._try_deallocate_network(context, instance, requested_networks)
for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.debug('Ignoring VolumeNotFound: %s', exc,
instance=instance)
except (cinder_exception.EndpointNotFound,
keystone_exception.EndpointNotFound) as exc:
LOG.warning(_LW('Ignoring EndpointNotFound: %s'), exc,
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
exc_info = None
for bdm in bdms:
LOG.debug("terminating bdm %s", bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
try:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': bdm.volume_id, 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
    @hooks.add_hook("delete_instance")
    def _delete_instance(self, context, instance, bdms, quotas):
        """Delete an instance on this host. Commit or rollback quotas
        as necessary.

        :param context: nova request context
        :param instance: nova.objects.instance.Instance object
        :param bdms: nova.objects.block_device.BlockDeviceMappingList object
        :param quotas: nova.objects.quotas.Quotas object
        """
        was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
        if was_soft_deleted:
            # Instances in SOFT_DELETED vm_state have already had quotas
            # decremented.
            try:
                quotas.rollback()
            except Exception:
                # Best effort: a failed rollback must not block the
                # actual delete.
                pass
        try:
            events = self.instance_events.clear_events_for_instance(instance)
            if events:
                LOG.debug('Events pending at deletion: %(events)s',
                          {'events': ','.join(events.keys())},
                          instance=instance)
            self._notify_about_instance_usage(context, instance,
                                              "delete.start")
            self._shutdown_instance(context, instance, bdms)
            # NOTE(dims): instance.info_cache.delete() should be called after
            # _shutdown_instance in the compute manager as shutdown calls
            # deallocate_for_instance so the info_cache is still needed
            # at this point.
            instance.info_cache.delete()
            # NOTE(vish): We have already deleted the instance, so we have
            # to ignore problems cleaning up the volumes. It
            # would be nice to let the user know somehow that
            # the volume deletion failed, but it is not
            # acceptable to have an instance that can not be
            # deleted. Perhaps this could be reworked in the
            # future to set an instance fault the first time
            # and to only ignore the failure if the instance
            # is already in ERROR.
            self._cleanup_volumes(context, instance.uuid, bdms,
                                  raise_exc=False)
            # if a delete task succeeded, always update vm state and task
            # state without expecting task state to be DELETING
            instance.vm_state = vm_states.DELETED
            instance.task_state = None
            instance.power_state = power_state.NOSTATE
            instance.terminated_at = timeutils.utcnow()
            instance.save()
            self._update_resource_tracker(context, instance)
            # Capture system_metadata before destroy() wipes the DB row;
            # _complete_deletion below still needs it.
            system_meta = instance.system_metadata
            instance.destroy()
        except Exception:
            # Any failure on the delete path returns the quota
            # reservations before the exception propagates.
            with excutils.save_and_reraise_exception():
                quotas.rollback()
        self._complete_deletion(context,
                                instance,
                                bdms,
                                quotas,
                                system_meta)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def terminate_instance(self, context, instance, bdms, reservations):
        """Terminate an instance on this host.

        :param context: nova request context
        :param instance: nova.objects.instance.Instance to delete
        :param bdms: block device mappings for the instance (may arrive
            as legacy non-object dicts from an old RPC client)
        :param reservations: quota reservations for the delete
        """
        # NOTE (ndipanov): If we get non-object BDMs, just get them from the
        # db again, as this means they are sent in the old format and we want
        # to avoid converting them back when we can just get them.
        # Remove this when we bump the RPC major version to 4.0
        if (bdms and
            any(not isinstance(bdm, obj_base.NovaObject)
                for bdm in bdms)):
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        # Serialize against other operations on the same instance uuid.
        @utils.synchronized(instance.uuid)
        def do_terminate_instance(instance, bdms):
            try:
                self._delete_instance(context, instance, bdms, quotas)
            except exception.InstanceNotFound:
                # Already gone (e.g. concurrent delete); nothing to do.
                LOG.info(_LI("Instance disappeared during terminate"),
                         instance=instance)
            except Exception:
                # As we're trying to delete always go to Error if something
                # goes wrong that _delete_instance can't handle.
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Setting instance vm_state to ERROR'),
                                  instance=instance)
                    self._set_instance_error_state(context, instance)
        do_terminate_instance(instance, bdms)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def stop_instance(self, context, instance, clean_shutdown=True):
        """Stopping an instance on this host.

        :param clean_shutdown: when True, give the guest a chance to shut
            down gracefully before powering it off
        """
        @utils.synchronized(instance.uuid)
        def do_stop_instance():
            current_power_state = self._get_power_state(context, instance)
            LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
                      'current task_state: %(task_state)s, current DB '
                      'power_state: %(db_power_state)s, current VM '
                      'power_state: %(current_power_state)s',
                      dict(vm_state=instance.vm_state,
                           task_state=instance.task_state,
                           db_power_state=instance.power_state,
                           current_power_state=current_power_state),
                      instance_uuid=instance.uuid)
            # NOTE(mriedem): If the instance is already powered off, we are
            # possibly tearing down and racing with other operations, so we can
            # expect the task_state to be None if something else updates the
            # instance and we're not locking it.
            expected_task_state = [task_states.POWERING_OFF]
            # The list of power states is from _sync_instance_power_state.
            if current_power_state in (power_state.NOSTATE,
                                       power_state.SHUTDOWN,
                                       power_state.CRASHED):
                LOG.info(_LI('Instance is already powered off in the '
                             'hypervisor when stop is called.'),
                         instance=instance)
                # Also accept a cleared task_state in this racy case.
                expected_task_state.append(None)
            self._notify_about_instance_usage(context, instance,
                                              "power_off.start")
            self._power_off_instance(context, instance, clean_shutdown)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.STOPPED
            instance.task_state = None
            instance.save(expected_task_state=expected_task_state)
            self._notify_about_instance_usage(context, instance,
                                              "power_off.end")
        do_stop_instance()
def _power_on(self, context, instance):
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
    def _delete_snapshot_of_shelved_instance(self, context, instance,
                                             snapshot_id):
        """Delete snapshot of shelved instance.

        Cleanup is best effort: every failure is logged and swallowed so
        it cannot break the caller's flow.
        """
        try:
            self.image_api.delete(context, snapshot_id)
        except (exception.ImageNotFound,
                exception.ImageNotAuthorized) as exc:
            # Snapshot already gone or not visible to us; a warning with
            # the formatted reason is enough.
            LOG.warning(_LW("Failed to delete snapshot "
                            "from shelved instance (%s)."),
                        exc.format_message(), instance=instance)
        except Exception:
            # Catch-all so unexpected image-service errors are swallowed
            # too (with full traceback in the log).
            LOG.exception(_LE("Something wrong happened when trying to "
                              "delete snapshot from shelved instance."),
                          instance=instance)
# NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def start_instance(self, context, instance):
        """Starting an instance on this host."""
        self._notify_about_instance_usage(context, instance, "power_on.start")
        self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        # Delete an image(VM snapshot) for a shelved instance: once the
        # instance is running again the shelve snapshot is obsolete.
        snapshot_id = instance.system_metadata.get('shelved_image_id')
        if snapshot_id:
            self._delete_snapshot_of_shelved_instance(context, instance,
                                                      snapshot_id)
        # Delete system_metadata for a shelved instance
        compute_utils.remove_shelved_keys_from_system_metadata(instance)
        instance.save(expected_task_state=task_states.POWERING_ON)
        self._notify_about_instance_usage(context, instance, "power_on.end")
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def soft_delete_instance(self, context, instance, reservations):
        """Soft delete an instance on this host.

        Quota reservations are committed only once the instance has been
        saved in SOFT_DELETED; any failure rolls them back and re-raises.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            self._notify_about_instance_usage(context, instance,
                                              "soft_delete.start")
            try:
                self.driver.soft_delete(instance)
            except NotImplementedError:
                # Fallback to just powering off the instance if the
                # hypervisor doesn't implement the soft_delete method
                self.driver.power_off(instance)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.SOFT_DELETED
            instance.task_state = None
            instance.save(expected_task_state=[task_states.SOFT_DELETING])
        except Exception:
            with excutils.save_and_reraise_exception():
                quotas.rollback()
        quotas.commit()
        self._notify_about_instance_usage(context, instance, "soft_delete.end")
    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def restore_instance(self, context, instance):
        """Restore a soft-deleted instance on this host."""
        self._notify_about_instance_usage(context, instance, "restore.start")
        try:
            self.driver.restore(instance)
        except NotImplementedError:
            # Fallback to just powering on the instance if the hypervisor
            # doesn't implement the restore method
            self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        # Guard the save so it only applies while still RESTORING.
        instance.save(expected_task_state=task_states.RESTORING)
        self._notify_about_instance_usage(context, instance, "restore.end")
def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, bdms,
detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
detach_block_devices(context, bdms)
if not recreate:
self.driver.destroy(context, instance, network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=new_block_device_info)
    @object_compat
    @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate, on_shared_storage,
                         preserve_ephemeral=False):
        """Destroy and re-make this instance.

        A 'rebuild' effectively purges all existing data from the system and
        remakes the VM with given 'metadata' and 'personalities'.

        :param context: `nova.RequestContext` object
        :param instance: Instance object
        :param orig_image_ref: Original image_ref before rebuild
        :param image_ref: New image_ref for rebuild
        :param injected_files: Files to inject
        :param new_pass: password to set on rebuilt instance
        :param orig_sys_metadata: instance system metadata from pre-rebuild
        :param bdms: block-device-mappings to use for rebuild
        :param recreate: True if the instance is being recreated (e.g. the
            hypervisor it was on failed) - cleanup of old state will be
            skipped.
        :param on_shared_storage: True if instance files on shared storage
        :param preserve_ephemeral: True if the default ephemeral storage
            partition must be preserved on rebuild
        """
        context = context.elevated()
        # NOTE (ndipanov): If we get non-object BDMs, just get them from the
        # db again, as this means they are sent in the old format and we want
        # to avoid converting them back when we can just get them.
        # Remove this on the next major RPC version bump
        if (bdms and
            any(not isinstance(bdm, obj_base.NovaObject)
                for bdm in bdms)):
            bdms = None
        # Remembered so the instance can be powered back off at the end
        # if it was STOPPED before the rebuild.
        orig_vm_state = instance.vm_state
        with self._error_out_instance_on_exception(context, instance):
            LOG.info(_LI("Rebuilding instance"), context=context,
                     instance=instance)
            if recreate:
                if not self.driver.capabilities["supports_recreate"]:
                    raise exception.InstanceRecreateNotSupported
                self._check_instance_exists(context, instance)
                # To cover case when admin expects that instance files are on
                # shared storage, but not accessible and vice versa
                if on_shared_storage != self.driver.instance_on_disk(instance):
                    raise exception.InvalidSharedStorage(
                            _("Invalid state of instance files on shared"
                              " storage"))
                if on_shared_storage:
                    LOG.info(_LI('disk on shared storage, recreating using'
                                 ' existing disk'))
                else:
                    # Without shared storage the disk is rebuilt from the
                    # instance's current image.
                    image_ref = orig_image_ref = instance.image_ref
                    LOG.info(_LI("disk not on shared storage, rebuilding from:"
                                 " '%s'"), str(image_ref))
                # NOTE(mriedem): On a recreate (evacuate), we need to update
                # the instance's host and node properties to reflect it's
                # destination node for the recreate.
                node_name = None
                try:
                    compute_node = self._get_compute_info(context, self.host)
                    node_name = compute_node.hypervisor_hostname
                except exception.ComputeHostNotFound:
                    LOG.exception(_LE('Failed to get compute_info for %s'),
                                  self.host)
                finally:
                    # Even on failure the host is updated (node stays None).
                    instance.host = self.host
                    instance.node = node_name
                    instance.save()
            if image_ref:
                image_meta = self.image_api.get(context, image_ref)
            else:
                image_meta = {}
            # This instance.exists message should contain the original
            # image_ref, not the new one.  Since the DB has been updated
            # to point to the new one... we have to override it.
            # TODO(jaypipes): Move generate_image_url() into the nova.image.api
            orig_image_ref_url = glance.generate_image_url(orig_image_ref)
            extra_usage_info = {'image_ref_url': orig_image_ref_url}
            compute_utils.notify_usage_exists(
                    self.notifier, context, instance,
                    current_period=True, system_metadata=orig_sys_metadata,
                    extra_usage_info=extra_usage_info)
            # This message should contain the new image_ref
            extra_usage_info = {'image_name': image_meta.get('name', '')}
            self._notify_about_instance_usage(context, instance,
                    "rebuild.start", extra_usage_info=extra_usage_info)
            instance.power_state = self._get_power_state(context, instance)
            instance.task_state = task_states.REBUILDING
            instance.save(expected_task_state=[task_states.REBUILDING])
            if recreate:
                # Needed for nova-network, does nothing for neutron
                self.network_api.setup_networks_on_host(
                        context, instance, self.host)
                # For nova-network this is needed to move floating IPs
                # For neutron this updates the host in the port binding
                # TODO(cfriesen): this network_api call and the one above
                # are so similar, we should really try to unify them.
                self.network_api.setup_instance_network_on_host(
                        context, instance, self.host)
            network_info = compute_utils.get_nw_info_for_instance(instance)
            if bdms is None:
                # Either none were passed or they arrived in legacy
                # format and were discarded above.
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)
            block_device_info = \
                self._get_instance_block_device_info(
                        context, instance, bdms=bdms)

            # Passed into the driver (or the default impl) as a callback.
            def detach_block_devices(context, bdms):
                for bdm in bdms:
                    if bdm.is_volume:
                        self._detach_volume(context, bdm.volume_id, instance,
                                            destroy_bdm=False)

            files = self._decode_files(injected_files)
            kwargs = dict(
                context=context,
                instance=instance,
                image_meta=image_meta,
                injected_files=files,
                admin_password=new_pass,
                bdms=bdms,
                detach_block_devices=detach_block_devices,
                attach_block_devices=self._prep_block_device,
                block_device_info=block_device_info,
                network_info=network_info,
                preserve_ephemeral=preserve_ephemeral,
                recreate=recreate)
            try:
                self.driver.rebuild(**kwargs)
            except NotImplementedError:
                # NOTE(rpodolyaka): driver doesn't provide specialized version
                # of rebuild, fall back to the default implementation
                self._rebuild_default_impl(**kwargs)
            self._update_instance_after_spawn(context, instance)
            instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
            if orig_vm_state == vm_states.STOPPED:
                # Rebuild leaves the guest running; power it back off so
                # it ends up in the state it had before the rebuild.
                LOG.info(_LI("bringing vm to original state: '%s'"),
                         orig_vm_state, instance=instance)
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = task_states.POWERING_OFF
                instance.progress = 0
                instance.save()
                self.stop_instance(context, instance)
            self._update_scheduler_instance_info(context, instance)
            self._notify_about_instance_usage(
                    context, instance, "rebuild.end",
                    network_info=network_info,
                    extra_usage_info=extra_usage_info)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_LI("Detaching from volume api: %s"), volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def reboot_instance(self, context, instance, block_device_info,
                        reboot_type):
        """Reboot an instance on this host.

        :param reboot_type: "SOFT" selects the soft-reboot task states;
            any other value is treated as a hard reboot
        """
        # acknowledge the request made it to the manager
        if reboot_type == "SOFT":
            instance.task_state = task_states.REBOOT_PENDING
            expected_states = (task_states.REBOOTING,
                               task_states.REBOOT_PENDING,
                               task_states.REBOOT_STARTED)
        else:
            instance.task_state = task_states.REBOOT_PENDING_HARD
            expected_states = (task_states.REBOOTING_HARD,
                               task_states.REBOOT_PENDING_HARD,
                               task_states.REBOOT_STARTED_HARD)
        context = context.elevated()
        LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
        # NOTE(review): the passed-in block_device_info is deliberately
        # replaced with a freshly-built one here.
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(context, instance, "reboot.start")
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=expected_states)
        if instance.power_state != power_state.RUNNING:
            state = instance.power_state
            running = power_state.RUNNING
            LOG.warning(_LW('trying to reboot a non-running instance:'
                            ' (state: %(state)s expected: %(running)s)'),
                        {'state': state, 'running': running},
                        context=context, instance=instance)

        # Invoked by the driver if it had to detach broken volumes.
        def bad_volumes_callback(bad_devices):
            self._handle_bad_volumes_detached(
                    context, instance, bad_devices, block_device_info)

        try:
            # Don't change it out of rescue mode
            if instance.vm_state == vm_states.RESCUED:
                new_vm_state = vm_states.RESCUED
            else:
                new_vm_state = vm_states.ACTIVE
            new_power_state = None
            if reboot_type == "SOFT":
                instance.task_state = task_states.REBOOT_STARTED
                expected_state = task_states.REBOOT_PENDING
            else:
                instance.task_state = task_states.REBOOT_STARTED_HARD
                expected_state = task_states.REBOOT_PENDING_HARD
            instance.save(expected_task_state=expected_state)
            self.driver.reboot(context, instance,
                               network_info,
                               reboot_type,
                               block_device_info=block_device_info,
                               bad_volumes_callback=bad_volumes_callback)
        except Exception as error:
            with excutils.save_and_reraise_exception() as ctxt:
                exc_info = sys.exc_info()
                # if the reboot failed but the VM is running don't
                # put it into an error state
                new_power_state = self._get_power_state(context, instance)
                if new_power_state == power_state.RUNNING:
                    LOG.warning(_LW('Reboot failed but instance is running'),
                                context=context, instance=instance)
                    compute_utils.add_instance_fault_from_exc(context,
                            instance, error, exc_info)
                    self._notify_about_instance_usage(context, instance,
                            'reboot.error', fault=error)
                    # Swallow the exception: the guest is still up.
                    ctxt.reraise = False
                else:
                    LOG.error(_LE('Cannot reboot instance: %s'), error,
                              context=context, instance=instance)
                    self._set_instance_obj_error_state(context, instance)
        if not new_power_state:
            new_power_state = self._get_power_state(context, instance)
        try:
            instance.power_state = new_power_state
            instance.vm_state = new_vm_state
            instance.task_state = None
            instance.save()
        except exception.InstanceNotFound:
            LOG.warning(_LW("Instance disappeared during reboot"),
                        context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, "reboot.end")
@delete_image_on_error
def _do_snapshot_instance(self, context, image_id, instance, rotation):
if rotation < 0:
raise exception.RotationRequiredForBackup()
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def backup_instance(self, context, image_id, instance, backup_type,
                        rotation):
        """Backup an instance on this host.

        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around
        """
        # Take the snapshot first, then prune old backups past *rotation*.
        self._do_snapshot_instance(context, image_id, instance, rotation)
        self._rotate_backups(context, instance, backup_type, rotation)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    @delete_image_on_error
    def snapshot_instance(self, context, image_id, instance):
        """Snapshot an instance on this host.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param image_id: glance.db.sqlalchemy.models.Image.Id
        """
        # NOTE(dave-mcnally) the task state will already be set by the api
        # but if the compute manager has crashed/been restarted prior to the
        # request getting here the task state may have been cleared so we set
        # it again and things continue normally
        try:
            instance.task_state = task_states.IMAGE_SNAPSHOT
            instance.save(
                        expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
        except exception.InstanceNotFound:
            # possibility instance no longer exists, no point in continuing
            LOG.debug("Instance not found, could not set state %s "
                      "for instance.",
                      task_states.IMAGE_SNAPSHOT, instance=instance)
            return
        except exception.UnexpectedDeletingTaskStateError:
            LOG.debug("Instance being deleted, snapshot cannot continue",
                      instance=instance)
            return
        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_SNAPSHOT)
    def _snapshot_instance(self, context, image_id, instance,
                           expected_task_state):
        """Drive the driver snapshot of *instance* into image *image_id*.

        Handles the task-state transitions around the snapshot and, if
        the instance disappears mid-way, tries to delete the partial
        image.
        """
        context = context.elevated()
        instance.power_state = self._get_power_state(context, instance)
        try:
            instance.save()
            LOG.info(_LI('instance snapshotting'), context=context,
                     instance=instance)
            if instance.power_state != power_state.RUNNING:
                state = instance.power_state
                running = power_state.RUNNING
                LOG.warning(_LW('trying to snapshot a non-running instance: '
                                '(state: %(state)s expected: %(running)s)'),
                            {'state': state, 'running': running},
                            instance=instance)
            self._notify_about_instance_usage(
                context, instance, "snapshot.start")

            # Handed to the driver so it can advance the task state as
            # the snapshot progresses.
            def update_task_state(task_state,
                                  expected_state=expected_task_state):
                instance.task_state = task_state
                instance.save(expected_task_state=expected_state)

            self.driver.snapshot(context, instance, image_id,
                                 update_task_state)
            instance.task_state = None
            instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
            self._notify_about_instance_usage(context, instance,
                                              "snapshot.end")
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            # the instance got deleted during the snapshot
            # Quickly bail out of here
            msg = 'Instance disappeared during snapshot'
            LOG.debug(msg, instance=instance)
            # Best effort: remove the image unless it already went active.
            try:
                image_service = glance.get_default_image_service()
                image = image_service.show(context, image_id)
                if image['status'] != 'active':
                    image_service.delete(context, image_id)
            except Exception:
                LOG.warning(_LW("Error while trying to clean up image %s"),
                            image_id, instance=instance)
        except exception.ImageNotFound:
            instance.task_state = None
            instance.save()
            msg = _LW("Image not found during snapshot")
            LOG.warn(msg, instance=instance)
    def _post_interrupted_snapshot_cleanup(self, context, instance):
        # Let the virt driver clean up after an interrupted snapshot.
        self.driver.post_interrupted_snapshot_cleanup(context, instance)
    @object_compat
    @messaging.expected_exceptions(NotImplementedError)
    @wrap_exception()
    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        """Delegate creation of a snapshot of *volume_id* to the virt
        driver; drivers without support raise NotImplementedError.
        """
        self.driver.volume_snapshot_create(context, instance, volume_id,
                                           create_info)
    @object_compat
    @messaging.expected_exceptions(NotImplementedError)
    @wrap_exception()
    def volume_snapshot_delete(self, context, instance, volume_id,
                               snapshot_id, delete_info):
        """Delegate deletion of volume snapshot *snapshot_id* to the virt
        driver; drivers without support raise NotImplementedError.
        """
        self.driver.volume_snapshot_delete(context, instance, volume_id,
                                           snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = self.image_api.get_all(context, filters=filters,
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,
instance=instance)
self.image_api.delete(context, image_id)
    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def set_admin_password(self, context, instance, new_pass):
        """Set the root/admin password for an instance on this host.

        This is generally only called by API password resets after an
        image has been built.

        @param context: Nova auth context.
        @param instance: Nova instance object.
        @param new_pass: The admin password for the instance.
        """
        context = context.elevated()
        if new_pass is None:
            # Generate a random password
            new_pass = utils.generate_password()
        current_power_state = self._get_power_state(context, instance)
        expected_state = power_state.RUNNING
        if current_power_state != expected_state:
            # Clear the task state first so the instance isn't stuck in
            # UPDATING_PASSWORD, then report the failure to the caller.
            instance.task_state = None
            instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
            _msg = _('instance %s is not running') % instance.uuid
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)
        try:
            self.driver.set_admin_password(instance, new_pass)
            LOG.info(_LI("Root password set"), instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
        except NotImplementedError:
            LOG.warning(_LW('set_admin_password is not implemented '
                            'by this driver or guest instance.'),
                        instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
            raise NotImplementedError(_('set_admin_password is not '
                                        'implemented by this driver or guest '
                                        'instance.'))
        except exception.UnexpectedTaskStateError:
            # interrupted by another (most likely delete) task
            # do not retry
            raise
        except Exception:
            # Catch all here because this could be anything.
            LOG.exception(_LE('set_admin_password failed'),
                          instance=instance)
            self._set_instance_obj_error_state(context, instance)
            # We create a new exception here so that we won't
            # potentially reveal password information to the
            # API caller.  The real exception is logged above
            _msg = _('error setting admin password')
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
LOG.warning(_LW('trying to inject a file into a non-running '
'(state: %(current_state)s expected: '
'%(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.info(_LI('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance, rescue_image_ref=None):
"""Determine what image should be used to boot the rescue VM."""
# 1. If rescue_image_ref is passed in, use that for rescue.
# 2. Else, use the base image associated with instance's current image.
# The idea here is to provide the customer with a rescue
# environment which they are familiar with.
# So, if they built their instance off of a Debian image,
# their rescue VM will also be Debian.
# 3. As a last resort, use instance's current image.
if not rescue_image_ref:
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
if not rescue_image_ref:
LOG.warning(_LW('Unable to find a different image to use for '
'rescue VM, using instance\'s current image'),
instance=instance)
rescue_image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(context, self.image_api,
rescue_image_ref,
instance)
# NOTE(belliott) bug #1227350 - xenapi needs the actual image id
image_meta['id'] = rescue_image_ref
return image_meta
    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rescue_instance(self, context, instance, rescue_password,
                        rescue_image_ref=None, clean_shutdown=True):
        """Power off the instance and boot it into a rescue environment.

        :param rescue_password: admin password for the rescue VM; a
            random one is generated when not provided
        :param rescue_image_ref: optional image for the rescue VM (see
            _get_rescue_image for the fallback chain)
        :param clean_shutdown: attempt a graceful guest shutdown first
        """
        context = context.elevated()
        LOG.info(_LI('Rescuing'), context=context, instance=instance)
        admin_password = (rescue_password if rescue_password else
                          utils.generate_password())
        network_info = self._get_instance_nw_info(context, instance)
        rescue_image_meta = self._get_rescue_image(context, instance,
                                                   rescue_image_ref)
        extra_usage_info = {'rescue_image_name':
                            rescue_image_meta.get('name', '')}
        self._notify_about_instance_usage(context, instance,
                "rescue.start", extra_usage_info=extra_usage_info,
                network_info=network_info)
        try:
            self._power_off_instance(context, instance, clean_shutdown)
            self.driver.rescue(context, instance,
                               network_info,
                               rescue_image_meta, admin_password)
        except Exception as e:
            LOG.exception(_LE("Error trying to Rescue Instance"),
                          instance=instance)
            # Surface the driver failure to the caller as a rescue error.
            raise exception.InstanceNotRescuable(
                instance_id=instance.uuid,
                reason=_("Driver Error: %s") % e)
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period=True)
        instance.vm_state = vm_states.RESCUED
        instance.task_state = None
        instance.power_state = self._get_power_state(context, instance)
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESCUING)
        self._notify_about_instance_usage(context, instance,
                "rescue.end", extra_usage_info=extra_usage_info,
                network_info=network_info)
    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unrescue_instance(self, context, instance):
        """Take an instance out of rescue mode and mark it ACTIVE."""
        context = context.elevated()
        LOG.info(_LI('Unrescuing'), context=context, instance=instance)
        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(context, instance,
                "unrescue.start", network_info=network_info)
        # NOTE(review): presumably this context manager flips the
        # instance to ERROR on failure (helper defined elsewhere).
        with self._error_out_instance_on_exception(context, instance):
            self.driver.unrescue(instance,
                                 network_info)
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.power_state = self._get_power_state(context, instance)
            instance.save(expected_task_state=task_states.UNRESCUING)
        self._notify_about_instance_usage(context,
                                          instance,
                                          "unrescue.end",
                                          network_info=network_info)
    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def change_instance_metadata(self, context, diff, instance):
        """Update the metadata published to the instance.

        :param diff: metadata changes to forward to the virt driver
        """
        LOG.debug("Changing instance metadata according to %r",
                  diff, instance=instance)
        self.driver.change_instance_metadata(context, instance, diff)
def _cleanup_stored_instance_types(self, instance, restore_old=False):
"""Clean up "old" and "new" instance_type information stored in
instance's system_metadata. Optionally update the "current"
instance_type to the saved old one first.
Returns the updated system_metadata as a dict, the
post-cleanup current instance type and the to-be dropped
instance type.
"""
sys_meta = instance.system_metadata
if restore_old:
instance_type = instance.get_flavor('old')
drop_instance_type = instance.get_flavor()
instance.set_flavor(instance_type)
else:
instance_type = instance.get_flavor()
drop_instance_type = instance.get_flavor('old')
instance.delete_flavor('old')
instance.delete_flavor('new')
return sys_meta, instance_type, drop_instance_type
    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def confirm_resize(self, context, instance, reservations, migration):
        """Confirm a resize/migration, destroying the source instance.

        The work runs under a per-instance lock.  Both the migration and
        the instance are re-read from the DB first, so an already
        confirmed migration, an unexpected migration status, or a deleted
        instance turns this into a no-op with the quota reservations
        rolled back.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        @utils.synchronized(instance.uuid)
        def do_confirm_resize(context, instance, migration_id):
            # NOTE(wangpan): Get the migration status from db, if it has been
            # confirmed, we do nothing and return here
            LOG.debug("Going to confirm migration %s", migration_id,
                      context=context, instance=instance)
            try:
                # TODO(russellb) Why are we sending the migration object just
                # to turn around and look it up from the db again?
                migration = objects.Migration.get_by_id(
                    context.elevated(), migration_id)
            except exception.MigrationNotFound:
                LOG.error(_LE("Migration %s is not found during confirmation"),
                          migration_id, context=context, instance=instance)
                quotas.rollback()
                return

            if migration.status == 'confirmed':
                LOG.info(_LI("Migration %s is already confirmed"),
                         migration_id, context=context, instance=instance)
                quotas.rollback()
                return
            elif migration.status not in ('finished', 'confirming'):
                LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
                                "of migration %(id)s, exit confirmation "
                                "process"),
                            {"status": migration.status, "id": migration_id},
                            context=context, instance=instance)
                quotas.rollback()
                return

            # NOTE(wangpan): Get the instance from db, if it has been
            # deleted, we do nothing and return here
            expected_attrs = ['metadata', 'system_metadata', 'flavor']
            try:
                instance = objects.Instance.get_by_uuid(
                    context, instance.uuid,
                    expected_attrs=expected_attrs)
            except exception.InstanceNotFound:
                LOG.info(_LI("Instance is not found during confirmation"),
                         context=context, instance=instance)
                quotas.rollback()
                return

            self._confirm_resize(context, instance, quotas,
                                 migration=migration)

        do_confirm_resize(context, instance, migration.id)
    def _confirm_resize(self, context, instance, quotas,
                        migration=None):
        """Destroys the source instance.

        Cleans the stashed old/new flavor data out of system_metadata,
        tears down networking and the guest on the source host, marks the
        migration confirmed, drops the resize claim on the source node
        and finally commits the quota reservations.
        """
        self._notify_about_instance_usage(context, instance,
                                          "resize.confirm.start")
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(danms): delete stashed migration information
            sys_meta, instance_type, old_instance_type = (
                self._cleanup_stored_instance_types(instance))
            sys_meta.pop('old_vm_state', None)

            instance.system_metadata = sys_meta
            instance.save()

            # NOTE(tr3buchet): tear down networks on source host
            self.network_api.setup_networks_on_host(context, instance,
                               migration.source_compute, teardown=True)

            network_info = self._get_instance_nw_info(context, instance)
            self.driver.confirm_migration(migration, instance,
                                          network_info)

            migration.status = 'confirmed'
            with migration.obj_as_admin():
                migration.save()

            # Free the resources the resize had claimed on the source node.
            rt = self._get_resource_tracker(migration.source_node)
            rt.drop_resize_claim(context, instance, old_instance_type)

            # NOTE(mriedem): The old_vm_state could be STOPPED but the user
            # might have manually powered up the instance to confirm the
            # resize/migrate, so we need to check the current power state
            # on the instance and set the vm_state appropriately. We default
            # to ACTIVE because if the power state is not SHUTDOWN, we
            # assume _sync_instance_power_state will clean it up.
            p_state = instance.power_state
            vm_state = None
            if p_state == power_state.SHUTDOWN:
                vm_state = vm_states.STOPPED
                LOG.debug("Resized/migrated instance is powered off. "
                          "Setting vm_state to '%s'.", vm_state,
                          instance=instance)
            else:
                vm_state = vm_states.ACTIVE

            instance.vm_state = vm_state
            instance.task_state = None
            instance.save(expected_task_state=[None, task_states.DELETING])

            self._notify_about_instance_usage(
                context, instance, "resize.confirm.end",
                network_info=network_info)

            quotas.commit()
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def revert_resize(self, context, instance, migration, reservations):
        """Destroys the new instance on the destination machine.

        Reverts the model changes, and powers on the old instance on the
        source machine.  Runs on the *destination* compute; the source
        side of the revert is kicked off at the end via an RPC call to
        finish_revert_resize.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        # NOTE(comstud): A revert_resize is essentially a resize back to
        # the old size, so we need to send a usage event here.
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period=True)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(tr3buchet): tear down networks on destination host
            self.network_api.setup_networks_on_host(context, instance,
                                                    teardown=True)

            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance,
                                                    migration_p)

            network_info = self._get_instance_nw_info(context, instance)
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                context, instance, bdms=bdms)

            # Only wipe local disks when storage is not shared with the
            # source host; otherwise the source still needs them.
            destroy_disks = not self._is_instance_storage_shared(
                context, instance, host=migration.source_compute)
            self.driver.destroy(context, instance, network_info,
                                block_device_info, destroy_disks)

            self._terminate_volume_connections(context, instance, bdms)

            migration.status = 'reverted'
            with migration.obj_as_admin():
                migration.save()

            # Release the resources this (destination) node claimed.
            rt = self._get_resource_tracker(instance.node)
            rt.drop_resize_claim(context, instance)

            self.compute_rpcapi.finish_revert_resize(context, instance,
                    migration, migration.source_compute,
                    quotas.reservations)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def finish_revert_resize(self, context, instance, reservations, migration):
        """Finishes the second half of reverting a resize.

        Bring the original source instance state back (active/shutoff) and
        revert the resized attributes in the database.  Runs on the
        *source* compute after revert_resize tore down the destination.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            network_info = self._get_instance_nw_info(context, instance)

            self._notify_about_instance_usage(
                context, instance, "resize.revert.start")

            # Restore the stashed old flavor as current and drop the
            # stashed 'old'/'new' entries.
            sys_meta, instance_type, drop_instance_type = (
                self._cleanup_stored_instance_types(instance, True))

            # NOTE(mriedem): delete stashed old_vm_state information; we
            # default to ACTIVE for backwards compatibility if old_vm_state
            # is not set
            old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)

            instance.system_metadata = sys_meta
            instance.memory_mb = instance_type['memory_mb']
            instance.vcpus = instance_type['vcpus']
            instance.root_gb = instance_type['root_gb']
            instance.ephemeral_gb = instance_type['ephemeral_gb']
            instance.instance_type_id = instance_type['id']
            instance.host = migration.source_compute
            instance.node = migration.source_node
            instance.save()

            migration.dest_compute = migration.source_compute
            with migration.obj_as_admin():
                migration.save()

            self.network_api.setup_networks_on_host(context, instance,
                                                    migration.source_compute)

            block_device_info = self._get_instance_block_device_info(
                context, instance, refresh_conn_info=True)

            # Only power the guest back on if it was running before resize.
            power_on = old_vm_state != vm_states.STOPPED
            self.driver.finish_revert_migration(context, instance,
                                       network_info,
                                       block_device_info, power_on)

            instance.launched_at = timeutils.utcnow()
            instance.save(expected_task_state=task_states.RESIZE_REVERTING)

            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_finish(context,
                                                     instance,
                                                     migration_p)

            # if the original vm state was STOPPED, set it back to STOPPED
            LOG.info(_LI("Updating instance to original state: '%s'"),
                     old_vm_state)
            if power_on:
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = None
                instance.save()
            else:
                instance.task_state = task_states.POWERING_OFF
                instance.save()
                self.stop_instance(context, instance=instance)

            self._notify_about_instance_usage(
                context, instance, "resize.revert.end")
            quotas.commit()
    def _prep_resize(self, context, image, instance, instance_type,
                     quotas, request_spec, filter_properties, node,
                     clean_shutdown=True):
        """Validate the resize request and claim resources on this host.

        Raises MigrationError (after erroring out the instance) when the
        instance has no source host or when resizing to the same host is
        disallowed by config.  On success, stashes the new flavor and the
        old vm_state on the instance, takes a resize claim on ``node``
        and casts resize_instance to the source compute.
        """

        if not filter_properties:
            filter_properties = {}

        if not instance.host:
            self._set_instance_error_state(context, instance)
            msg = _('Instance has no source host')
            raise exception.MigrationError(reason=msg)

        same_host = instance.host == self.host
        if same_host and not CONF.allow_resize_to_same_host:
            self._set_instance_error_state(context, instance)
            msg = _('destination same as source!')
            raise exception.MigrationError(reason=msg)

        # NOTE(danms): Stash the new instance_type to avoid having to
        # look it up in the database later
        instance.set_flavor(instance_type, 'new')

        # NOTE(mriedem): Stash the old vm_state so we can set the
        # resized/reverted instance back to the same state later.
        vm_state = instance.vm_state
        LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
        instance.system_metadata['old_vm_state'] = vm_state
        instance.save()

        limits = filter_properties.get('limits', {})
        rt = self._get_resource_tracker(node)
        with rt.resize_claim(context, instance, instance_type,
                             image_meta=image, limits=limits) as claim:
            LOG.info(_LI('Migrating'), context=context, instance=instance)
            self.compute_rpcapi.resize_instance(
                context, instance, claim.migration, image,
                instance_type, quotas.reservations,
                clean_shutdown)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def prep_resize(self, context, image, instance, instance_type,
                    reservations, request_spec, filter_properties, node,
                    clean_shutdown=True):
        """Initiates the process of moving a running instance to another host.

        Possibly changes the RAM and disk size in the process.  On
        failure (other than a migration pre-check error) the resize is
        re-scheduled to another host via _reschedule_resize_or_reraise.
        """
        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node,
                      instance=instance)

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            compute_utils.notify_usage_exists(self.notifier, context, instance,
                                              current_period=True)
            self._notify_about_instance_usage(
                context, instance, "resize.prep.start")
            try:
                self._prep_resize(context, image, instance,
                                  instance_type, quotas,
                                  request_spec, filter_properties,
                                  node, clean_shutdown)
            # NOTE(dgenin): This is thrown in LibvirtDriver when the
            # instance to be migrated is backed by LVM.
            # Remove when LVM migration is implemented.
            except exception.MigrationPreCheckError:
                raise
            except Exception:
                # try to re-schedule the resize elsewhere:
                exc_info = sys.exc_info()
                self._reschedule_resize_or_reraise(context, image, instance,
                        exc_info, instance_type, quotas, request_spec,
                        filter_properties)
            finally:
                # The .end notification is emitted even when the resize is
                # being re-scheduled or the exception re-raised.
                extra_usage_info = dict(
                        new_instance_type=instance_type['name'],
                        new_instance_type_id=instance_type['id'])
                self._notify_about_instance_usage(
                    context, instance, "resize.prep.end",
                    extra_usage_info=extra_usage_info)
    def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
            instance_type, quotas, request_spec, filter_properties):
        """Try to re-schedule the resize or re-raise the original error to
        error out the instance.

        ``exc_info`` is the sys.exc_info() triple of the original resize
        failure.  When rescheduling succeeds the original fault is
        recorded and notified; when it fails (or rescheduling is not
        possible) the original exception is re-raised with its traceback.
        """
        if not request_spec:
            request_spec = {}
        if not filter_properties:
            filter_properties = {}

        rescheduled = False
        instance_uuid = instance.uuid

        try:
            reschedule_method = self.compute_task_api.resize_instance
            scheduler_hint = dict(filter_properties=filter_properties)
            method_args = (instance, None, scheduler_hint, instance_type,
                           quotas.reservations)
            task_state = task_states.RESIZE_PREP

            rescheduled = self._reschedule(context, request_spec,
                    filter_properties, instance, reschedule_method,
                    method_args, task_state, exc_info)
        except Exception as error:
            # Rescheduling itself blew up: record that fault too, then
            # fall through to re-raise the *original* error below.
            rescheduled = False
            LOG.exception(_LE("Error trying to reschedule"),
                          instance_uuid=instance_uuid)
            compute_utils.add_instance_fault_from_exc(context,
                    instance, error,
                    exc_info=sys.exc_info())
            self._notify_about_instance_usage(context, instance,
                    'resize.error', fault=error)

        if rescheduled:
            self._log_original_error(exc_info, instance_uuid)
            compute_utils.add_instance_fault_from_exc(context,
                    instance, exc_info[1], exc_info=exc_info)
            self._notify_about_instance_usage(context, instance,
                    'resize.error', fault=exc_info[1])
        else:
            # not re-scheduling
            # Python 2 three-argument raise: re-raises the original
            # exception with its original traceback preserved.
            raise exc_info[0], exc_info[1], exc_info[2]
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def resize_instance(self, context, instance, image,
                        reservations, migration, instance_type,
                        clean_shutdown=True):
        """Starts the migration of a running instance to another host.

        Runs on the source compute: powers off the guest, migrates its
        disk to the destination host, disconnects volumes on this host
        and finally casts finish_resize to the destination compute.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            if (not instance_type or
                not isinstance(instance_type, objects.Flavor)):
                instance_type = objects.Flavor.get_by_id(
                    context, migration['new_instance_type_id'])

            network_info = self._get_instance_nw_info(context, instance)

            migration.status = 'migrating'
            with migration.obj_as_admin():
                migration.save()

            instance.task_state = task_states.RESIZE_MIGRATING
            instance.save(expected_task_state=task_states.RESIZE_PREP)

            self._notify_about_instance_usage(
                context, instance, "resize.start", network_info=network_info)

            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                context, instance, bdms=bdms)

            # Honor (or skip) a graceful guest shutdown before migrating.
            timeout, retry_interval = self._get_power_off_values(context,
                                            instance, clean_shutdown)
            disk_info = self.driver.migrate_disk_and_power_off(
                context, instance, migration.dest_host,
                instance_type, network_info,
                block_device_info,
                timeout, retry_interval)

            self._terminate_volume_connections(context, instance, bdms)

            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance,
                                                    migration_p)

            migration.status = 'post-migrating'
            with migration.obj_as_admin():
                migration.save()

            # Point the instance at the destination before handing off.
            instance.host = migration.dest_compute
            instance.node = migration.dest_node
            instance.task_state = task_states.RESIZE_MIGRATED
            instance.save(expected_task_state=task_states.RESIZE_MIGRATING)

            self.compute_rpcapi.finish_resize(context, instance,
                    migration, image, disk_info,
                    migration.dest_compute, reservations=quotas.reservations)

            self._notify_about_instance_usage(context, instance, "resize.end",
                                              network_info=network_info)
            self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
@staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type['id']
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.set_flavor(instance_type)
    def _finish_resize(self, context, instance, migration, disk_info,
                       image):
        """Apply the new flavor and finish the guest on the destination.

        Stashes the old flavor as 'old', applies the new one when the
        flavor actually changed, wires up networking, and asks the driver
        to finish the migration.  If the driver fails, the previous
        flavor info is restored before the exception propagates.
        """
        resize_instance = False
        old_instance_type_id = migration['old_instance_type_id']
        new_instance_type_id = migration['new_instance_type_id']
        old_instance_type = instance.get_flavor()
        # NOTE(mriedem): Get the old_vm_state so we know if we should
        # power on the instance. If old_vm_state is not set we need to default
        # to ACTIVE for backwards compatibility
        old_vm_state = instance.system_metadata.get('old_vm_state',
                                                    vm_states.ACTIVE)
        instance.set_flavor(old_instance_type, 'old')

        if old_instance_type_id != new_instance_type_id:
            instance_type = instance.get_flavor('new')
            self._set_instance_info(instance, instance_type)
            resize_instance = True

        # NOTE(tr3buchet): setup networks on destination host
        self.network_api.setup_networks_on_host(context, instance,
                                                migration['dest_compute'])

        migration_p = obj_base.obj_to_primitive(migration)
        self.network_api.migrate_instance_finish(context,
                                                 instance,
                                                 migration_p)

        network_info = self._get_instance_nw_info(context, instance)

        instance.task_state = task_states.RESIZE_FINISH
        instance.save(expected_task_state=task_states.RESIZE_MIGRATED)

        self._notify_about_instance_usage(
            context, instance, "finish_resize.start",
            network_info=network_info)

        block_device_info = self._get_instance_block_device_info(
            context, instance, refresh_conn_info=True)

        # NOTE(mriedem): If the original vm_state was STOPPED, we don't
        # automatically power on the instance after it's migrated
        power_on = old_vm_state != vm_states.STOPPED
        try:
            self.driver.finish_migration(context, migration, instance,
                                         disk_info,
                                         network_info,
                                         image, resize_instance,
                                         block_device_info, power_on)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Undo the flavor swap so the DB still reflects reality.
                if resize_instance:
                    self._set_instance_info(instance,
                                            old_instance_type)

        migration.status = 'finished'
        with migration.obj_as_admin():
            migration.save()

        instance.vm_state = vm_states.RESIZED
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESIZE_FINISH)

        self._update_scheduler_instance_info(context, instance)
        self._notify_about_instance_usage(
            context, instance, "finish_resize.end",
            network_info=network_info)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def finish_resize(self, context, disk_info, image, instance,
                      reservations, migration):
        """Completes the migration process.

        Sets up the newly transferred disk and turns on the instance at its
        new host machine.  On failure the quota reservations are rolled
        back and the instance is put into ERROR state before the original
        exception is re-raised.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            self._finish_resize(context, instance, migration,
                                disk_info, image)
            quotas.commit()
        except Exception:
            LOG.exception(_LE('Setting instance vm_state to ERROR'),
                          instance=instance)
            with excutils.save_and_reraise_exception():
                # A rollback failure must not mask the original error,
                # hence the nested try/except around it.
                try:
                    quotas.rollback()
                except Exception:
                    LOG.exception(_LE("Failed to rollback quota for failed "
                                      "finish_resize"),
                                  instance=instance)
                self._set_instance_error_state(context, instance)
@object_compat
@wrap_exception()
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
network_info = self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
network_info = self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def pause_instance(self, context, instance):
        """Pause an instance on this host."""
        context = context.elevated()
        LOG.info(_LI('Pausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'pause.start')
        self.driver.pause(instance)
        # Re-read the power state from the driver before recording PAUSED;
        # save() asserts the task state is still PAUSING so a concurrent
        # state change aborts the update.
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.PAUSED
        instance.task_state = None
        instance.save(expected_task_state=task_states.PAUSING)
        self._notify_about_instance_usage(context, instance, 'pause.end')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unpause_instance(self, context, instance):
        """Unpause a paused instance on this host."""
        context = context.elevated()
        LOG.info(_LI('Unpausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'unpause.start')
        self.driver.unpause(instance)
        # Re-read the power state from the driver before recording ACTIVE;
        # save() asserts the task state is still UNPAUSING.
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.UNPAUSING)
        self._notify_about_instance_usage(context, instance, 'unpause.end')
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime()
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
diags = self.driver.get_instance_diagnostics(instance)
return diags.serialize()
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def suspend_instance(self, context, instance):
        """Suspend the given instance."""
        context = context.elevated()

        # Store the old state so resume_instance can restore it (it pops
        # 'old_vm_state' back out of system_metadata).
        instance.system_metadata['old_vm_state'] = instance.vm_state
        self._notify_about_instance_usage(context, instance, 'suspend.start')

        with self._error_out_instance_on_exception(context, instance,
             instance_state=instance.vm_state):
            self.driver.suspend(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.SUSPENDED
        instance.task_state = None
        instance.save(expected_task_state=task_states.SUSPENDING)
        self._notify_about_instance_usage(context, instance, 'suspend.end')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def resume_instance(self, context, instance):
        """Resume the given suspended instance.

        The vm_state is restored to whatever suspend_instance stashed in
        system_metadata as 'old_vm_state', defaulting to ACTIVE.
        """
        context = context.elevated()
        LOG.info(_LI('Resuming'), context=context, instance=instance)

        self._notify_about_instance_usage(context, instance, 'resume.start')
        network_info = self._get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(
            context, instance)

        with self._error_out_instance_on_exception(context, instance,
             instance_state=instance.vm_state):
            self.driver.resume(context, instance, network_info,
                               block_device_info)

        instance.power_state = self._get_power_state(context, instance)

        # We default to the ACTIVE state for backwards compatibility
        instance.vm_state = instance.system_metadata.pop('old_vm_state',
                                                         vm_states.ACTIVE)

        instance.task_state = None
        instance.save(expected_task_state=task_states.RESUMING)
        self._notify_about_instance_usage(context, instance, 'resume.end')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def shelve_instance(self, context, instance, image_id,
                        clean_shutdown=True):
        """Shelve an instance.

        This should be used when you want to take a snapshot of the instance.
        It also adds system_metadata that can be used by a periodic task to
        offload the shelved instance after a period of time.

        :param context: request context
        :param instance: an Instance object
        :param image_id: an image id to snapshot to.
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period=True)
        self._notify_about_instance_usage(context, instance, 'shelve.start')

        def update_task_state(task_state, expected_state=task_states.SHELVING):
            # Map the generic snapshot task states reported by the driver
            # onto their shelve-specific equivalents before saving.
            shelving_state_map = {
                task_states.IMAGE_PENDING_UPLOAD:
                    task_states.SHELVING_IMAGE_PENDING_UPLOAD,
                task_states.IMAGE_UPLOADING:
                    task_states.SHELVING_IMAGE_UPLOADING,
                task_states.SHELVING: task_states.SHELVING}
            task_state = shelving_state_map[task_state]
            expected_state = shelving_state_map[expected_state]
            instance.task_state = task_state
            instance.save(expected_task_state=expected_state)

        self._power_off_instance(context, instance, clean_shutdown)
        self.driver.snapshot(context, instance, image_id, update_task_state)

        # Stash where/when/what we shelved so unshelve and the periodic
        # offload task can find it later.
        instance.system_metadata['shelved_at'] = timeutils.strtime()
        instance.system_metadata['shelved_image_id'] = image_id
        instance.system_metadata['shelved_host'] = self.host
        instance.vm_state = vm_states.SHELVED
        instance.task_state = None
        if CONF.shelved_offload_time == 0:
            instance.task_state = task_states.SHELVING_OFFLOADING
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=[
                task_states.SHELVING,
                task_states.SHELVING_IMAGE_UPLOADING])

        self._notify_about_instance_usage(context, instance, 'shelve.end')

        # Zero offload time means offload immediately instead of waiting
        # for the periodic task.
        if CONF.shelved_offload_time == 0:
            self.shelve_offload_instance(context, instance,
                                         clean_shutdown=False)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def shelve_offload_instance(self, context, instance, clean_shutdown=True):
        """Remove a shelved instance from the hypervisor.

        This frees up those resources for use by other instances, but may lead
        to slower unshelve times for this instance. This method is used by
        volume backed instances since restoring them doesn't involve the
        potentially large download of an image.

        :param context: request context
        :param instance: nova.objects.instance.Instance
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        self._notify_about_instance_usage(context, instance,
                                          'shelve_offload.start')

        self._power_off_instance(context, instance, clean_shutdown)
        current_power_state = self._get_power_state(context, instance)

        self.network_api.cleanup_instance_network_on_host(context, instance,
                                                          instance.host)
        network_info = self._get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        # Tear down the local guest to free this host's resources.
        self.driver.destroy(context, instance, network_info,
                            block_device_info)

        instance.power_state = current_power_state
        # Clearing host/node detaches the instance from this compute so it
        # can later be unshelved on any host.
        instance.host = None
        instance.node = None
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = None
        instance.save(expected_task_state=[task_states.SHELVING,
                                           task_states.SHELVING_OFFLOADING])
        self._delete_scheduler_instance_info(context, instance.uuid)
        self._notify_about_instance_usage(context, instance,
                                          'shelve_offload.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties=None, node=None):
"""Unshelve the instance.
:param context: request context
:param instance: a nova.objects.instance.Instance object
:param image: an image to build from. If None we assume a
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
    def _unshelve_instance(self, context, instance, image, filter_properties,
                           node):
        """Spawn the shelved instance back on this host.

        Re-creates block devices and networking, claims resources on
        ``node`` and spawns the guest from ``image`` (or, when ``image``
        is falsy, from image metadata stashed in system_metadata).  On
        spawn failure the original image reference is restored and the
        unshelve snapshot deleted before the exception propagates.
        """
        self._notify_about_instance_usage(context, instance, 'unshelve.start')
        instance.task_state = task_states.SPAWNING
        instance.save()

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        block_device_info = self._prep_block_device(context, instance, bdms,
                                                    do_check_attach=False)
        # Temporarily blank out side-effecting fields; restored after spawn.
        scrubbed_keys = self._unshelve_instance_key_scrub(instance)

        if node is None:
            node = self.driver.get_available_nodes()[0]
            LOG.debug('No node specified, defaulting to %s', node,
                      instance=instance)

        rt = self._get_resource_tracker(node)
        limits = filter_properties.get('limits', {})

        if image:
            # Remember the shelve-time image ref so it can be restored if
            # the spawn below fails.
            shelved_image_ref = instance.image_ref
            instance.image_ref = image['id']
            image_meta = image
        else:
            image_meta = utils.get_image_from_system_metadata(
                instance.system_metadata)

        self.network_api.setup_instance_network_on_host(context, instance,
                                                        self.host)
        network_info = self._get_instance_nw_info(context, instance)
        try:
            with rt.instance_claim(context, instance, limits):
                self.driver.spawn(context, instance, image_meta,
                                  injected_files=[],
                                  admin_password=None,
                                  network_info=network_info,
                                  block_device_info=block_device_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Instance failed to spawn'),
                              instance=instance)
                if image:
                    instance.image_ref = shelved_image_ref
                    self._delete_snapshot_of_shelved_instance(context,
                            instance, image['id'])

        self._unshelve_instance_key_restore(instance, scrubbed_keys)
        self._update_instance_after_spawn(context, instance)
        # Delete system_metadata for a shelved instance
        compute_utils.remove_shelved_keys_from_system_metadata(instance)

        instance.save(expected_task_state=task_states.SPAWNING)
        self._update_scheduler_instance_info(context, instance)
        self._notify_about_instance_usage(context, instance, 'unshelve.end')
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug('Reset network', context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', context=context, instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self._get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
    @object_compat
    @messaging.expected_exceptions(NotImplementedError,
                                   exception.InstanceNotFound)
    @wrap_exception()
    @wrap_instance_fault
    def get_console_output(self, context, instance, tail_length):
        """Send the console output for the given instance.

        :param tail_length: if not None, only the last ``tail_length``
            lines of the output are returned.
        """
        context = context.elevated()
        LOG.info(_LI("Get console output"), context=context,
                 instance=instance)
        output = self.driver.get_console_output(context, instance)

        if tail_length is not None:
            output = self._tail_log(output, tail_length)

        # Round-trip through UTF-8 then ASCII with replacement so the
        # result is clean ASCII — presumably to survive RPC
        # serialization; TODO confirm with callers.
        return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
return '\n'.join(log.split('\n')[-int(length):])
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                               exception.InstanceNotReady,
                               exception.InstanceNotFound,
                               exception.ConsoleTypeUnavailable,
                               NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
    """Return connection information for a vnc console."""
    context = context.elevated()
    LOG.debug("Getting vnc console", instance=instance)
    # One-time token the proxy will use to authorize this connection.
    token = str(uuid.uuid4())

    if not CONF.vnc_enabled:
        raise exception.ConsoleTypeUnavailable(console_type=console_type)

    # For essex, novncproxy_base_url must include the full path
    # including the html file (like http://myhost/vnc_auto.html)
    base_urls = {'novnc': CONF.novncproxy_base_url,
                 'xvpvnc': CONF.xvpvncproxy_base_url}
    if console_type not in base_urls:
        raise exception.ConsoleTypeInvalid(console_type=console_type)
    access_url = '%s?token=%s' % (base_urls[console_type], token)

    try:
        # Ask the driver for connect info, then decorate it with the
        # generated token and proxy access URL.
        console = self.driver.get_vnc_console(context, instance)
        connect_info = console.get_connection_info(token, access_url)
    except exception.InstanceNotFound:
        # A still-building instance may not be visible to the driver
        # yet; report "not ready" rather than "not found".
        if instance.vm_state != vm_states.BUILDING:
            raise
        raise exception.InstanceNotReady(instance_id=instance.uuid)

    return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                               exception.InstanceNotReady,
                               exception.InstanceNotFound,
                               exception.ConsoleTypeUnavailable,
                               NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
    """Return connection information for a spice console.

    :param console_type: must be 'spice-html5'; anything else raises
                         ConsoleTypeInvalid
    :raises: ConsoleTypeUnavailable when SPICE is disabled in config
    """
    context = context.elevated()
    LOG.debug("Getting spice console", instance=instance)
    # One-time token the proxy will use to authorize this connection.
    token = str(uuid.uuid4())

    if not CONF.spice.enabled:
        raise exception.ConsoleTypeUnavailable(console_type=console_type)

    if console_type == 'spice-html5':
        # For essex, spicehtml5proxy_base_url must include the full path
        # including the html file (like http://myhost/spice_auto.html)
        access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
                                      token)
    else:
        raise exception.ConsoleTypeInvalid(console_type=console_type)

    try:
        # Retrieve connect info from driver, and then decorate with our
        # access info token
        console = self.driver.get_spice_console(context, instance)
        connect_info = console.get_connection_info(token, access_url)
    except exception.InstanceNotFound:
        # A still-building instance may not be visible to the driver
        # yet; report "not ready" rather than "not found".
        if instance.vm_state != vm_states.BUILDING:
            raise
        raise exception.InstanceNotReady(instance_id=instance.uuid)

    return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                               exception.InstanceNotReady,
                               exception.InstanceNotFound,
                               exception.ConsoleTypeUnavailable,
                               NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
    """Return connection information for a RDP console.

    :param console_type: must be 'rdp-html5'; anything else raises
                         ConsoleTypeInvalid
    :raises: ConsoleTypeUnavailable when RDP is disabled in config
    """
    context = context.elevated()
    LOG.debug("Getting RDP console", instance=instance)
    # One-time token the proxy will use to authorize this connection.
    token = str(uuid.uuid4())

    if not CONF.rdp.enabled:
        raise exception.ConsoleTypeUnavailable(console_type=console_type)

    if console_type == 'rdp-html5':
        access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
                                      token)
    else:
        raise exception.ConsoleTypeInvalid(console_type=console_type)

    try:
        # Retrieve connect info from driver, and then decorate with our
        # access info token
        console = self.driver.get_rdp_console(context, instance)
        connect_info = console.get_connection_info(token, access_url)
    except exception.InstanceNotFound:
        # A still-building instance may not be visible to the driver
        # yet; report "not ready" rather than "not found".
        if instance.vm_state != vm_states.BUILDING:
            raise
        raise exception.InstanceNotReady(instance_id=instance.uuid)

    return connect_info
@messaging.expected_exceptions(
    exception.ConsoleTypeInvalid,
    exception.InstanceNotReady,
    exception.InstanceNotFound,
    exception.ConsoleTypeUnavailable,
    exception.SocketPortRangeExhaustedException,
    exception.ImageSerialPortNumberInvalid,
    exception.ImageSerialPortNumberExceedFlavorValue,
    NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_serial_console(self, context, console_type, instance, index=0, at_port=None):
    """Returns connection information for a serial console.

    :param index: which of the instance's serial consoles to use
    :param at_port: if set, request the console bound to this specific
                    port (NOTE(review): presumably a host-side port
                    number passed through to the driver -- confirm
                    against the driver implementation)
    :raises: ConsoleTypeUnavailable when serial consoles are disabled
    """
    LOG.debug("Getting serial console", instance=instance)

    if not CONF.serial_console.enabled:
        raise exception.ConsoleTypeUnavailable(console_type=console_type)

    context = context.elevated()
    # One-time token the proxy will use to authorize this connection.
    token = str(uuid.uuid4())
    access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)

    try:
        # Retrieve connect info from driver, and then decorate with our
        # access info token
        console = self.driver.get_serial_console(context, instance, index=index, at_port=at_port)
        connect_info = console.get_connection_info(token, access_url)
    except exception.InstanceNotFound:
        # A still-building instance may not be visible to the driver
        # yet; report "not ready" rather than "not found".
        if instance.vm_state != vm_states.BUILDING:
            raise
        raise exception.InstanceNotReady(instance_id=instance.uuid)

    return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                               exception.InstanceNotReady,
                               exception.InstanceNotFound)
@object_compat
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
    """Check that *port* matches the console the driver reports.

    Returns True when the driver's current console for the requested
    type is listening on *port*.
    """
    if console_type == "spice-html5":
        console = self.driver.get_spice_console(ctxt, instance)
    elif console_type == "rdp-html5":
        console = self.driver.get_rdp_console(ctxt, instance)
    elif console_type == "serial":
        console = self.driver.get_serial_console(ctxt, instance, at_port=port)
    else:
        # Any other type is treated as a VNC flavour.
        console = self.driver.get_vnc_console(ctxt, instance)

    return console.port == port
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
                              volume_id, disk_bus=None, device_type=None,
                              return_bdm_object=False):
    """Reserve a device name for a volume attach on this instance.

    The reservation is serialized per instance and backed by a
    BlockDeviceMapping row created up front, so concurrent attaches
    cannot claim the same device name.

    :param device: requested device name (may be adjusted to a free one)
    :returns: the new BDM object when return_bdm_object is True,
              otherwise the reserved device name
    """
    # NOTE(ndipanov): disk_bus and device_type will be set to None if not
    # passed (by older clients) and defaulted by the virt driver. Remove
    # default values on the next major RPC version bump.
    @utils.synchronized(instance.uuid)
    def do_reserve():
        bdms = (
            objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid))

        # Pick a free device name given what is already mapped.
        device_name = compute_utils.get_device_name_for_instance(
            context, instance, bdms, device)

        # NOTE(vish): create bdm here to avoid race condition
        bdm = objects.BlockDeviceMapping(
            context=context,
            source_type='volume', destination_type='volume',
            instance_uuid=instance.uuid,
            volume_id=volume_id or 'reserved',
            device_name=device_name,
            disk_bus=disk_bus, device_type=device_type)
        bdm.create()

        if return_bdm_object:
            return bdm
        else:
            return device_name

    return do_reserve()
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint,
                  instance, bdm=None):
    """Attach a volume to an instance.

    :param mountpoint: not used directly here; the mount device comes
                       from the BDM (NOTE(review): presumably kept for
                       RPC signature compatibility -- confirm)
    :param bdm: optional pre-created BlockDeviceMapping; looked up by
                volume id when not supplied
    """
    if not bdm:
        bdm = objects.BlockDeviceMapping.get_by_volume_id(
            context, volume_id)
    driver_bdm = driver_block_device.convert_volume(bdm)

    @utils.synchronized(instance.uuid)
    def do_attach_volume(context, instance, driver_bdm):
        try:
            return self._attach_volume(context, instance, driver_bdm)
        except Exception:
            # The attach failed; remove the half-created BDM row while
            # re-raising the original error.
            with excutils.save_and_reraise_exception():
                bdm.destroy()

    do_attach_volume(context, instance, driver_bdm)
def _attach_volume(self, context, instance, bdm):
    """Perform the actual attach and emit the usage notification.

    :param bdm: a driver block device (the result of
                driver_block_device.convert_volume), not a bare
                BlockDeviceMapping
    """
    context = context.elevated()
    LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
             {'volume_id': bdm.volume_id,
              'mountpoint': bdm['mount_device']},
             context=context, instance=instance)
    try:
        bdm.attach(context, instance, self.volume_api, self.driver,
                   do_check_attach=False, do_driver_attach=True)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to attach %(volume_id)s "
                              "at %(mountpoint)s"),
                          {'volume_id': bdm.volume_id,
                           'mountpoint': bdm['mount_device']},
                          context=context, instance=instance)
            # Hand the reservation back to the volume service so the
            # volume does not stay stuck in 'attaching'.
            self.volume_api.unreserve_volume(context, bdm.volume_id)

    info = {'volume_id': bdm.volume_id}
    self._notify_about_instance_usage(
        context, instance, "volume.attach", extra_usage_info=info)
def _driver_detach_volume(self, context, instance, bdm):
    """Do the actual driver detach using block device mapping.

    DiskNotFound from the driver is logged and ignored (the disk is
    already gone); any other failure rolls the volume state back via
    roll_detaching and re-raises.
    """
    mp = bdm.device_name
    volume_id = bdm.volume_id

    LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
             {'volume_id': volume_id, 'mp': mp},
             context=context, instance=instance)

    connection_info = jsonutils.loads(bdm.connection_info)
    # NOTE(vish): We currently don't use the serial when disconnecting,
    # but added for completeness in case we ever do.
    if connection_info and 'serial' not in connection_info:
        connection_info['serial'] = volume_id
    try:
        if not self.driver.instance_exists(instance):
            LOG.warning(_LW('Detaching volume from unknown instance'),
                        context=context, instance=instance)

        encryption = encryptors.get_encryption_metadata(
            context, self.volume_api, volume_id, connection_info)

        self.driver.detach_volume(connection_info,
                                  instance,
                                  mp,
                                  encryption=encryption)
    except exception.DiskNotFound as err:
        LOG.warning(_LW('Ignoring DiskNotFound exception while detaching '
                        'volume %(volume_id)s from %(mp)s: %(err)s'),
                    {'volume_id': volume_id, 'mp': mp, 'err': err},
                    instance=instance)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to detach volume %(volume_id)s '
                              'from %(mp)s'),
                          {'volume_id': volume_id, 'mp': mp},
                          context=context, instance=instance)
            self.volume_api.roll_detaching(context, volume_id)
def _detach_volume(self, context, volume_id, instance, destroy_bdm=True):
    """Detach a volume from an instance.

    :param context: security context
    :param volume_id: the volume id
    :param instance: the Instance object to detach the volume from
    :param destroy_bdm: if True, the corresponding BDM entry will be marked
                        as deleted. Disabling this is useful for operations
                        like rebuild, when we don't want to destroy BDM
    """
    bdm = objects.BlockDeviceMapping.get_by_volume_id(
        context, volume_id)
    if CONF.volume_usage_poll_interval > 0:
        vol_stats = []
        mp = bdm.device_name
        # Handle bootable volumes which will not contain /dev/
        # NOTE: only strip the prefix when the device name actually
        # starts with '/dev/'; the previous "'/dev/' in mp" test would
        # also mangle names merely containing that substring.
        if mp.startswith('/dev/'):
            mp = mp[5:]

        try:
            vol_stats = self.driver.block_stats(instance, mp)
        except NotImplementedError:
            # Drivers without block stats simply skip the usage update.
            pass

        if vol_stats:
            LOG.debug("Updating volume usage cache with totals",
                      instance=instance)
            rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
            self.conductor_api.vol_usage_update(context, volume_id,
                                                rd_req, rd_bytes,
                                                wr_req, wr_bytes,
                                                instance,
                                                update_totals=True)

    # Detach from the hypervisor first, then sever the host connection.
    self._driver_detach_volume(context, instance, bdm)
    connector = self.driver.get_volume_connector(instance)
    self.volume_api.terminate_connection(context, volume_id, connector)

    if destroy_bdm:
        bdm.destroy()

    info = dict(volume_id=volume_id)
    self._notify_about_instance_usage(
        context, instance, "volume.detach", extra_usage_info=info)
    self.volume_api.detach(context.elevated(), volume_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
    """Detach a volume from an instance.

    RPC entry point; the work (including destroying the BDM) happens
    in _detach_volume.
    """
    self._detach_volume(context, volume_id, instance)
def _init_volume_connection(self, context, new_volume_id,
                            old_volume_id, connector, instance, bdm):
    """Initialize the new volume's connection for a swap.

    Carries the old connection's 'serial' over to the new connection
    info and returns an (old_cinfo, new_cinfo) tuple.
    """
    new_cinfo = self.volume_api.initialize_connection(context,
                                                      new_volume_id,
                                                      connector)
    old_cinfo = jsonutils.loads(bdm['connection_info'])
    if old_cinfo:
        # Older connection info may predate the 'serial' key.
        old_cinfo.setdefault('serial', old_volume_id)
    new_cinfo['serial'] = old_cinfo['serial']
    return old_cinfo, new_cinfo
def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
                 new_volume_id):
    """Swap one attached volume for another at the driver level.

    On failure the new volume is unreserved and the old one rolled
    back; in all cases the connection to whichever volume lost is
    terminated and Cinder's migrate_volume_completion is called.

    :returns: (migrate_volume_completion result, new connection info)
    """
    mountpoint = bdm['device_name']
    failed = False
    new_cinfo = None
    resize_to = 0
    try:
        old_cinfo, new_cinfo = self._init_volume_connection(context,
                                                            new_volume_id,
                                                            old_volume_id,
                                                            connector,
                                                            instance,
                                                            bdm)
        # If the new volume is larger, ask the driver to grow the disk
        # to the new size as part of the swap.
        old_vol_size = self.volume_api.get(context, old_volume_id)['size']
        new_vol_size = self.volume_api.get(context, new_volume_id)['size']
        if new_vol_size > old_vol_size:
            resize_to = new_vol_size
        self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
                                resize_to)
    except Exception:
        failed = True
        with excutils.save_and_reraise_exception():
            # new_cinfo set => the swap itself failed; otherwise we
            # never even connected to the new volume.
            if new_cinfo:
                msg = _LE("Failed to swap volume %(old_volume_id)s "
                          "for %(new_volume_id)s")
                LOG.exception(msg, {'old_volume_id': old_volume_id,
                                    'new_volume_id': new_volume_id},
                              context=context,
                              instance=instance)
            else:
                msg = _LE("Failed to connect to volume %(volume_id)s "
                          "with volume at %(mountpoint)s")
                LOG.exception(msg, {'volume_id': new_volume_id,
                                    'mountpoint': bdm['device_name']},
                              context=context,
                              instance=instance)
            self.volume_api.roll_detaching(context, old_volume_id)
            self.volume_api.unreserve_volume(context, new_volume_id)
    finally:
        # Terminate the connection to the volume we are not keeping.
        conn_volume = new_volume_id if failed else old_volume_id
        if new_cinfo:
            self.volume_api.terminate_connection(context,
                                                 conn_volume,
                                                 connector)
        # If Cinder initiated the swap, it will keep
        # the original ID
        comp_ret = self.volume_api.migrate_volume_completion(
                                                  context,
                                                  old_volume_id,
                                                  new_volume_id,
                                                  error=failed)

    return (comp_ret, new_cinfo)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance):
    """Swap volume for an instance.

    Performs the driver-level swap, then rewrites the BDM to point at
    the volume id Cinder told us to keep (save_volume_id).
    """
    context = context.elevated()
    bdm = objects.BlockDeviceMapping.get_by_volume_id(
            context, old_volume_id, instance_uuid=instance.uuid)
    connector = self.driver.get_volume_connector(instance)
    comp_ret, new_cinfo = self._swap_volume(context, instance,
                                            bdm,
                                            connector,
                                            old_volume_id,
                                            new_volume_id)

    save_volume_id = comp_ret['save_volume_id']

    # Update bdm
    values = {
        'connection_info': jsonutils.dumps(new_cinfo),
        'delete_on_termination': False,
        'source_type': 'volume',
        'destination_type': 'volume',
        'snapshot_id': None,
        'volume_id': save_volume_id,
        'volume_size': None,
        'no_device': None}
    bdm.update(values)
    bdm.save()
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
    """Remove a volume connection using the volume api.

    A missing BDM (NotFound) is silently ignored: nothing to
    disconnect on this host.
    """
    # NOTE(vish): We don't want to actually mark the volume
    # detached, or delete the bdm, just remove the
    # connection from this host.

    # NOTE(PhilDay): Can't use object_compat decorator here as
    # instance is not the second parameter
    if isinstance(instance, dict):
        metas = ['metadata', 'system_metadata']
        instance = objects.Instance._from_db_object(
            context, objects.Instance(), instance,
            expected_attrs=metas)
        instance._context = context
    try:
        bdm = objects.BlockDeviceMapping.get_by_volume_id(
            context, volume_id)
        self._driver_detach_volume(context, instance, bdm)
        connector = self.driver.get_volume_connector(instance)
        self.volume_api.terminate_connection(context, volume_id, connector)
    except exception.NotFound:
        pass
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
                     requested_ip):
    """Use hotplug to add an network adapter to an instance.

    :param network_id: network to allocate a port on
    :param port_id: pre-existing port to use
    :param requested_ip: fixed IP to request for the port
    :returns: the attached VIF (network_info entry)
    :raises: InterfaceAttachFailed when allocation or the driver
             hotplug fails; a port allocated here is deallocated on
             a best-effort basis first
    """
    network_info = self.network_api.allocate_port_for_instance(
        context, instance, port_id, network_id, requested_ip)
    if len(network_info) != 1:
        LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
                      'ports'), dict(ports=len(network_info)))
        raise exception.InterfaceAttachFailed(
                instance_uuid=instance.uuid)
    image_ref = instance.get('image_ref')
    image_meta = compute_utils.get_image_metadata(
        context, self.image_api, image_ref, instance)

    try:
        self.driver.attach_interface(instance, image_meta, network_info[0])
    except exception.NovaException as ex:
        port_id = network_info[0].get('id')
        # NOTE: LOG.warning replaces the deprecated LOG.warn alias.
        LOG.warning(_LW("attach interface failed , try to deallocate "
                        "port %(port_id)s, reason: %(msg)s"),
                    {'port_id': port_id, 'msg': ex},
                    instance=instance)
        try:
            self.network_api.deallocate_port_for_instance(
                context, instance, port_id)
        except Exception:
            # Best effort only: the attach failure is what we report.
            LOG.warning(_LW("deallocate port %(port_id)s failed"),
                        {'port_id': port_id}, instance=instance)
        raise exception.InterfaceAttachFailed(
            instance_uuid=instance.uuid)

    return network_info[0]
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_interface(self, context, instance, port_id):
    """Detach an network adapter from an instance.

    :param port_id: id of the VIF/port to detach
    :raises: PortNotFound when the port is not attached to the
             instance; InterfaceDetachFailed when the driver detach
             fails
    """
    # Find the VIF matching the requested port in the cached
    # network info.
    network_info = instance.info_cache.network_info
    condemned = None
    for vif in network_info:
        if vif['id'] == port_id:
            condemned = vif
            break
    if condemned is None:
        raise exception.PortNotFound(_("Port %s is not "
                                       "attached") % port_id)
    try:
        self.driver.detach_interface(instance, condemned)
    except exception.NovaException as ex:
        LOG.warning(_LW("Detach interface failed, port_id=%(port_id)s,"
                        " reason: %(msg)s"),
                    {'port_id': port_id, 'msg': ex}, instance=instance)
        raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
    else:
        # Only release the port once the guest-side detach succeeded.
        try:
            self.network_api.deallocate_port_for_instance(
                context, instance, port_id)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                # Since this is a cast operation, log the failure for
                # triage.
                LOG.warning(_LW('Failed to deallocate port %(port_id)s '
                                'for instance. Error: %(error)s'),
                            {'port_id': port_id, 'error': ex},
                            instance=instance)
def _get_compute_info(self, context, host):
    """Return the (first) ComputeNode record for *host*."""
    return objects.ComputeNode.get_first_node_by_host_for_old_compat(
        context, host)
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
    """Check if the instance files are shared

    :param ctxt: security context
    :param instance: dict of instance data
    :param data: result of driver.check_instance_shared_storage_local

    Returns True if instance disks located on shared storage and
    False otherwise.
    """
    return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
                                       block_migration, disk_over_commit):
    """Check if it is possible to execute live migration.

    This runs checks on the destination host, and then calls
    back to the source host to check the results.

    :param context: security context
    :param instance: dict of instance data
    :param block_migration: if true, prepare for block migration
    :param disk_over_commit: if true, allow disk over commit
    :returns: a dict containing migration info
    """
    src_compute_info = obj_base.obj_to_primitive(
        self._get_compute_info(ctxt, instance.host))
    dst_compute_info = obj_base.obj_to_primitive(
        self._get_compute_info(ctxt, CONF.host))
    dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
        instance, src_compute_info, dst_compute_info,
        block_migration, disk_over_commit)
    migrate_data = {}
    try:
        # RPC back to the source host so it can run its own checks
        # against the data gathered here.
        migrate_data = self.compute_rpcapi.\
                        check_can_live_migrate_source(ctxt, instance,
                                                      dest_check_data)
    finally:
        # Always give the driver a chance to undo whatever the
        # destination check set up.
        self.driver.check_can_live_migrate_destination_cleanup(ctxt,
                dest_check_data)
    if 'migrate_data' in dest_check_data:
        migrate_data.update(dest_check_data['migrate_data'])
    return migrate_data
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
    """Check if it is possible to execute live migration.

    Runs the source-side checks against the data gathered on the
    destination by check_can_live_migrate_destination.

    :param ctxt: security context
    :param instance: dict of instance data
    :param dest_check_data: result of check_can_live_migrate_destination
    :returns: a dict containing migration info
    """
    # Tell the driver whether the instance is volume backed.
    dest_check_data['is_volume_backed'] = (
        self.compute_api.is_volume_backed_instance(ctxt, instance))
    block_device_info = self._get_instance_block_device_info(
        ctxt, instance, refresh_conn_info=True)
    return self.driver.check_can_live_migrate_source(ctxt, instance,
                                                     dest_check_data,
                                                     block_device_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
                       migrate_data):
    """Preparations for live migration at dest host.

    :param context: security context
    :param instance: dict of instance data
    :param block_migration: if true, prepare for block migration
    :param disk: driver disk info for block migration
    :param migrate_data: if not None, it is a dict which holds data
                         required for live migration without shared
                         storage.
    :returns: the driver's pre_live_migration result
    """
    block_device_info = self._get_instance_block_device_info(
                        context, instance, refresh_conn_info=True)

    network_info = self._get_instance_nw_info(context, instance)
    self._notify_about_instance_usage(
                 context, instance, "live_migration.pre.start",
                 network_info=network_info)

    pre_live_migration_data = self.driver.pre_live_migration(context,
                                   instance,
                                   block_device_info,
                                   network_info,
                                   disk,
                                   migrate_data)

    # NOTE(tr3buchet): setup networks on destination host
    self.network_api.setup_networks_on_host(context, instance,
                                                     self.host)

    # Creating filters to hypervisors and firewalls.
    # An example is that nova-instance-instance-xxx,
    # which is written to libvirt.xml(Check "virsh nwfilter-list")
    # This nwfilter is necessary on the destination host.
    # In addition, this method is creating filtering rule
    # onto destination host.
    self.driver.ensure_filtering_rules_for_instance(instance,
                                        network_info)

    self._notify_about_instance_usage(
                 context, instance, "live_migration.pre.end",
                 network_info=network_info)

    return pre_live_migration_data
@wrap_exception()
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
                   migrate_data):
    """Executing live migration.

    :param context: security context
    :param instance: a nova.objects.instance.Instance object
    :param dest: destination host
    :param block_migration: if true, prepare for block migration
    :param migrate_data: implementation specific params
    """

    # NOTE(danms): since instance is not the first parameter, we can't
    # use @object_compat on this method. Since this is the only example,
    # we do this manually instead of complicating the decorator
    if not isinstance(instance, obj_base.NovaObject):
        expected = ['metadata', 'system_metadata',
                    'security_groups', 'info_cache']
        instance = objects.Instance._from_db_object(
            context, objects.Instance(), instance,
            expected_attrs=expected)

    # Create a local copy since we'll be modifying the dictionary
    migrate_data = dict(migrate_data or {})
    try:
        if block_migration:
            block_device_info = self._get_instance_block_device_info(
                context, instance)
            disk = self.driver.get_instance_disk_info(
                instance, block_device_info=block_device_info)
        else:
            disk = None

        # Prepare the destination host; its result is threaded through
        # migrate_data for the driver.
        pre_migration_data = self.compute_rpcapi.pre_live_migration(
            context, instance,
            block_migration, disk, dest, migrate_data)
        migrate_data['pre_live_migration_result'] = pre_migration_data

    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Pre live migration failed at %s'),
                          dest, instance=instance)
            self._rollback_live_migration(context, instance, dest,
                                          block_migration, migrate_data)

    # Executing live migration
    # live_migration might raises exceptions, but
    # nothing must be recovered in this version.
    self.driver.live_migration(context, instance, dest,
                               self._post_live_migration,
                               self._rollback_live_migration,
                               block_migration, migrate_data)
def _live_migration_cleanup_flags(self, block_migration, migrate_data):
"""Determine whether disks or instance path need to be cleaned up after
live migration (at source on success, at destination on rollback)
Block migration needs empty image at destination host before migration
starts, so if any failure occurs, any empty images has to be deleted.
Also Volume backed live migration w/o shared storage needs to delete
newly created instance-xxx dir on the destination as a part of its
rollback process
:param block_migration: if true, it was a block migration
:param migrate_data: implementation specific data
:returns: (bool, bool) -- do_cleanup, destroy_disks
"""
# NOTE(angdraug): block migration wouldn't have been allowed if either
# block storage or instance path were shared
is_shared_block_storage = not block_migration
is_shared_instance_path = not block_migration
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', is_shared_block_storage)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', is_shared_instance_path)
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
# must be deleted for preparing next live migration w/o shared storage
do_cleanup = block_migration or not is_shared_instance_path
destroy_disks = not is_shared_block_storage
return (do_cleanup, destroy_disks)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance,
                         dest, block_migration=False, migrate_data=None):
    """Post operations for live migration.

    This method is called from live_migration
    and mainly updating database record.

    :param ctxt: security context
    :param instance: instance dict
    :param dest: destination host
    :param block_migration: if true, prepare for block migration
    :param migrate_data: if not None, it is a dict which has data
    required for live migration without shared storage

    """
    LOG.info(_LI('_post_live_migration() is started..'),
             instance=instance)

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, instance.uuid)

    # Cleanup source host post live-migration
    block_device_info = self._get_instance_block_device_info(
                        ctxt, instance, bdms=bdms)
    self.driver.post_live_migration(ctxt, instance, block_device_info,
                                    migrate_data)

    # Detaching volumes.
    connector = self.driver.get_volume_connector(instance)
    for bdm in bdms:
        # NOTE(vish): We don't want to actually mark the volume
        # detached, or delete the bdm, just remove the
        # connection from this host.

        # remove the volume connection without detaching from hypervisor
        # because the instance is not running anymore on the current host
        if bdm.is_volume:
            self.volume_api.terminate_connection(ctxt, bdm.volume_id,
                                                 connector)

    # Releasing vlan.
    # (not necessary in current implementation?)

    network_info = self._get_instance_nw_info(ctxt, instance)

    self._notify_about_instance_usage(ctxt, instance,
                                      "live_migration._post.start",
                                      network_info=network_info)
    # Releasing security group ingress rule.
    self.driver.unfilter_instance(instance,
                                  network_info)

    migration = {'source_compute': self.host,
                 'dest_compute': dest, }
    self.network_api.migrate_instance_start(ctxt,
                                            instance,
                                            migration)

    destroy_vifs = False
    try:
        self.driver.post_live_migration_at_source(ctxt, instance,
                                                  network_info)
    except NotImplementedError as ex:
        LOG.debug(ex, instance=instance)
        # For all hypervisors other than libvirt, there is a possibility
        # they are unplugging networks from source node in the cleanup
        # method
        destroy_vifs = True

    # Define domain at destination host, without doing it,
    # pause/suspend/terminate do not work.
    self.compute_rpcapi.post_live_migration_at_destination(ctxt,
            instance, block_migration, dest)

    do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
            block_migration, migrate_data)

    if do_cleanup:
        self.driver.cleanup(ctxt, instance, network_info,
                            destroy_disks=destroy_disks,
                            migrate_data=migrate_data,
                            destroy_vifs=destroy_vifs)

    self.instance_events.clear_events_for_instance(instance)

    # NOTE(timello): make sure we update available resources on source
    # host even before next periodic task.
    self.update_available_resource(ctxt)

    self._update_scheduler_instance_info(ctxt, instance)
    self._notify_about_instance_usage(ctxt, instance,
                                      "live_migration._post.end",
                                      network_info=network_info)
    LOG.info(_LI('Migrating instance to %s finished successfully.'),
             dest, instance=instance)
    LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
                 "Domain not found: no domain with matching name.\" "
                 "This error can be safely ignored."),
             instance=instance)

    # Invalidate console auth tokens now that the instance has left
    # this host.
    if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
        if CONF.cells.enable:
            self.cells_rpcapi.consoleauth_delete_tokens(ctxt,
                    instance.uuid)
        else:
            self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,
                    instance.uuid)
@object_compat
@wrap_exception()
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
                                       block_migration):
    """Post operations for live migration .

    :param context: security context
    :param instance: Instance dict
    :param block_migration: if true, prepare for block migration
    """
    LOG.info(_LI('Post operation of migration started'),
             instance=instance)

    # NOTE(tr3buchet): setup networks on destination host
    #                  this is called a second time because
    #                  multi_host does not create the bridge in
    #                  plug_vifs
    self.network_api.setup_networks_on_host(context, instance,
                                                     self.host)
    migration = {'source_compute': instance.host,
                 'dest_compute': self.host, }
    self.network_api.migrate_instance_finish(context,
                                             instance,
                                             migration)

    network_info = self._get_instance_nw_info(context, instance)
    self._notify_about_instance_usage(
                 context, instance, "live_migration.post.dest.start",
                 network_info=network_info)
    block_device_info = self._get_instance_block_device_info(context,
                                                             instance)

    self.driver.post_live_migration_at_destination(context, instance,
                                        network_info,
                                        block_migration, block_device_info)
    # Restore instance state
    current_power_state = self._get_power_state(context, instance)
    node_name = None
    prev_host = instance.host
    try:
        compute_node = self._get_compute_info(context, self.host)
        node_name = compute_node.hypervisor_hostname
    except exception.ComputeHostNotFound:
        LOG.exception(_LE('Failed to get compute_info for %s'), self.host)
    finally:
        # Point the instance record at this host even if the node
        # lookup above failed.
        instance.host = self.host
        instance.power_state = current_power_state
        instance.task_state = None
        instance.node = node_name
        instance.save(expected_task_state=task_states.MIGRATING)

    # NOTE(tr3buchet): tear down networks on source host
    self.network_api.setup_networks_on_host(context, instance,
                                            prev_host, teardown=True)
    # NOTE(vish): this is necessary to update dhcp
    self.network_api.setup_networks_on_host(context, instance, self.host)
    self._notify_about_instance_usage(
                 context, instance, "live_migration.post.dest.end",
                 network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
                             dest, block_migration, migrate_data=None):
    """Recovers Instance/volume state from migrating -> running.

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    :param dest:
        This method is called from live migration src host.
        This param specifies destination host.
    :param block_migration: if true, prepare for block migration
    :param migrate_data:
        if not none, contains implementation specific data.

    """
    instance.task_state = None
    instance.save(expected_task_state=[task_states.MIGRATING])

    # NOTE(tr3buchet): setup networks on source host (really it's re-setup)
    self.network_api.setup_networks_on_host(context, instance, self.host)

    # Sever volume connections that pre_live_migration established on
    # the destination host.
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
    for bdm in bdms:
        if bdm.is_volume:
            self.compute_rpcapi.remove_volume_connection(
                    context, instance, bdm.volume_id, dest)

    self._notify_about_instance_usage(context, instance,
                                      "live_migration._rollback.start")

    do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
            block_migration, migrate_data)

    if do_cleanup:
        self.compute_rpcapi.rollback_live_migration_at_destination(
                context, instance, dest, destroy_disks=destroy_disks,
                migrate_data=migrate_data)

    self._notify_about_instance_usage(context, instance,
                                      "live_migration._rollback.end")
@object_compat
@wrap_exception()
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance,
                                           destroy_disks=True,
                                           migrate_data=None):
    """Cleaning up image directory that is created pre_live_migration.

    :param context: security context
    :param instance: a nova.objects.instance.Instance object sent over rpc
    :param destroy_disks: whether to also remove disks created on this
                          (destination) host
    :param migrate_data: implementation specific data passed through to
                         the driver rollback
    """
    network_info = self._get_instance_nw_info(context, instance)
    self._notify_about_instance_usage(
                  context, instance, "live_migration.rollback.dest.start",
                  network_info=network_info)

    # NOTE(tr3buchet): tear down networks on destination host
    self.network_api.setup_networks_on_host(context, instance,
                                            self.host, teardown=True)

    # NOTE(vish): The mapping is passed in so the driver can disconnect
    #             from remote volumes if necessary
    block_device_info = self._get_instance_block_device_info(context,
                                                             instance)
    self.driver.rollback_live_migration_at_destination(
                    context, instance, network_info, block_device_info,
                    destroy_disks=destroy_disks, migrate_data=migrate_data)
    self._notify_about_instance_usage(
                    context, instance, "live_migration.rollback.dest.end",
                    network_info=network_info)
    @periodic_task.periodic_task(
        spacing=CONF.heal_instance_info_cache_interval)
    def _heal_instance_info_cache(self, context):
        """Called periodically. On every call, try to update the
        info_cache's network information for another instance by
        calling to the network manager.

        This is implemented by keeping a cache of uuids of instances
        that live on this host. On each call, we pop one off of a
        list, pull the DB record, and try the call to the network API.
        If anything errors don't fail, as it's possible the instance
        has been deleted, etc.

        The uuid list (``self._instance_uuids_to_heal``) persists across
        invocations, so each periodic tick heals at most one instance.
        """
        heal_interval = CONF.heal_instance_info_cache_interval
        if not heal_interval:
            return
        instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
        instance = None
        LOG.debug('Starting heal instance info cache')
        if not instance_uuids:
            # The list of instances to heal is empty so rebuild it
            LOG.debug('Rebuilding the list of instances to heal')
            db_instances = objects.InstanceList.get_by_host(
                context, self.host, expected_attrs=[], use_slave=True)
            for inst in db_instances:
                # We don't want to refresh the cache for instances
                # which are building or deleting so don't put them
                # in the list. If they are building they will get
                # added to the list next time we build it.
                if (inst.vm_state == vm_states.BUILDING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is Building.', instance=inst)
                    continue
                if (inst.task_state == task_states.DELETING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                    continue
                if not instance:
                    # Save the first one we find so we don't
                    # have to get it again
                    instance = inst
                else:
                    instance_uuids.append(inst['uuid'])
            self._instance_uuids_to_heal = instance_uuids
        else:
            # Find the next valid instance on the list
            while instance_uuids:
                try:
                    # pop(0) consumes the pending list front-to-back; the
                    # full objects (with info_cache) are only loaded here.
                    inst = objects.Instance.get_by_uuid(
                            context, instance_uuids.pop(0),
                            expected_attrs=['system_metadata', 'info_cache'],
                            use_slave=True)
                except exception.InstanceNotFound:
                    # Instance is gone. Try to grab another.
                    continue
                # Check the instance hasn't been migrated
                if inst.host != self.host:
                    LOG.debug('Skipping network cache update for instance '
                              'because it has been migrated to another '
                              'host.', instance=inst)
                # Check the instance isn't being deleting
                elif inst.task_state == task_states.DELETING:
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                else:
                    instance = inst
                    break
        if instance:
            # We have an instance now to refresh
            try:
                # Call to network API to get instance info.. this will
                # force an update to the instance's info_cache
                self._get_instance_nw_info(context, instance)
                LOG.debug('Updated the network info_cache for instance',
                          instance=instance)
            except exception.InstanceNotFound:
                # Instance is gone.
                LOG.debug('Instance no longer exists. Unable to refresh',
                          instance=instance)
                return
            except Exception:
                LOG.error(_LE('An error occurred while refreshing the network '
                              'cache.'), instance=instance, exc_info=True)
        else:
            LOG.debug("Didn't find any instances for network info cache "
                      "update.")
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state':
[task_states.REBOOTING,
task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance.updated_at,
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance.launched_at,
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.compute_api.unrescue(context, instance)
    @periodic_task.periodic_task
    def _poll_unconfirmed_resizes(self, context):
        """Auto-confirm resizes that have sat unconfirmed longer than
        CONF.resize_confirm_window seconds; migrations that can no longer
        be confirmed are flagged as errored instead.
        """
        if CONF.resize_confirm_window == 0:
            return
        migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
            context, CONF.resize_confirm_window, self.host,
            use_slave=True)
        migrations_info = dict(migration_count=len(migrations),
                confirm_window=CONF.resize_confirm_window)
        if migrations_info["migration_count"] > 0:
            LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
                         "older than %(confirm_window)d seconds"),
                     migrations_info)
        def _set_migration_to_error(migration, reason, **kwargs):
            # Helper: mark the migration record errored and persist it with
            # admin rights (migration saves require an admin context).
            LOG.warning(_LW("Setting migration %(migration_id)s to error: "
                            "%(reason)s"),
                        {'migration_id': migration['id'], 'reason': reason},
                        **kwargs)
            migration.status = 'error'
            with migration.obj_as_admin():
                migration.save()
        for migration in migrations:
            instance_uuid = migration.instance_uuid
            LOG.info(_LI("Automatically confirming migration "
                         "%(migration_id)s for instance %(instance_uuid)s"),
                     {'migration_id': migration.id,
                      'instance_uuid': instance_uuid})
            expected_attrs = ['metadata', 'system_metadata']
            try:
                instance = objects.Instance.get_by_uuid(context,
                        instance_uuid, expected_attrs=expected_attrs,
                        use_slave=True)
            except exception.InstanceNotFound:
                reason = (_("Instance %s not found") %
                          instance_uuid)
                _set_migration_to_error(migration, reason)
                continue
            if instance.vm_state == vm_states.ERROR:
                reason = _("In ERROR state")
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            # race condition: The instance in DELETING state should not be
            # set the migration state to error, otherwise the instance in
            # to be deleted which is in RESIZED state
            # will not be able to confirm resize
            if instance.task_state in [task_states.DELETING,
                                       task_states.SOFT_DELETING]:
                msg = ("Instance being deleted or soft deleted during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue
            # race condition: This condition is hit when this method is
            # called between the save of the migration record with a status of
            # finished and the save of the instance object with a state of
            # RESIZED. The migration record should not be set to error.
            if instance.task_state == task_states.RESIZE_FINISH:
                msg = ("Instance still resizing during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue
            vm_state = instance.vm_state
            task_state = instance.task_state
            if vm_state != vm_states.RESIZED or task_state is not None:
                reason = (_("In states %(vm_state)s/%(task_state)s, not "
                            "RESIZED/None") %
                          {'vm_state': vm_state,
                           'task_state': task_state})
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            try:
                self.compute_api.confirm_resize(context, instance,
                                                migration=migration)
            except Exception as e:
                # Best-effort: a failed confirm is retried on the next tick.
                LOG.info(_LI("Error auto-confirming resize: %s. "
                             "Will retry later."),
                         e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
filters = {'vm_state': vm_states.SHELVED,
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save()
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
except Exception:
LOG.exception(_LE('Periodic task failed to offload instance.'),
instance=instance)
    @periodic_task.periodic_task
    def _instance_usage_audit(self, context):
        """Emit 'exists' usage notifications for this host's instances
        covering the last completed audit period; runs at most once per
        period (guarded by has_audit_been_run).
        """
        if not CONF.instance_usage_audit:
            return
        # Skip if another run already covered this audit period.
        if compute_utils.has_audit_been_run(context,
                                            self.conductor_api,
                                            self.host):
            return
        begin, end = utils.last_completed_audit_period()
        instances = objects.InstanceList.get_active_by_window_joined(
            context, begin, end, host=self.host,
            expected_attrs=['system_metadata', 'info_cache', 'metadata'],
            use_slave=True)
        num_instances = len(instances)
        errors = 0
        successes = 0
        LOG.info(_LI("Running instance usage audit for"
                     " host %(host)s from %(begin_time)s to "
                     "%(end_time)s. %(number_instances)s"
                     " instances."),
                 dict(host=self.host,
                      begin_time=begin,
                      end_time=end,
                      number_instances=num_instances))
        start_time = time.time()
        compute_utils.start_instance_usage_audit(context,
                                                 self.conductor_api,
                                                 begin, end,
                                                 self.host, num_instances)
        for instance in instances:
            try:
                compute_utils.notify_usage_exists(
                    self.notifier, context, instance,
                    ignore_missing_network_data=False)
                successes += 1
            except Exception:
                # Keep going; a single bad instance shouldn't abort the audit.
                LOG.exception(_LE('Failed to generate usage '
                                  'audit for instance '
                                  'on host %s'), self.host,
                              instance=instance)
                errors += 1
        compute_utils.finish_instance_usage_audit(context,
                                                  self.conductor_api,
                                                  begin, end,
                                                  self.host, errors,
                                                  "Instance usage audit ran "
                                                  "for host %s, %s instances "
                                                  "in %s seconds." % (
                                                  self.host,
                                                  num_instances,
                                                  time.time() - start_time))
    @periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
    def _poll_bandwidth_usage(self, context):
        """Poll per-MAC bandwidth counters from the driver and fold them
        into the bandwidth usage cache, accounting for counter rollover.
        """
        if not self._bw_usage_supported:
            return
        prev_time, start_time = utils.last_completed_audit_period()
        curr_time = time.time()
        if (curr_time - self._last_bw_usage_poll >
                CONF.bandwidth_poll_interval):
            self._last_bw_usage_poll = curr_time
            LOG.info(_LI("Updating bandwidth usage cache"))
            # Only propagate to cells at the (slower) cell update interval.
            cells_update_interval = CONF.cells.bandwidth_update_interval
            if (cells_update_interval > 0 and
                    curr_time - self._last_bw_usage_cell_update >
                    cells_update_interval):
                self._last_bw_usage_cell_update = curr_time
                update_cells = True
            else:
                update_cells = False
            instances = objects.InstanceList.get_by_host(context,
                                                         self.host,
                                                         use_slave=True)
            try:
                bw_counters = self.driver.get_all_bw_counters(instances)
            except NotImplementedError:
                # NOTE(mdragon): Not all hypervisors have bandwidth polling
                # implemented yet.  If they don't it doesn't break anything,
                # they just don't get the info in the usage events.
                # NOTE(PhilDay): Record that its not supported so we can
                # skip fast on future calls rather than waste effort getting
                # the list of instances.
                LOG.warning(_LW("Bandwidth usage not supported by "
                                "hypervisor."))
                self._bw_usage_supported = False
                return
            refreshed = timeutils.utcnow()
            for bw_ctr in bw_counters:
                # Allow switching of greenthreads between queries.
                greenthread.sleep(0)
                bw_in = 0
                bw_out = 0
                last_ctr_in = None
                last_ctr_out = None
                # Prefer a usage row from the current audit period; fall back
                # to the previous period just to seed the last counter values.
                usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
                    context, bw_ctr['uuid'], bw_ctr['mac_address'],
                    start_period=start_time, use_slave=True)
                if usage:
                    bw_in = usage.bw_in
                    bw_out = usage.bw_out
                    last_ctr_in = usage.last_ctr_in
                    last_ctr_out = usage.last_ctr_out
                else:
                    usage = (objects.BandwidthUsage.
                             get_by_instance_uuid_and_mac(
                                 context, bw_ctr['uuid'],
                                 bw_ctr['mac_address'],
                                 start_period=prev_time, use_slave=True))
                    if usage:
                        last_ctr_in = usage.last_ctr_in
                        last_ctr_out = usage.last_ctr_out
                if last_ctr_in is not None:
                    if bw_ctr['bw_in'] < last_ctr_in:
                        # counter rollover
                        bw_in += bw_ctr['bw_in']
                    else:
                        bw_in += (bw_ctr['bw_in'] - last_ctr_in)
                if last_ctr_out is not None:
                    if bw_ctr['bw_out'] < last_ctr_out:
                        # counter rollover
                        bw_out += bw_ctr['bw_out']
                    else:
                        bw_out += (bw_ctr['bw_out'] - last_ctr_out)
                objects.BandwidthUsage(context=context).create(
                                                        bw_ctr['uuid'],
                                                        bw_ctr['mac_address'],
                                                        bw_in,
                                                        bw_out,
                                                        bw_ctr['bw_in'],
                                                        bw_ctr['bw_out'],
                                                        start_period=start_time,
                                                        last_refreshed=refreshed,
                                                        update_cells=update_cells)
def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.conductor_api.vol_usage_update(context, usage['volume'],
usage['rd_req'],
usage['rd_bytes'],
usage['wr_req'],
usage['wr_bytes'],
usage['instance'])
@periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)
def _poll_volume_usage(self, context, start_time=None):
if CONF.volume_usage_poll_interval == 0:
return
if not start_time:
start_time = utils.last_completed_audit_period()[1]
compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
if not compute_host_bdms:
return
LOG.debug("Updating volume usage cache")
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we make a DB call to get the number of
virtual machines known by the hypervisor and if the number matches the
number of virtual machines known by the database, we proceed in a lazy
loop, one database record at a time, checking if the hypervisor has the
same power state as is in the database.
"""
db_instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warning(_LW("While synchronizing instance power states, found "
"%(num_db_instances)s instances in the database "
"and %(num_vm_instances)s instances on the "
"hypervisor."),
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
def _sync(db_instance):
# NOTE(melwitt): This must be synchronized as we query state from
# two separate sources, the driver and the database.
# They are set (in stop_instance) and read, in sync.
@utils.synchronized(db_instance.uuid)
def query_driver_power_state_and_sync():
self._query_driver_power_state_and_sync(context, db_instance)
try:
query_driver_power_state_and_sync()
except Exception:
LOG.exception(_LE("Periodic sync_power_state task had an "
"error while processing an instance."),
instance=db_instance)
self._syncs_in_progress.pop(db_instance.uuid)
for db_instance in db_instances:
# process syncs asynchronously - don't want instance locking to
# block entire periodic task thread
uuid = db_instance.uuid
if uuid in self._syncs_in_progress:
LOG.debug('Sync already in progress for %s' % uuid)
else:
LOG.debug('Triggering sync for uuid %s' % uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state}, instance=db_instance)
return
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance.state
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
    def _sync_instance_power_state(self, context, db_instance, vm_power_state,
                                   use_slave=False):
        """Align instance power state between the database and hypervisor.

        If the instance is not found on the hypervisor, but is in the database,
        then a stop() API will be called on the instance.

        :param context: security context
        :param db_instance: instance object; refreshed from the DB here
        :param vm_power_state: power state just reported by the hypervisor
        :param use_slave: whether the refresh may read from a DB slave
        """
        # We re-query the DB to get the latest instance info to minimize
        # (not eliminate) race condition.
        db_instance.refresh(use_slave=use_slave)
        db_power_state = db_instance.power_state
        vm_state = db_instance.vm_state
        if self.host != db_instance.host:
            # on the sending end of nova-compute _sync_power_state
            # may have yielded to the greenthread performing a live
            # migration; this in turn has changed the resident-host
            # for the VM; However, the instance is still active, it
            # is just in the process of migrating to another host.
            # This implies that the compute source must relinquish
            # control to the compute destination.
            LOG.info(_LI("During the sync_power process the "
                         "instance has moved from "
                         "host %(src)s to host %(dst)s"),
                     {'src': db_instance.host,
                      'dst': self.host},
                     instance=db_instance)
            return
        elif db_instance.task_state is not None:
            # on the receiving end of nova-compute, it could happen
            # that the DB instance already report the new resident
            # but the actual VM has not showed up on the hypervisor
            # yet. In this case, let's allow the loop to continue
            # and run the state sync in a later round
            LOG.info(_LI("During sync_power_state the instance has a "
                         "pending task (%(task)s). Skip."),
                     {'task': db_instance.task_state},
                     instance=db_instance)
            return
        orig_db_power_state = db_power_state
        if vm_power_state != db_power_state:
            LOG.info(_LI('During _sync_instance_power_state the DB '
                         'power_state (%(db_power_state)s) does not match '
                         'the vm_power_state from the hypervisor '
                         '(%(vm_power_state)s). Updating power_state in the '
                         'DB to match the hypervisor.'),
                     {'db_power_state': db_power_state,
                      'vm_power_state': vm_power_state},
                     instance=db_instance)
            # power_state is always updated from hypervisor to db
            db_instance.power_state = vm_power_state
            db_instance.save()
            db_power_state = vm_power_state
        # Note(maoy): Now resolve the discrepancy between vm_state and
        # vm_power_state. We go through all possible vm_states.
        if vm_state in (vm_states.BUILDING,
                        vm_states.RESCUED,
                        vm_states.RESIZED,
                        vm_states.SUSPENDED,
                        vm_states.ERROR):
            # TODO(maoy): we ignore these vm_state for now.
            pass
        elif vm_state == vm_states.ACTIVE:
            # The only rational power state should be RUNNING
            if vm_power_state in (power_state.SHUTDOWN,
                                  power_state.CRASHED):
                LOG.warning(_LW("Instance shutdown by itself. Calling the "
                                "stop API. Current vm_state: %(vm_state)s, "
                                "current task_state: %(task_state)s, "
                                "original DB power_state: %(db_power_state)s, "
                                "current VM power_state: %(vm_power_state)s"),
                            {'vm_state': vm_state,
                             'task_state': db_instance.task_state,
                             'db_power_state': orig_db_power_state,
                             'vm_power_state': vm_power_state},
                            instance=db_instance)
                try:
                    # Note(maoy): here we call the API instead of
                    # brutally updating the vm_state in the database
                    # to allow all the hooks and checks to be performed.
                    if db_instance.shutdown_terminate:
                        self.compute_api.delete(context, db_instance)
                    else:
                        self.compute_api.stop(context, db_instance)
                except Exception:
                    # Note(maoy): there is no need to propagate the error
                    # because the same power_state will be retrieved next
                    # time and retried.
                    # For example, there might be another task scheduled.
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
            elif vm_power_state == power_state.SUSPENDED:
                LOG.warning(_LW("Instance is suspended unexpectedly. Calling "
                                "the stop API."), instance=db_instance)
                try:
                    self.compute_api.stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
            elif vm_power_state == power_state.PAUSED:
                # Note(maoy): a VM may get into the paused state not only
                # because the user request via API calls, but also
                # due to (temporary) external instrumentations.
                # Before the virt layer can reliably report the reason,
                # we simply ignore the state discrepancy. In many cases,
                # the VM state will go back to running after the external
                # instrumentation is done. See bug 1097806 for details.
                LOG.warning(_LW("Instance is paused unexpectedly. Ignore."),
                            instance=db_instance)
            elif vm_power_state == power_state.NOSTATE:
                # Occasionally, depending on the status of the hypervisor,
                # which could be restarting for example, an instance may
                # not be found. Therefore just log the condition.
                LOG.warning(_LW("Instance is unexpectedly not found. Ignore."),
                            instance=db_instance)
        elif vm_state == vm_states.STOPPED:
            if vm_power_state not in (power_state.NOSTATE,
                                      power_state.SHUTDOWN,
                                      power_state.CRASHED):
                LOG.warning(_LW("Instance is not stopped. Calling "
                                "the stop API. Current vm_state: %(vm_state)s,"
                                " current task_state: %(task_state)s, "
                                "original DB power_state: %(db_power_state)s, "
                                "current VM power_state: %(vm_power_state)s"),
                            {'vm_state': vm_state,
                             'task_state': db_instance.task_state,
                             'db_power_state': orig_db_power_state,
                             'vm_power_state': vm_power_state},
                            instance=db_instance)
                try:
                    # NOTE(russellb) Force the stop, because normally the
                    # compute API would not allow an attempt to stop a stopped
                    # instance.
                    self.compute_api.force_stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
        elif vm_state == vm_states.PAUSED:
            if vm_power_state in (power_state.SHUTDOWN,
                                  power_state.CRASHED):
                LOG.warning(_LW("Paused instance shutdown by itself. Calling "
                                "the stop API."), instance=db_instance)
                try:
                    self.compute_api.force_stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
        elif vm_state in (vm_states.SOFT_DELETED,
                          vm_states.DELETED):
            if vm_power_state not in (power_state.NOSTATE,
                                      power_state.SHUTDOWN):
                # Note(maoy): this should be taken care of periodically in
                # _cleanup_running_deleted_instances().
                LOG.warning(_LW("Instance is not (soft-)deleted."),
                            instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return
# TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.
# The only case that the quota might be inconsistent is
# the compute node died between set instance state to SOFT_DELETED
# and quota commit to DB. When compute node starts again
# it will have no idea the reservation is committed or not or even
# expired, since it's a rare case, so marked as todo.
quotas = objects.Quotas.from_reservations(context, None)
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
try:
self._delete_instance(context, instance, bdms, quotas)
except Exception as e:
LOG.warning(_LW("Periodic reclaim failed to delete "
"instance: %s"),
e, instance=instance)
@periodic_task.periodic_task
def update_available_resource(self, context):
"""See driver.get_available_resource()
Periodic process that keeps that the compute host's understanding of
resource availability and usage in sync with the underlying hypervisor.
:param context: security context
"""
new_resource_tracker_dict = {}
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
rt = self._get_resource_tracker(nodename)
rt.update_available_resource(context)
new_resource_tracker_dict[nodename] = rt
# Delete orphan compute node not reported by driver but still in db
compute_nodes_in_db = self._get_compute_nodes_in_db(context,
use_slave=True)
for cn in compute_nodes_in_db:
if cn.hypervisor_hostname not in nodenames:
LOG.info(_LI("Deleting orphan compute node %s") % cn.id)
cn.destroy()
self._resource_tracker_dict = new_resource_tracker_dict
def _get_compute_nodes_in_db(self, context, use_slave=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
except exception.NotFound:
LOG.error(_LE("No compute node record for host %s"), self.host)
return []
    @periodic_task.periodic_task(
        spacing=CONF.running_deleted_instance_poll_interval)
    def _cleanup_running_deleted_instances(self, context):
        """Cleanup any instances which are erroneously still running after
        having been deleted.

        Valid actions to take are:

        1. noop - do nothing
        2. log - log which instances are erroneously running
        3. reap - shutdown and cleanup any erroneously running instances
        4. shutdown - power off *and disable* any erroneously running
        instances

        The use-case for this cleanup task is: for various reasons, it may be
        possible for the database to show an instance as deleted but for that
        instance to still be running on a host machine (see bug
        https://bugs.launchpad.net/nova/+bug/911366).

        This cleanup task is a cross-hypervisor utility for finding these
        zombied instances and either logging the discrepancy (likely what you
        should do in production), or automatically reaping the instances (more
        appropriate for dev environments).
        """
        action = CONF.running_deleted_instance_action
        if action == "noop":
            return
        # NOTE(sirp): admin contexts don't ordinarily return deleted records
        with utils.temporary_mutation(context, read_deleted="yes"):
            for instance in self._running_deleted_instances(context):
                if action == "log":
                    LOG.warning(_LW("Detected instance with name label "
                                    "'%s' which is marked as "
                                    "DELETED but still present on host."),
                                instance.name, instance=instance)
                elif action == 'shutdown':
                    LOG.info(_LI("Powering off instance with name label "
                                 "'%s' which is marked as "
                                 "DELETED but still present on host."),
                             instance.name, instance=instance)
                    try:
                        try:
                            # disable starting the instance
                            self.driver.set_bootable(instance, False)
                        except NotImplementedError:
                            LOG.warning(_LW("set_bootable is not implemented "
                                            "for the current driver"))
                        # and power it off
                        self.driver.power_off(instance)
                    except Exception:
                        # Best-effort: log and move on to the next instance.
                        msg = _LW("Failed to power off instance")
                        LOG.warn(msg, instance=instance, exc_info=True)
                elif action == 'reap':
                    LOG.info(_LI("Destroying instance with name label "
                                 "'%s' which is marked as "
                                 "DELETED but still present on host."),
                             instance.name, instance=instance)
                    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid, use_slave=True)
                    # Drop any event waiters so nothing blocks on this
                    # soon-to-be-destroyed instance.
                    self.instance_events.clear_events_for_instance(instance)
                    try:
                        self._shutdown_instance(context, instance, bdms,
                                                notify=False)
                        self._cleanup_volumes(context, instance.uuid, bdms)
                    except Exception as e:
                        LOG.warning(_LW("Periodic cleanup failed to delete "
                                        "instance: %s"),
                                    e, instance=instance)
                else:
                    raise Exception(_("Unrecognized value '%s'"
                                      " for CONF.running_deleted_"
                                      "instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance['deleted_at']
if isinstance(instance, obj_base.NovaObject) and deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
    @contextlib.contextmanager
    def _error_out_instance_on_exception(self, context, instance,
                                         quotas=None,
                                         instance_state=vm_states.ACTIVE):
        """Context manager that resets the instance's state on failure.

        On NotImplementedError the instance is put back to
        ``instance_state``; on InstanceFaultRollback it is put back to
        ACTIVE and the wrapped inner exception is raised instead; on any
        other exception the instance is put into ERROR. In every failure
        path ``quotas`` (if given) is rolled back and the exception is
        re-raised to the caller.
        """
        instance_uuid = instance.uuid
        try:
            yield
        except NotImplementedError as error:
            with excutils.save_and_reraise_exception():
                if quotas:
                    quotas.rollback()
                LOG.info(_LI("Setting instance back to %(state)s after: "
                             "%(error)s"),
                         {'state': instance_state, 'error': error},
                         instance_uuid=instance_uuid)
                self._instance_update(context, instance_uuid,
                                      vm_state=instance_state,
                                      task_state=None)
        except exception.InstanceFaultRollback as error:
            if quotas:
                quotas.rollback()
            LOG.info(_LI("Setting instance back to ACTIVE after: %s"),
                     error, instance_uuid=instance_uuid)
            self._instance_update(context, instance_uuid,
                                  vm_state=vm_states.ACTIVE,
                                  task_state=None)
            # Surface the real underlying failure, not the rollback wrapper.
            raise error.inner_exception
        except Exception:
            LOG.exception(_LE('Setting instance vm_state to ERROR'),
                          instance_uuid=instance_uuid)
            with excutils.save_and_reraise_exception():
                if quotas:
                    quotas.rollback()
                self._set_instance_error_state(context, instance)
@aggregate_object_compat
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.delete_host,
aggregate, host)
@aggregate_object_compat
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.add_host,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug('Processing event %(event)s',
{'event': event.key}, instance=instance)
_event.send(event)
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
LOG.debug('Received event %(event)s',
{'event': event.key},
instance=instance)
if event.name == 'network-changed':
self.network_api.get_instance_nw_info(context, instance)
else:
self._process_instance_event(instance, event)
    @periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
                                 external_process_ok=True)
    def _run_image_cache_manager_pass(self, context):
        """Run a single pass of the image cache manager."""
        if not self.driver.capabilities["has_imagecache"]:
            return
        # Determine what other nodes use this storage
        storage_users.register_storage_use(CONF.instances_path, CONF.host)
        nodes = storage_users.get_storage_users(CONF.instances_path)
        # Filter all_instances to only include those nodes which share this
        # storage path.
        # TODO(mikal): this should be further refactored so that the cache
        # cleanup code doesn't know what those instances are, just a remote
        # count, and then this logic should be pushed up the stack.
        # NOTE(review): the deleted=False / soft_deleted=True pair looks
        # surprising; presumably the DB layer interprets 'soft_deleted' as
        # "also include soft-deleted rows" -- confirm before changing.
        filters = {'deleted': False,
                   'soft_deleted': True,
                   'host': nodes}
        filtered_instances = objects.InstanceList.get_by_filters(context,
                                 filters, expected_attrs=[], use_slave=True)
        self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
    """Retry any pending instance file deletes.

    Looks for instances on this host that were deleted but whose
    on-disk files could not be removed, and retries the removal up to
    CONF.maximum_instance_delete_attempts times per instance.
    """
    LOG.debug('Cleaning up deleted instances')
    filters = {'deleted': True,
               'soft_deleted': False,
               'host': CONF.host,
               'cleaned': False}
    attrs = ['info_cache', 'security_groups', 'system_metadata']
    # read_deleted='yes' is required to fetch rows that are already
    # marked deleted in the database.
    with utils.temporary_mutation(context, read_deleted='yes'):
        instances = objects.InstanceList.get_by_filters(
            context, filters, expected_attrs=attrs, use_slave=True)
    LOG.debug('There are %d instances to clean', len(instances))

    for instance in instances:
        # clean_attempts tracks how many times we've already tried (and
        # failed) to remove this instance's files.
        attempts = int(instance.system_metadata.get('clean_attempts', '0'))
        LOG.debug('Instance has had %(attempts)s of %(max)s '
                  'cleanup attempts',
                  {'attempts': attempts,
                   'max': CONF.maximum_instance_delete_attempts},
                  instance=instance)
        if attempts < CONF.maximum_instance_delete_attempts:
            success = self.driver.delete_instance_files(instance)

            # Record the attempt regardless of outcome so that a
            # permanently-failing instance eventually stops being retried.
            instance.system_metadata['clean_attempts'] = str(attempts + 1)
            if success:
                instance.cleaned = True
            with utils.temporary_mutation(context, read_deleted='yes'):
                instance.save()
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _cleanup_incomplete_migrations(self, context):
    """Delete instance files on failed resize/revert-resize operation

    During resize/revert-resize operation, if that instance gets deleted
    in-between then instance files might remain either on source or
    destination compute node because of race condition.
    """
    LOG.debug('Cleaning up deleted instances with incomplete migration ')
    migration_filters = {'host': CONF.host,
                         'status': 'error'}
    migrations = objects.MigrationList.get_by_filters(context,
                                                      migration_filters)

    if not migrations:
        return

    inst_uuid_from_migrations = set([migration.instance_uuid for migration
                                     in migrations])

    # Only deleted (not soft-deleted) instances matching those failed
    # migrations are candidates for file cleanup.
    inst_filters = {'deleted': True, 'soft_deleted': False,
                    'uuid': inst_uuid_from_migrations}
    attrs = ['info_cache', 'security_groups', 'system_metadata']
    with utils.temporary_mutation(context, read_deleted='yes'):
        instances = objects.InstanceList.get_by_filters(
            context, inst_filters, expected_attrs=attrs, use_slave=True)

    for instance in instances:
        # If the instance is currently homed on this host the files are
        # still legitimately in use; only clean up leftovers on the
        # "other" side of the migration.
        if instance.host != CONF.host:
            for migration in migrations:
                if instance.uuid == migration.instance_uuid:
                    # Delete instance files if not cleanup properly either
                    # from the source or destination compute nodes when
                    # the instance is deleted during resizing.
                    self.driver.delete_instance_files(instance)
                    try:
                        # Flip the migration to 'failed' so it is not
                        # picked up again by the next periodic pass.
                        migration.status = 'failed'
                        with migration.obj_as_admin():
                            migration.save()
                    except exception.MigrationNotFound:
                        LOG.warning(_LW("Migration %s is not found."),
                                    migration.id, context=context,
                                    instance=instance)
                    break
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
                               exception.NovaException,
                               NotImplementedError)
@wrap_exception()
def quiesce_instance(self, context, instance):
    """Quiesce an instance on this host."""
    # The driver needs the image metadata so it can honor any
    # image-property hints when freezing guest filesystems.
    context = context.elevated()
    image_ref = instance.image_ref
    image_meta = compute_utils.get_image_metadata(
        context, self.image_api, image_ref, instance)
    self.driver.quiesce(context, instance, image_meta)
def _wait_for_snapshots_completion(self, context, mapping):
    """Block until every snapshot in the block-device mapping is done.

    For each mapping entry backed by a snapshot, poll the volume API
    every 0.5s until the snapshot's status is no longer 'creating'.
    The nested closure is executed to completion for one entry before
    the next is examined, so the loop-variable capture is safe here.
    """
    for mapping_dict in mapping:
        if mapping_dict.get('source_type') == 'snapshot':
            def _wait_snapshot():
                snapshot = self.volume_api.get_snapshot(
                    context, mapping_dict['snapshot_id'])
                if snapshot.get('status') != 'creating':
                    # Stops the FixedIntervalLoopingCall below.
                    raise loopingcall.LoopingCallDone()

            timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot)
            timer.start(interval=0.5).wait()
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
                               exception.NovaException,
                               NotImplementedError)
@wrap_exception()
def unquiesce_instance(self, context, instance, mapping=None):
    """Unquiesce an instance on this host.

    If snapshots' image mapping is provided, it waits until snapshots are
    completed before unqueiscing.
    """
    context = context.elevated()
    if mapping:
        try:
            self._wait_for_snapshots_completion(context, mapping)
        except Exception as error:
            # Best effort: even if waiting for snapshots fails we still
            # unquiesce, since leaving the guest frozen is worse.
            LOG.exception(_LE("Exception while waiting completion of "
                              "volume snapshots: %s"),
                          error, instance=instance)
    image_ref = instance.image_ref
    image_meta = compute_utils.get_image_metadata(
        context, self.image_api, image_ref, instance)
    self.driver.unquiesce(context, instance, image_meta)
# TODO(danms): This goes away immediately in Lemming and is just
# present in Kilo so that we can receive v3.x and v4.0 messages
class _ComputeV4Proxy(object):
    """Adapter exposing the 4.0 compute RPC API on top of the manager.

    Every method forwards to the corresponding ComputeManager method,
    reshuffling arguments where the 4.0 wire format differs from the
    manager's older signature (see the NOTE comments below).
    """

    target = messaging.Target(version='4.0')

    def __init__(self, manager):
        self.manager = manager

    def add_aggregate_host(self, ctxt, aggregate, host, slave_info=None):
        return self.manager.add_aggregate_host(ctxt, aggregate, host,
                                               slave_info=slave_info)

    def add_fixed_ip_to_instance(self, ctxt, network_id, instance):
        return self.manager.add_fixed_ip_to_instance(ctxt,
                                                     network_id,
                                                     instance)

    def attach_interface(self, ctxt, instance, network_id, port_id,
                         requested_ip):
        return self.manager.attach_interface(ctxt, instance, network_id,
                                             port_id, requested_ip)

    def attach_volume(self, ctxt, instance, bdm):
        # NOTE(danms): In 3.x, attach_volume had mountpoint and volume_id
        # parameters, which are gone from 4.x. Provide None for each to
        # the 3.x manager above and remove in Lemming.
        return self.manager.attach_volume(ctxt, None, None,
                                          instance=instance,
                                          bdm=bdm)

    def change_instance_metadata(self, ctxt, instance, diff):
        return self.manager.change_instance_metadata(
            ctxt, diff=diff, instance=instance)

    def check_can_live_migrate_destination(self, ctxt, instance,
                                           block_migration, disk_over_commit):
        return self.manager.check_can_live_migrate_destination(
            ctxt, instance, block_migration, disk_over_commit)

    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        return self.manager.check_can_live_migrate_source(ctxt, instance,
                                                          dest_check_data)

    def check_instance_shared_storage(self, ctxt, instance, data):
        return self.manager.check_instance_shared_storage(ctxt, instance, data)

    def confirm_resize(self, ctxt, instance, reservations, migration):
        return self.manager.confirm_resize(ctxt, instance,
                                           reservations, migration)

    def detach_interface(self, ctxt, instance, port_id):
        return self.manager.detach_interface(ctxt, instance, port_id)

    def detach_volume(self, ctxt, volume_id, instance):
        # NOTE(danms): Pass instance by kwarg to help the object_compat
        # decorator, as real RPC dispatch does.
        return self.manager.detach_volume(ctxt, volume_id, instance=instance)

    def finish_resize(self, ctxt, disk_info, image, instance,
                      reservations, migration):
        return self.manager.finish_resize(ctxt, disk_info, image, instance,
                                          reservations, migration)

    def finish_revert_resize(self, ctxt, instance,
                             reservations, migration):
        return self.manager.finish_revert_resize(ctxt, instance,
                                                 reservations, migration)

    def get_console_output(self, ctxt, instance, tail_length):
        return self.manager.get_console_output(ctxt, instance, tail_length)

    def get_console_pool_info(self, ctxt, console_type):
        return self.manager.get_console_pool_info(ctxt, console_type)

    def get_console_topic(self, ctxt):
        return self.manager.get_console_topic(ctxt)

    def get_diagnostics(self, ctxt, instance):
        return self.manager.get_diagnostics(ctxt, instance)

    def get_instance_diagnostics(self, ctxt, instance):
        return self.manager.get_instance_diagnostics(ctxt, instance)

    def get_vnc_console(self, ctxt, console_type, instance):
        return self.manager.get_vnc_console(ctxt, console_type, instance)

    def get_spice_console(self, ctxt, console_type, instance):
        return self.manager.get_spice_console(ctxt, console_type, instance)

    def get_rdp_console(self, ctxt, console_type, instance):
        return self.manager.get_rdp_console(ctxt, console_type, instance)

    def get_serial_console(self, ctxt, console_type, instance, index=0, at_port=None):
        return self.manager.get_serial_console(ctxt, console_type, instance, index=index, at_port=at_port)

    def validate_console_port(self, ctxt, instance, port, console_type):
        return self.manager.validate_console_port(ctxt, instance, port,
                                                  console_type)

    def host_maintenance_mode(self, ctxt, host, mode):
        return self.manager.host_maintenance_mode(ctxt, host, mode)

    def host_power_action(self, ctxt, action):
        return self.manager.host_power_action(ctxt, action)

    def inject_network_info(self, ctxt, instance):
        return self.manager.inject_network_info(ctxt, instance)

    def live_migration(self, ctxt, dest, instance, block_migration,
                       migrate_data=None):
        return self.manager.live_migration(ctxt, dest, instance,
                                           block_migration,
                                           migrate_data=migrate_data)

    def pause_instance(self, ctxt, instance):
        return self.manager.pause_instance(ctxt, instance)

    def post_live_migration_at_destination(self, ctxt, instance,
                                           block_migration):
        return self.manager.post_live_migration_at_destination(
            ctxt, instance, block_migration)

    def pre_live_migration(self, ctxt, instance, block_migration, disk,
                           migrate_data=None):
        return self.manager.pre_live_migration(ctxt, instance, block_migration,
                                               disk, migrate_data=migrate_data)

    def prep_resize(self, ctxt, image, instance, instance_type,
                    reservations=None, request_spec=None,
                    filter_properties=None, node=None, clean_shutdown=True):
        return self.manager.prep_resize(ctxt, image, instance, instance_type,
                                        reservations=reservations,
                                        request_spec=request_spec,
                                        filter_properties=filter_properties,
                                        node=node,
                                        clean_shutdown=clean_shutdown)

    def reboot_instance(self, ctxt, instance, block_device_info, reboot_type):
        return self.manager.reboot_instance(ctxt, instance, block_device_info,
                                            reboot_type)

    def rebuild_instance(self, ctxt, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate, on_shared_storage,
                         preserve_ephemeral=False):
        return self.manager.rebuild_instance(
            ctxt, instance, orig_image_ref, image_ref,
            injected_files, new_pass, orig_sys_metadata,
            bdms, recreate, on_shared_storage,
            preserve_ephemeral=preserve_ephemeral)

    def refresh_security_group_rules(self, ctxt, security_group_id):
        return self.manager.refresh_security_group_rules(ctxt,
                                                         security_group_id)

    def refresh_security_group_members(self, ctxt, security_group_id):
        return self.manager.refresh_security_group_members(ctxt,
                                                           security_group_id)

    def refresh_instance_security_rules(self, ctxt, instance):
        return self.manager.refresh_instance_security_rules(ctxt, instance)

    def refresh_provider_fw_rules(self, ctxt):
        return self.manager.refresh_provider_fw_rules(ctxt)

    def remove_aggregate_host(self, ctxt, host, slave_info, aggregate):
        return self.manager.remove_aggregate_host(ctxt,
                                                  host, slave_info,
                                                  aggregate)

    def remove_fixed_ip_from_instance(self, ctxt, address, instance):
        return self.manager.remove_fixed_ip_from_instance(ctxt, address,
                                                          instance)

    def remove_volume_connection(self, ctxt, instance, volume_id):
        return self.manager.remove_volume_connection(ctxt, instance, volume_id)

    def rescue_instance(self, ctxt, instance, rescue_password,
                        rescue_image_ref, clean_shutdown):
        return self.manager.rescue_instance(ctxt, instance, rescue_password,
                                            rescue_image_ref=rescue_image_ref,
                                            clean_shutdown=clean_shutdown)

    def reset_network(self, ctxt, instance):
        return self.manager.reset_network(ctxt, instance)

    def resize_instance(self, ctxt, instance, image,
                        reservations, migration, instance_type,
                        clean_shutdown=True):
        return self.manager.resize_instance(ctxt, instance, image,
                                            reservations, migration,
                                            instance_type,
                                            clean_shutdown=clean_shutdown)

    def resume_instance(self, ctxt, instance):
        return self.manager.resume_instance(ctxt, instance)

    def revert_resize(self, ctxt, instance, migration, reservations=None):
        return self.manager.revert_resize(ctxt, instance, migration,
                                          reservations=reservations)

    def rollback_live_migration_at_destination(self, ctxt, instance,
                                               destroy_disks,
                                               migrate_data):
        return self.manager.rollback_live_migration_at_destination(
            ctxt, instance, destroy_disks=destroy_disks,
            migrate_data=migrate_data)

    def set_admin_password(self, ctxt, instance, new_pass):
        return self.manager.set_admin_password(ctxt, instance, new_pass)

    def set_host_enabled(self, ctxt, enabled):
        return self.manager.set_host_enabled(ctxt, enabled)

    def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
        # NOTE: argument order differs between the 4.0 API (instance
        # first) and the manager (instance last).
        return self.manager.swap_volume(ctxt, old_volume_id, new_volume_id,
                                        instance)

    def get_host_uptime(self, ctxt):
        return self.manager.get_host_uptime(ctxt)

    def reserve_block_device_name(self, ctxt, instance, device, volume_id,
                                  disk_bus=None, device_type=None):
        return self.manager.reserve_block_device_name(ctxt, instance, device,
                                                      volume_id,
                                                      disk_bus=disk_bus,
                                                      device_type=device_type,
                                                      return_bdm_object=True)

    def backup_instance(self, ctxt, image_id, instance, backup_type,
                        rotation):
        return self.manager.backup_instance(ctxt, image_id, instance,
                                            backup_type, rotation)

    def snapshot_instance(self, ctxt, image_id, instance):
        return self.manager.snapshot_instance(ctxt, image_id, instance)

    def start_instance(self, ctxt, instance):
        return self.manager.start_instance(ctxt, instance)

    def stop_instance(self, ctxt, instance, clean_shutdown):
        return self.manager.stop_instance(ctxt, instance, clean_shutdown)

    def suspend_instance(self, ctxt, instance):
        return self.manager.suspend_instance(ctxt, instance)

    def terminate_instance(self, ctxt, instance, bdms, reservations=None):
        return self.manager.terminate_instance(ctxt, instance, bdms,
                                               reservations=reservations)

    def unpause_instance(self, ctxt, instance):
        return self.manager.unpause_instance(ctxt, instance)

    def unrescue_instance(self, ctxt, instance):
        return self.manager.unrescue_instance(ctxt, instance)

    def soft_delete_instance(self, ctxt, instance, reservations):
        return self.manager.soft_delete_instance(ctxt, instance, reservations)

    def restore_instance(self, ctxt, instance):
        return self.manager.restore_instance(ctxt, instance)

    def shelve_instance(self, ctxt, instance, image_id=None,
                        clean_shutdown=True):
        return self.manager.shelve_instance(ctxt, instance, image_id=image_id,
                                            clean_shutdown=clean_shutdown)

    def shelve_offload_instance(self, ctxt, instance, clean_shutdown):
        return self.manager.shelve_offload_instance(ctxt, instance,
                                                    clean_shutdown)

    def unshelve_instance(self, ctxt, instance, image=None,
                          filter_properties=None, node=None):
        return self.manager.unshelve_instance(
            ctxt, instance, image=image,
            filter_properties=filter_properties,
            node=node)

    def volume_snapshot_create(self, ctxt, instance, volume_id, create_info):
        return self.manager.volume_snapshot_create(ctxt, instance, volume_id,
                                                   create_info)

    def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
                               delete_info):
        return self.manager.volume_snapshot_delete(ctxt, instance, volume_id,
                                                   snapshot_id, delete_info)

    def external_instance_event(self, ctxt, instances, events):
        return self.manager.external_instance_event(ctxt, instances, events)

    def build_and_run_instance(self, ctxt, instance, image, request_spec,
                               filter_properties, admin_password=None,
                               injected_files=None, requested_networks=None,
                               security_groups=None, block_device_mapping=None,
                               node=None, limits=None):
        return self.manager.build_and_run_instance(
            ctxt, instance, image, request_spec, filter_properties,
            admin_password=admin_password, injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=block_device_mapping,
            node=node, limits=limits)

    def quiesce_instance(self, ctxt, instance):
        return self.manager.quiesce_instance(ctxt, instance)

    def unquiesce_instance(self, ctxt, instance, mapping=None):
        return self.manager.unquiesce_instance(ctxt, instance, mapping=mapping)
| gpl-2.0 |
KINGbabasula/KING_kernel | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Watch task/comm perf events on all CPUs and print each sample.

    Opens one software event per (cpu, thread), mmaps the ring buffers,
    and loops forever polling for samples.
    """
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): perf.SAMPLE_TID appears twice in sample_type below;
    # presumably one of them was meant to be a different SAMPLE_* flag —
    # confirm against the perf python binding docs before changing.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, sample_period = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until at least one ring buffer has data, then drain every CPU.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
if __name__ == '__main__':
main()
| gpl-2.0 |
kparal/anaconda | pyanaconda/ui/gui/hubs/progress.py | 1 | 11235 | # Progress hub classes
#
# Copyright (C) 2011-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
import gi
gi.require_version("GLib", "2.0")
gi.require_version("Gtk", "3.0")
from gi.repository import GLib, Gtk
import itertools
import os
import sys
import glob
from pyanaconda.i18n import _, C_
from pyanaconda.localization import find_best_locale_match
from pyanaconda.product import productName
from pyanaconda.flags import flags
from pyanaconda import iutil
from pyanaconda.constants import THREAD_INSTALL, THREAD_CONFIGURATION, DEFAULT_LANG, IPMI_FINISHED
from pykickstart.constants import KS_SHUTDOWN, KS_REBOOT
from pyanaconda.ui.gui.hubs import Hub
from pyanaconda.ui.gui.utils import gtk_action_nowait, gtk_call_once
__all__ = ["ProgressHub"]
class ProgressHub(Hub):
    """Hub shown during package installation and post-install configuration.

    Drives the progress bar from messages on the install progress queue,
    cycles "ransom note" images while work is in flight, and switches to
    the configure/reboot pages when each phase completes.

    .. inheritance-diagram:: ProgressHub
       :parts: 3
    """
    builderObjects = ["progressWindow"]
    mainWidgetName = "progressWindow"
    uiFile = "hubs/progress.glade"
    helpFile = "ProgressHub.xml"

    def __init__(self, data, storage, payload, instclass):
        Hub.__init__(self, data, storage, payload, instclass)

        self._totalSteps = 0          # total steps announced by PROGRESS_CODE_INIT
        self._currentStep = 0         # steps completed so far
        self._configurationDone = False
        self._rnotes_id = None        # GLib source id of the rnotes timer

    def _do_configuration(self, widget=None, reenable_ransom=True):
        """Kick off the post-install configuration phase in a thread."""
        from pyanaconda.install import doConfiguration
        from pyanaconda.threads import threadMgr, AnacondaThread

        assert self._configurationDone == False

        self._configurationDone = True

        # Disable all personalization spokes
        self.builder.get_object("progressWindow-scroll").set_sensitive(False)

        if reenable_ransom:
            self._start_ransom_notes()

        self._restart_spinner()

        GLib.timeout_add(250, self._update_progress, self._configuration_done)
        threadMgr.add(AnacondaThread(name=THREAD_CONFIGURATION, target=doConfiguration,
                                     args=(self.storage, self.payload, self.data, self.instclass)))

    def _start_ransom_notes(self):
        # Adding this as a timeout below means it'll get called after 60
        # seconds, so we need to do the first call manually.
        self._cycle_rnotes()
        self._rnotes_id = GLib.timeout_add_seconds(60, self._cycle_rnotes)

    def _update_progress(self, callback=None):
        """Drain the progress queue and update the UI.

        Runs on a GLib timeout; returns True to keep being called and
        False once a COMPLETE message has been handled.
        """
        from pyanaconda.progress import progressQ
        import queue

        q = progressQ.q

        # Grab all messages may have appeared since last time this method ran.
        while True:
            # Attempt to get a message out of the queue for how we should update
            # the progress bar. If there's no message, don't error out.
            try:
                (code, args) = q.get(False)
            except queue.Empty:
                break

            if code == progressQ.PROGRESS_CODE_INIT:
                self._init_progress_bar(args[0])
            elif code == progressQ.PROGRESS_CODE_STEP:
                self._step_progress_bar()
            elif code == progressQ.PROGRESS_CODE_MESSAGE:
                self._update_progress_message(args[0])
            elif code == progressQ.PROGRESS_CODE_COMPLETE:
                q.task_done()

                # we are done, stop the progress indication
                gtk_call_once(self._progressBar.set_fraction, 1.0)
                gtk_call_once(self._progressLabel.set_text, _("Complete!"))
                gtk_call_once(self._spinner.stop)
                gtk_call_once(self._spinner.hide)

                if callback:
                    callback()

                # There shouldn't be any more progress bar updates, so return False
                # to indicate this method should be removed from the idle loop.
                return False
            elif code == progressQ.PROGRESS_CODE_QUIT:
                sys.exit(args[0])

            q.task_done()

        return True

    def _configuration_done(self):
        # Configuration done, remove ransom notes timer
        # and switch to the Reboot page
        GLib.source_remove(self._rnotes_id)
        self._progressNotebook.set_current_page(1)

        iutil.ipmi_report(IPMI_FINISHED)

        # kickstart install, continue automatically if reboot or shutdown selected
        if flags.automatedInstall and self.data.reboot.action in [KS_REBOOT, KS_SHUTDOWN]:
            self.window.emit("continue-clicked")

    def _install_done(self):
        # package installation done, check personalization spokes
        # and start the configuration step if all is ready
        if not self._inSpoke and self.continuePossible:
            self._do_configuration(reenable_ransom=False)
        else:
            # some mandatory spokes are not ready
            # switch to configure and finish page
            GLib.source_remove(self._rnotes_id)
            self._progressNotebook.set_current_page(0)

    def _do_globs(self, path):
        """Return all image files (png/jpg/svg) directly under path."""
        return glob.glob(path + "/*.png") + \
               glob.glob(path + "/*.jpg") + \
               glob.glob(path + "/*.svg")

    def _get_rnotes(self):
        # We first look for rnotes in paths containing the language, then in
        # directories without the language component. You know, just in case.
        paths = ["/tmp/updates/pixmaps/rnotes/",
                 "/tmp/product/pixmaps/rnotes/",
                 "/usr/share/anaconda/pixmaps/rnotes/"]

        all_lang_pixmaps = []
        for path in paths:
            all_lang_pixmaps += self._do_globs(path + "/*")

        pixmap_langs = [pixmap.split(os.path.sep)[-2] for pixmap in all_lang_pixmaps]
        best_lang = find_best_locale_match(os.environ["LANG"], pixmap_langs)

        if not best_lang:
            # nothing found, try the default language
            best_lang = find_best_locale_match(DEFAULT_LANG, pixmap_langs)

        if not best_lang:
            # nothing found even for the default language, try non-localized rnotes
            non_localized = []
            for path in paths:
                non_localized += self._do_globs(path)

            return non_localized

        best_lang_pixmaps = []
        for path in paths:
            best_lang_pixmaps += self._do_globs(path + best_lang)

        return best_lang_pixmaps

    def _cycle_rnotes(self):
        # Change the ransom notes image every minute by grabbing the next
        # image's filename. Note that self._rnotesPages is an infinite list,
        # so this will cycle through the images indefinitely.
        try:
            nxt = next(self._rnotesPages)
        except StopIteration:
            # there are no rnotes
            pass
        else:
            self._progressNotebook.set_current_page(nxt)
        # Returning True keeps the GLib timeout alive.
        return True

    def initialize(self):
        """Look up UI widgets, set up labels, and prepare rnotes pages."""
        Hub.initialize(self)

        if flags.livecdInstall:
            continueText = self.builder.get_object("rebootLabel")
            # NOTE(review): the "%s" in this label is never substituted
            # (there is no "% productName" here, unlike the labels set
            # below) — the literal "%s" will be displayed; confirm and fix.
            continueText.set_text(_("%s is now successfully installed on your system and ready "
                                    "for you to use! When you are ready, reboot your system to start using it!"))
            continueText.set_line_wrap(True)
            self.window.get_continue_button().set_label(C_("GUI|Progress", "_Quit"))

        self._progressBar = self.builder.get_object("progressBar")
        self._progressLabel = self.builder.get_object("progressLabel")
        self._progressNotebook = self.builder.get_object("progressNotebook")
        self._spinner = self.builder.get_object("progressSpinner")

        lbl = self.builder.get_object("configurationLabel")
        lbl.set_text(_("%s is now successfully installed, but some configuration still needs to be done.\n"
                       "Finish it and then click the Finish configuration button please.") %
                     productName)

        lbl = self.builder.get_object("rebootLabel")
        lbl.set_text(_("%s is now successfully installed and ready for you to use!\n"
                       "Go ahead and reboot to start using it!") % productName)

        rnotes = self._get_rnotes()
        # Get the start of the pages we're about to add to the notebook
        rnotes_start = self._progressNotebook.get_n_pages()
        if rnotes:
            # Add a new page in the notebook for each ransom note image.
            for f in rnotes:
                img = Gtk.Image.new_from_file(f)
                img.show()
                self._progressNotebook.append_page(img, None)

            # An infinite list of the page numbers containing ransom notes images.
            self._rnotesPages = itertools.cycle(range(rnotes_start,
                                                      self._progressNotebook.get_n_pages()))
        else:
            # Add a blank page to the notebook and we'll just cycle to that
            # over and over again.
            blank = Gtk.Box()
            blank.show()
            self._progressNotebook.append_page(blank, None)
            self._rnotesPages = itertools.cycle([rnotes_start])

    def refresh(self):
        """Start the package installation thread and progress polling."""
        from pyanaconda.install import doInstall
        from pyanaconda.threads import threadMgr, AnacondaThread

        Hub.refresh(self)

        self._start_ransom_notes()
        GLib.timeout_add(250, self._update_progress, self._install_done)
        threadMgr.add(AnacondaThread(name=THREAD_INSTALL, target=doInstall,
                                     args=(self.storage, self.payload, self.data, self.instclass)))

    def _updateContinueButton(self):
        if self._configurationDone:
            self.window.set_may_continue(self.continuePossible)
        else:
            self.builder.get_object("configureButton").set_sensitive(self.continuePossible)

    def _init_progress_bar(self, steps):
        self._totalSteps = steps
        self._currentStep = 0

        gtk_call_once(self._progressBar.set_fraction, 0.0)

    def _step_progress_bar(self):
        if not self._totalSteps:
            return

        self._currentStep += 1
        gtk_call_once(self._progressBar.set_fraction, self._currentStep/self._totalSteps)

    def _update_progress_message(self, message):
        if not self._totalSteps:
            return

        gtk_call_once(self._progressLabel.set_text, message)

    @gtk_action_nowait
    def _restart_spinner(self):
        self._spinner.show()
        self._spinner.start()
| gpl-2.0 |
carlgao/lenga | images/lenny64-peon/usr/share/python-support/uptrack/Uptrack.py | 1 | 48758 | #!/usr/bin/env python
# Copyright (C) 2008-2011 Ksplice, Inc.
# Author: Waseem Daher
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
# 02110-1301, USA.
import sys
import datetime
import errno
import os
import os.path
import re
import socket
import urllib
import posixpath
import pycurl
import cStringIO as StringIO
import ConfigParser
import select
import logging
import random
import traceback
import textwrap
import glob
if sys.version_info >= (2, 6, 2, 'final', 0): # subprocess_compat comes from 2.6.2
import subprocess
else:
import subprocess_compat as subprocess
try: set
except NameError: from sets import Set as set
import yaml
try:
from yaml import CSafeLoader as yaml_loader
from yaml import CSafeDumper as yaml_dumper
except ImportError:
from yaml import SafeLoader as yaml_loader
from yaml import SafeDumper as yaml_dumper
from uptrack import version
try:
import gconf
have_gconf = True
except ImportError:
have_gconf = False
__version__ = version.version
STATUS_FILE_FORMAT_VERSION = "2"
USERAGENT='Uptrack/' + __version__
BUG_EMAIL='support@ksplice.com'
USE_SERVER_RESOLVER=True
UPTRACK_CONFIG_FILE='/etc/uptrack/uptrack.conf'
UPTRACK_UUID_FILE='/var/lib/uptrack/uuid'
UPTRACK_SERIAL_FILE='/var/lib/uptrack/serial'
UPTRACK_CACHE_DIR="/var/cache/uptrack"
UPDATE_REPO_URL="https://updates.ksplice.com/update-repository"
# We can't put this under /var/cache/uptrack, because we want
# it to be world-readable.
UPTRACK_EFFECTIVE_KERNEL_FILE='/var/lib/uptrack/effective_kernel'
# This value, in Uptrack.Result.code, indicates that the error was due
# to a network failure.
ERROR_NO_NETWORK = 10
# Uptrack threw an unhandled exception
ERROR_INTERNAL_ERROR = 11
# User answered "no" to the confirmation prompt
ERROR_USER_NO_CONFIRM = 12
# Running kernel is not supported by Uptrack
ERROR_UNSUPPORTED = 13
# The user's access key was invalid
ERROR_INVALID_KEY = 14
# The Uptrack client is too old to manage the updates
ERROR_TOO_OLD_INSTALL = 15
# The Uptrack client is too old to even parse packages.yml
ERROR_TOO_OLD_PARSE = 16
# Your subscription to the Ksplice Uptrack service has expired
ERROR_EXPIRED = 17
# The Uptrack server returned an internal error
ERROR_INTERNAL_SERVER_ERROR = 18
# The machine has not yet been activated for use with the Uptrack service.
ERROR_MACHINE_NOT_ACTIVATED = 19
# The user's access key is missing
ERROR_MISSING_KEY = 20
# The sysfs filesystem isn't mounted at /sys
ERROR_SYS_NOT_MOUNTED = 21
def mkdirp(dir):
    """Create directory `dir` and any missing parents (mkdir -p).

    An already-existing directory is not an error; any other OSError
    (permissions, a file in the way, ...) propagates to the caller.
    """
    try:
        os.makedirs(dir)
    except OSError:
        # `except OSError, e` is py2-only syntax; fetch the active
        # exception portably so the module parses on any interpreter.
        e = sys.exc_info()[1]
        if e.errno != errno.EEXIST:
            raise
def write_file(path, data):
    """Write `data` to the file at `path`, truncating any existing content.

    Uses try/finally rather than `with` to stay compatible with the old
    interpreters this module still supports.
    """
    fh = open(path, 'w')
    try:
        fh.write(data)
    finally:
        fh.close()
# Accept a mode argument so that callers can pass 'rb' if they need binary IO.
def read_file(path, mode='r'):
    """Return the entire contents of the file at `path`.

    :param mode: open() mode string; pass 'rb' for binary IO.
    """
    fh = open(path, mode)
    try:
        return fh.read()
    finally:
        fh.close()
def yaml_load(stream, **kwargs):
    # Always go through the safe loader (C-accelerated when libyaml is
    # available) so YAML input cannot instantiate arbitrary Python objects.
    return yaml.load(stream, Loader=yaml_loader, **kwargs)
def yaml_dump(obj, stream=None, **kwargs):
    # Mirror of yaml_load(): safe dumper, C-accelerated when available.
    # Returns the YAML text when stream is None, like yaml.dump().
    return yaml.dump(obj, stream, Dumper=yaml_dumper, **kwargs)
def getConfigBooleanOrDie(config, section, option, default):
    """
    Return the value of a boolean config option, or `default` if no value is
    given.

    Raise a ResultException on invalid (non-boolean) values.
    """
    if not config.has_option(section, option):
        return default
    try:
        return config.getboolean(section, option)
    except ValueError:
        # `except ValueError, e` is py2-only syntax; fetch the exception
        # portably so the module parses on any interpreter version.
        e = sys.exc_info()[1]
        msg = """Unable to read %s setting from %s.
%s
Please check that %s is set to 'yes' or 'no' and try again.""" % (
            option, UPTRACK_CONFIG_FILE, e, option)
        raise ResultException(1, msg)
def queryRealArch(userarch):
    """Return the real machine architecture of the running kernel.

    A 32-bit userland on a 64-bit kernel makes plain `uname -m` lie, so
    run uname under a 64-bit setarch persona instead. Tries the
    'linux64' persona first, then 'x86_64'; falls back to `userarch`
    when setarch is unavailable or fails to run.
    """
    try:
        for persona in ('linux64', 'x86_64'):
            proc = subprocess.Popen(['setarch', persona, 'uname', '-m'],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            machine = proc.communicate()[0].strip()
            if proc.returncode == 0:
                return machine
    except (subprocess.CalledProcessError, OSError):
        logging.debug("Unable to determine the kernel architecture")
        logging.debug(traceback.format_exc())
    return userarch
def getUname():
    """
    Gets the uname, but lies a little, since the arch field is
    actually governed by 'personality' and not the real architecture.

    Note that this returns both the architecture Uptrack is being run
    under, as well as the architecture of the kernel itself (i.e.
    'uname -m' and 'linux64 uname -m').

    :returns: a 6-tuple (sysname, hostname, release, version, arch,
              userarch) -- note the extra trailing element compared to
              os.uname(); `arch` is the real kernel architecture,
              `userarch` the personality-affected one. Both have the
              i486/i586/i686 variants normalized to 'i386'.
    """
    sysname, hostname, release, version, userarch = os.uname()
    arch = queryRealArch(userarch)
    if arch in ['i686', 'i586', 'i486']: arch = 'i386'
    if userarch in ['i686', 'i586', 'i486']: userarch = 'i386'
    uname = (sysname, hostname, release, version, arch, userarch)
    return uname
__curl = None
def initCurl(config=None):
"""Initialize the shared cURL object for getCurl().
"""
global __curl
if __curl is None:
__curl = pycurl.Curl()
__curl.setopt(pycurl.USERAGENT, USERAGENT)
__curl.setopt(pycurl.OPT_FILETIME, 1)
__curl.setopt(pycurl.FOLLOWLOCATION, 1)
__curl.setopt(pycurl.MAXREDIRS, 5)
__curl.setopt(pycurl.ENCODING, '')
if config and config.ssl_ca_certs:
for type, value in config.ssl_ca_certs:
__curl.setopt(type, value)
else:
__curl.setopt(pycurl.CAINFO, "/usr/share/uptrack/ca-certificates.crt")
__curl.setopt(pycurl.CONNECTTIMEOUT, 30)
__curl.setopt(pycurl.TIMEOUT, 600)
if config and config.proxy is not None:
__curl.setopt(pycurl.PROXY, config.proxy)
if config and getattr(config, 'verbose', 0) > 1:
__curl.setopt(pycurl.VERBOSE, 1)
def getCurl():
    """Return a shared cURL object for use by Uptrack.
    For performance, this always returns the same cURL object, in
    order to allow libcURL to reuse connections as much as
    possible. In order for this to work properly, callers should
    always explicitly set the HTTP method they desire before calling
    `.perform()`, and should reset any other unusual properties they
    set on the cURL object to a reasonable default value when they're
    done.
    Needless to say, this is not thread-safe.
    You must call initCurl() before using this function; before that
    call this returns None.
    """
    return __curl
def verrevcmp(a, b):
    """Emulates dpkg's verrevcmp() in lib/vercmp.c.

    Compares two version fragments the way dpkg does: alternating runs
    of non-digit and digit characters, where '~' sorts before anything
    (even the empty string), letters sort before other non-digits, and
    digit runs compare as integers.  Returns <0, 0, >0 like cmp().
    """
    def order(x):
        # Sort weight for a single character (x is '' or a 1-char string).
        if x == '~': return -1
        if x.isdigit(): return 0
        if not x: return 0
        if x.isalpha(): return ord(x)
        # Non-alphanumerics sort after all letters.
        return ord(x) + 256
    def num(s):
        # int() of a possibly-empty digit string.
        if not s: return 0
        return int(s)
    while a or b:
        # Compare the leading non-digit runs character by character.
        # (Removed an unused local `first_diff` that was assigned here
        # but never read.)
        while (a and not a[0].isdigit()) or (b and not b[0].isdigit()):
            d = cmp(order(a[:1]), order(b[:1]))
            if d: return d
            a = a[1:]; b = b[1:]
        # Then compare the leading digit runs numerically.
        an, a = re.match('^([0-9]*)(.*)', a).groups()
        bn, b = re.match('^([0-9]*)(.*)', b).groups()
        d = cmp(num(an), num(bn))
        if d: return d
    return 0
def parseversion(v):
    """Emulates dpkg's parseversion(), in lib/parsehelp.c.

    Splits a Debian version string into (epoch, upstream version,
    debian revision).  Missing components default to 0 and ''.
    """
    # Everything before the first ':' is the (integer) epoch.
    epoch = 0
    rest = v
    if ':' in v:
        epochstr, rest = v.split(':', 1)
        epoch = int(epochstr)
    # Everything after the first '-' is the Debian revision.
    version, revision = rest, ''
    if '-' in rest:
        version, revision = rest.split('-', 1)
    return epoch, version, revision
def compareversions(a, b):
    """Emulates dpkg --compare-versions.  Returns -1, 0, 1 like cmp().

    Epochs dominate; upstream versions break epoch ties; revisions
    break upstream ties.
    """
    (ae, av, ar) = parseversion(a)
    (be, bv, br) = parseversion(b)
    d = cmp(ae, be)
    if d:
        return d
    d = verrevcmp(av, bv)
    if d:
        return d
    return verrevcmp(ar, br)
# sort() comparator: orders two objects by their 'order' attribute.
def cmp_order(a, b):
    return cmp(a.order, b.order)
class Result(object):
    """Overall outcome of a run: exit code, message, and per-update results."""

    def __init__(self, code = 0, message = ''):
        # Exit code and human-readable message for the whole run.
        self.code = code
        self.message = message
        # Lists of per-update ActionResult objects.
        self.succeeded = []
        self.failed = []
        # Optional extra payloads; None means "not present".
        self.debug = None
        self.alert = None
        self.desupported = None
        self.tray_icon_error = None
        self.uptrack_log = None
        # Set to True when a new kernel is available.
        self.newkernel = False
def resultFromPycurl(config, e):
    """Translate a pycurl error args tuple `e` = (errcode, errstring)
    into a Result carrying a user-facing explanation of the failure."""
    # Plain connectivity failures: suggest checking the network/proxy.
    if e[0] in [pycurl.E_COULDNT_RESOLVE_HOST,
                pycurl.E_COULDNT_CONNECT,
                pycurl.E_OPERATION_TIMEOUTED]:
        msg = ("Could not connect to the Ksplice Uptrack server. "
               "A network connection is needed to ensure you have "
               "the latest list of updates to install. "
               "Please check your Internet connection and try again. "
               "If this computer does not have direct access to the Internet, "
               "you will need to configure an https proxy in %s." % UPTRACK_CONFIG_FILE)
    # Proxy is configured but its hostname does not resolve.
    elif e[0] == pycurl.E_COULDNT_RESOLVE_PROXY:
        msg = ("Could not resolve your proxy server (%s) while trying to "
               "connect to the Ksplice Uptrack server. You should check that "
               "this machine can directly connect to the proxy server configured "
               "in %s." % (config.proxy, UPTRACK_CONFIG_FILE))
    # The configured update_repo_url is not a valid URL.
    elif e[0] == pycurl.E_URL_MALFORMAT:
        msg = ("Malformed URL <%s> for Uptrack server. Please correct the "
               "value of Network.update_repo_url in %s." %
               (config.remoteroot, UPTRACK_CONFIG_FILE))
    # TLS certificate verification failed; advice differs depending on
    # whether the user pointed us at a custom server.
    elif e[0] == pycurl.E_SSL_CACERT:
        msg = "Could not verify the Ksplice Uptrack server's SSL certificate. "
        if config.remoteroot == UPDATE_REPO_URL:
            msg += ("Check your network configuration, and contact %s for "
                    "assistance if you are unable to resolve this error." %
                    (BUG_EMAIL,))
        else:
            msg += ("You may need to update ssl_ca_cert_file or "
                    "ssl_ca_cert_dir in %s with the path to an appropriate "
                    "CA. Please consult %s for assistance if you are "
                    "unable to resolve this error." %
                    (UPTRACK_CONFIG_FILE, BUG_EMAIL))
    # Anything else: generic network-error advice.
    else:
        msg = ("Unexpected error communicating with the Ksplice Uptrack server. "
               "Please check your network connection and try again. "
               "If this error re-occurs, e-mail %s. " %
               (BUG_EMAIL,))
    # Append cURL's own error string for diagnosis.
    msg = textwrap.fill(msg) + "\n\n(Network error: " + e[1] + ")"
    return Result(ERROR_NO_NETWORK, msg)
class ResultException(Exception):
    """Exception that carries an error code and message wrapped in a
    Result object (available as self.result)."""
    def __init__(self, code, message):
        # We can't use super here because Exception is an old-style
        # class in python 2.4
        Exception.__init__(self, code, message)
        self.result = Result(code, message)
# Shared, pre-built exception raised when the Uptrack server reports an
# internal (transient) error.
server_error_exception = ResultException(ERROR_INTERNAL_SERVER_ERROR, """\
The Ksplice Uptrack service has experienced a transient error. Please
wait a few minutes and try again. If this error persists, please
contact %s for assistance.""" % (BUG_EMAIL,))
class ActionResult(object):
    """Outcome of running one `command` against one `update`."""

    def __init__(self, update, command):
        # Per-action exit code and message.
        self.code = 0
        self.message = ''
        self.update = update
        self.command = command
        # Failure diagnostics, filled in only when relevant.
        self.abort_code = None
        self.stack_check_processes = None
        self.nomatch_modules = None
        self.locked_modules = []
        self.usedby_modules = []
        self.depmod_needed = False
        self.debug = ''
        self.core_version = update.getCoreVersion()

    def asDict(self):
        """Serializable summary of this action.

        Note: code, debug, and depmod_needed are intentionally not part
        of the reported dict.
        """
        return {
            'Command': self.command,
            'ID': self.update.id,
            'Name': self.update.name,
            'Message': self.message,
            'Abort': self.abort_code,
            'Core Version': self.core_version,
            'Stack Check': self.stack_check_processes,
            'Nonmatching Modules': self.nomatch_modules,
            'Locked Modules': self.locked_modules,
            'UsedBy Modules': self.usedby_modules,
        }
def getKernelDict():
    """Describe the booted kernel, as reported by uname, as a dict."""
    uname = os.uname()
    # uname is (sysname, nodename, release, version, machine); the
    # nodename is deliberately not reported here.
    return { 'Sysname': uname[0]
           , 'Release': uname[2]
           , 'Version': uname[3]
           , 'UserArchitecture': uname[4] }
class Status(object):
def __init__(self, statusdir):
self.statusdir = statusdir
self.statusloc = os.path.join(statusdir, 'status')
self.resultsloc = os.path.join(statusdir, 'results')
self.upgradeloc = os.path.join(statusdir, 'upgrade_plan')
self.stamploc = os.path.join(statusdir, 'results.server-stamp')
# An explanation of return values:
# - 'None' means status or results file does not exist
# - If x is returned, x['Result']['Code'] will be populated
# with an error code and if the error code is nonzero,
# x['Result']['Message'] will have an error message.
# - If the error code is 2, then the upgrade plan are not available
# - If the error code is 3, then the installed updates are not available.
def readStatus(self):
try:
f = open(self.statusloc)
status = yaml_load(f)
f.close()
except IOError, e:
if e.errno == errno.EACCES:
if os.path.exists('/etc/debian_version'):
recommendation = 'sudo adduser $USER adm'
else:
recommendation = 'gpasswd -a <your username> adm (as root)'
status = {}
status['Result'] = {}
status['Result']['Code'] = 3
status['Result']['Message'] = \
("Permission denied reading the status file. You need to be in the adm "
"group in order to use the the Ksplice Uptrack Manager; you can add yourself by running\n\n"
"%s\n\nYou will need to log out and back in "
"for this change to take effect." % recommendation)
return status
elif e.errno == errno.ENOENT:
return None
else:
status = {}
status['Result'] = {}
status['Result']['Code'] = 3
status['Result']['Message'] = "Error reading status file (%s): %s\n" % \
(self.statusloc, os.strerror(e.errno))
return status
try:
f = open(self.upgradeloc)
upgrade = yaml_load(f)
f.close()
status.update(upgrade)
except IOError, e:
if e.errno == errno.ENOENT:
status['Plan'] = []
else:
status['Plan'] = []
status['Result'] = {}
status['Result']['Code'] = 2
status['Result']['Message'] = "Error reading upgrade plan (%s): %s\n" % \
(self.upgradeloc, os.strerror(e.errno))
return status
try:
f = open(self.resultsloc)
results = yaml_load(f)
f.close()
status.update(results)
except IOError, e:
status['Result'] = {}
if e.errno == errno.ENOENT:
status['Result']['Code'] = 0
else:
status['Result']['Code'] = 1
status['Result']['Message'] = "Error reading results file (%s): %s\n" % \
(self.resultsloc, os.strerror(e.errno))
return status
def _writeFile(self, contents, file):
dir = os.path.dirname(file)
if not os.path.isdir(dir):
os.makedirs(dir)
f = open(file, 'w')
yaml_dump(contents, f, version=(1, 1),
explicit_start=True, explicit_end=True)
f.close()
def addIdentity(config, d, local_status=None):
d['Client'] = {}
d['Client']['Hostname'] = getattr(config, 'hostname', None)
d['Client']['FullHostname'] = getattr(config, 'fullhostname', None)
d['Client']['Key'] = config.accesskey
d['Client']['UUID'] = config.uuid
if config.newuuid:
d['Client']['NewUUID'] = config.newuuid
if config.olduuid:
d['Client']['OldUUID'] = config.olduuid
d['Client']['CPUInfo'] = config.cpuinfo
d['Client']['UptrackVersion'] = __version__
try:
d['Client']['Uptime'] = read_file('/proc/uptime').split()[0]
except IOError:
logging.debug(traceback.format_exc())
d['Client']['Uptime'] = -1
try:
d['Client']['RebootsSaved'] = len(file(os.path.join(config.localroot,
'reboots_saved')).readlines())
except IOError, e:
if e.errno == errno.ENOENT:
d['Client']['RebootsSaved'] = 0
else:
d['Client']['RebootsSaved'] = -1
logging.debug(traceback.format_exc())
if inVirtualBox():
d['Client']['VirtualBox'] = True
d['Client']['VMInfo'] = config.vminfo
if 'IP' in config.localip:
d['Client']['LocalIP'] = config.localip['IP']
else:
d['Client']['LocalIP_error'] = config.localip['Error']
d['Client']['Config'] = {}
d['Client']['Config']['Autoinstall'] = getattr(config, 'cron_autoinstall', False)
if getattr(config, 'init', None) is not None:
d['Client']['Config']['Init'] = getattr(config, 'init')
d['Client']['Config']['Cron'] = getattr(config, 'cron', False)
d['Client']['MmapMinAddr'] = getMmapMinAddr()
serial_stat = getattr(config, 'serial_stat', None)
if serial_stat is not None:
d['Client']['SerialStat'] = serial_stat
d['Client']['Tools'] = {}
for key, path in [('Depmod', '/sbin/depmod'), ('Modprobe', '/sbin/modprobe')]:
val = {}
try:
val['Stat'] = tuple(os.stat(path))
except OSError:
val['Stat'] = ()
try:
val['Link'] = os.readlink(path)
except OSError:
val['Link'] = ''
d['Client']['Tools'][key] = val
d['Kernel'] = {}
d['Kernel']['Sysname'] = config.sysname
d['Kernel']['Release'] = config.release
d['Kernel']['Version'] = config.version
d['Kernel']['Architecture'] = config.arch
d['Kernel']['UserArchitecture'] = config.userarch
if config.run_uuid:
d['RunUUID'] = config.run_uuid
else:
d['RunUUID_error'] = config.run_uuid_error
if local_status is not None:
effective = local_status.getEffective()
if effective is not None:
effective = effective['PackageVersion']
d['ClientEffectiveKernel'] = effective
addIdentity = staticmethod(addIdentity)
def writeStatus(self, local, new_client, installed_updates):
status = {}
status['Status format version'] = STATUS_FILE_FORMAT_VERSION
status['Time'] = datetime.datetime.utcnow()
self.addIdentity(local.client_config, status, local_status=local)
status['Updates'] = {}
status['Updates']['Installed'] = []
# Python 2.3 doesn't have sorted() or sort(key = ...)
installed_sorted = list(installed_updates)
installed_sorted.sort(cmp_order)
for u in installed_sorted:
d = {}
d['ID'] = u.id
d['Name'] = u.name
status['Updates']['Installed'].append(d)
status['New client'] = new_client
self._writeFile(status, self.statusloc)
def writeResults(self, local, res):
results = {}
results['Results format version'] = STATUS_FILE_FORMAT_VERSION
results['Time'] = datetime.datetime.utcnow()
self.addIdentity(local.client_config, results, local_status=local)
results['Result'] = {}
results['Result']['Succeeded'] = []
for action in res.succeeded:
d = action.asDict()
results['Result']['Succeeded'].append(d)
results['Result']['Failed'] = []
for action in res.failed:
d = action.asDict()
results['Result']['Failed'].append(d)
results['Result']['Code'] = res.code
results['Result']['Message'] = res.message
if res.debug is not None:
results['Debug'] = res.debug
if res.uptrack_log is not None:
results['UptrackLog'] = res.uptrack_log
if res.alert is not None:
results['Result']['Alert'] = res.alert
if res.desupported is not None:
results['Result']['Desupported'] = res.desupported
if res.tray_icon_error is not None:
results['Result']['TrayIconError'] = res.tray_icon_error
if res.newkernel:
results['Result']['New Kernel'] = True
if local.client_config.uninstall:
results['Result']['Uninstalled'] = True
self._writeFile(results, self.resultsloc)
def writePlan(self, name, actions):
plan = {}
plan[name.title()+' plan format version'] = STATUS_FILE_FORMAT_VERSION
plan['Time'] = datetime.datetime.utcnow()
plan['Plan'] = [ dict([(k, act[k]) for k in
('Command', 'ID', 'Name', 'EffectiveKernel') if k in act])
for act in actions ]
self._writeFile(plan, os.path.join(self.statusdir, name+'_plan'))
def writeUpgradePlan(self, plan):
self.writePlan('upgrade', plan)
def writeInitPlan(self, plan):
self.writePlan('init', plan)
def writeRemovePlan(self, plan):
self.writePlan('remove', plan)
def writeEffectiveKernel(self, effective, ids):
out = { 'EffectiveKernel': effective
, 'OriginalKernel': getKernelDict()
, 'Installed': ids }
self._writeFile(out, UPTRACK_EFFECTIVE_KERNEL_FILE)
def sendResultToServer(self, config):
try:
## Results file might not exist if this is the first time
## uptrack is run and there is nothing to report (e.g. 'show')
contents = read_file(self.resultsloc)
except IOError:
return
results_time = yaml_load(contents)['Time']
try:
stamp_time = yaml_load(read_file(self.stamploc))
if stamp_time >= results_time:
return
except (IOError, yaml.YAMLError, TypeError):
pass
status_url = posixpath.join(config.remote,
urllib.quote('result'))
c = getCurl()
c.setopt(pycurl.URL, status_url)
c.setopt(pycurl.HTTPPOST, [('result', contents)])
c.setopt(pycurl.WRITEFUNCTION, lambda data: None)
c.perform()
yaml_dump(results_time, file(self.stamploc, 'w'))
class LocalStatus(object):
    """Tracks the local machine's update state: which updates are applied
    (per sysfs), the effective kernel version, and the saved plans."""
    def __init__(self, config, remote_repo, logger):
        self.client_config = config
        self.statusdir = config.local
        # Set of currently-applied updates; filled by getInstalledUpdates().
        self.installed = set()
        self.new_client = False
        # Cached effective-kernel dict; None until set or loaded from disk.
        self.effective_kernel = None
        self.remote_repo = remote_repo
        self.logger = logger
    def getInstalledIDs(self):
        """Return the IDs of updates in the 'applied' stage, from both the
        old (/sys/module/ksplice_*) and new (/sys/kernel/ksplice) layouts."""
        installed_ids = []
        for f in glob.glob('/sys/module/ksplice_*/ksplice'):
            if read_file(os.path.join(f,'stage')).strip() == 'applied':
                installed_ids.append(re.match('^/sys/module/ksplice_(.*)/ksplice$',
                                              f).group(1))
        for f in glob.glob('/sys/kernel/ksplice/*/stage'):
            if read_file(f).strip() == 'applied':
                installed_ids.append(re.match('^/sys/kernel/ksplice/(.*)/stage$',
                                              f).group(1))
        return installed_ids
    def setEffective(self, effective):
        """Record the effective kernel from a ('sys/arch/release/version',
        package_version) pair."""
        sysname, arch, release, version = effective[0].split('/')
        self.effective_kernel = {
              'Sysname' : sysname
            , 'Architecture' : arch
            , 'Release' : release
            , 'Version' : version
            , 'PackageVersion': effective[1] }
    def getEffective(self):
        """Returns the effective kernel, either as set in this run or as
        loaded from disk. Returns None if the effective kernel cannot
        be determined."""
        if self.effective_kernel is not None:
            return self.effective_kernel
        try:
            f = open(UPTRACK_EFFECTIVE_KERNEL_FILE, 'r')
            effective = yaml_load(f)
            f.close()
        except (IOError, yaml.YAMLError):
            return None
        # Check that we booted into the same kernel as when the effective kernel
        # data was written.
        if getKernelDict() != effective['OriginalKernel']:
            return None
        # Check that we have the same updates loaded now as then.
        were_installed = set(effective['Installed'])
        now_installed = set(self.getInstalledIDs())
        if were_installed != now_installed:
            return None
        self.effective_kernel = effective['EffectiveKernel']
        return self.effective_kernel
    def getInstalledUpdates(self):
        """Resolve installed IDs to update objects via the remote repo;
        IDs the repository does not know are silently dropped."""
        list_installed = []
        for id in self.getInstalledIDs():
            u = self.remote_repo.idToUpdate(id)
            if u:
                list_installed.append(u)
        self.installed = set(list_installed)
        return self.installed
    def unpackPlan(self, plan):
        """Augment a plan we read or downloaded with some extra info."""
        for act in plan:
            # NOTE(review): unlike getInstalledUpdates(), this assumes
            # idToUpdate() always finds the ID — confirm plans can only
            # reference known updates.
            act['Update'] = self.remote_repo.idToUpdate(act['ID'])
            act['Name'] = act['Update'].name
    def readPlan(self, which_plan):
        """Load '<which_plan>_plan' from statusdir and unpack it."""
        f = open(os.path.join(self.statusdir, which_plan + '_plan'), "r")
        actions = yaml_load(f)['Plan']
        f.close()
        self.unpackPlan(actions)
        return actions
    def writeOutStatus(self, res, upgrade_plan, init_plan, remove_plan):
        """Write all status files and, when allowed, upload the results.
        Returns False if any write or upload failed, True otherwise."""
        s = Status(self.statusdir)
        logging.debug("Writing status to file.")
        try:
            # Call getEffective in case the file already has an effective
            # version, which we have neither loaded nor updated.
            self.getEffective()
            installed = self.getInstalledUpdates()
            s.writeStatus(self, self.new_client, installed)
            if res is not None:
                if res.code != 0:
                    # Attach the debug log to failed runs.
                    res.uptrack_log = self.logger.getDebugLog()
                s.writeResults(self, res)
            if upgrade_plan is not None:
                s.writeUpgradePlan(upgrade_plan)
            if init_plan is not None:
                s.writeInitPlan(init_plan)
            if remove_plan is not None:
                s.writeRemovePlan(remove_plan)
            if self.effective_kernel is not None:
                s.writeEffectiveKernel(self.effective_kernel, [u.id for u in installed])
        except Exception:
            logging.warning("Unable to write out status files")
            logging.debug(traceback.format_exc())
            return False
        # Only report to the server when networking is allowed and the
        # failure isn't itself a network/key problem.
        if (self.client_config.allow_net and
            (not res or not res.code or res.code not in
             (ERROR_NO_NETWORK,
              ERROR_INVALID_KEY,
              ERROR_MISSING_KEY))):
            logging.debug("Sending result to server.")
            try:
                if res is not None:
                    s.sendResultToServer(self.client_config)
            except Exception:
                logging.warning("Unable to send status to management server")
                logging.debug(traceback.format_exc())
                return False
        return True
    def readInitPlan(self):
        return self.readPlan('init')
    def readRemovePlan(self):
        return self.readPlan('remove')
    def readUpgradePlan(self):
        return self.readPlan('upgrade')
class PackageList(object):
    """Parsed form of the YAML package list the server serves for a kernel."""

    def __init__(self, text):
        parsed = yaml_load(text)
        self.package_list_yaml = parsed
        self.error = None
        # Defaults; overwritten from the YAML below.
        self.protocolVersion = None
        self.kspliceToolsApiVersion = None
        self.release = None
        self.version = None
        self.arch = None
        self.clientVersionToInstall = '0'
        self.clientVersionToParse = '0'
        self.protocolVersion = parsed['Protocol version']
        self.kspliceToolsApiVersion = parsed['Client']['Ksplice Tools API version']
        # The kernel this package list applies to.
        kernel = parsed['Kernel']
        self.release = kernel['Release']
        self.version = kernel['Version']
        self.arch = kernel['Architecture']
        # Minimum client versions; absent keys mean "any version".
        self.clientVersionToParse = parsed['Client'].get('Version to Parse', '0')
        self.clientVersionToInstall = parsed['Client'].get('Version to Install', '0')
        # Update IDs in server order, plus per-ID metadata.
        self.ids = []
        self.packageData = {}
        for entry in parsed['Updates']:
            self.ids.append(entry['ID'])
            self.packageData[entry['ID']] = entry
def download(c, url, filename, ifmodified=True, stringio=None):
    """Downloads a file to disk with PycURL.
    `c` - A pycurl.Curl() object. You probably want getCurl().
    `url` - URL to download.
    `filename` - Filename to download to.
    `ifmodified` - If `filename` exists, only re-download it if the server's
                   copy of `url` is newer (i.e., do the If-Modified-Since / 304
                   Not Modified thing).
    `stringio` - A (c)StringIO object that will be used to read content
                 from the server. This can be useful if a caller needs the
                 content of the response even if the server doesn't return
                 a 200 OK.
    Returns the HTTP response code; if you want more information, use
    c.getinfo().
    Raises non-ENOENT errors from os.stat, and any error from pycurl.
    """
    try:
        if ifmodified:
            try:
                # Send If-Modified-Since based on the local copy's mtime.
                t = int(os.stat(filename).st_mtime)
                c.setopt(pycurl.TIMEVALUE, t)
                c.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_IFMODSINCE)
            except OSError, e:
                # No local copy yet: do an unconditional GET.
                if e.errno != errno.ENOENT:
                    raise
        if stringio:
            s = stringio
        else:
            s = StringIO.StringIO()
        c.setopt(pycurl.URL, url)
        c.setopt(pycurl.HTTPGET, 1)
        c.setopt(pycurl.WRITEFUNCTION, s.write)
        c.perform()
        rcode = c.getinfo(pycurl.RESPONSE_CODE)
        if rcode == 200:
            mkdirp(os.path.dirname(filename))
            try:
                write_file(filename, s.getvalue())
            except Exception, e:
                # If the entire file didn't get written, try not to leave a
                # partial copy
                try:
                    os.remove(filename)
                except OSError, ee:
                    if ee.errno != errno.ENOENT:
                        raise ee
                raise e
            # Propagate the server's Last-Modified time to the local copy
            # so future If-Modified-Since checks are accurate.
            t = c.getinfo(pycurl.INFO_FILETIME)
            if t > 0:
                os.utime(filename, (t, t))
        elif rcode >= 400 and rcode != 404:
            logging.debug("The server returned error code %d:", rcode)
            logging.debug(s.getvalue())
        return rcode
    finally:
        # Reset the time condition so later transfers on this shared
        # handle are not accidentally conditional.
        c.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_NONE)
class UptrackConfig(object):
    """Client configuration: uname data, uptrack.conf settings, proxy/SSL
    setup, repository paths, and machine identity (UUID, CPU, VM info)."""
    def __init__(self):
        self.sysname, self.orig_hostname, self.release, self.version, self.arch, self.userarch = getUname()
        self.hostname = None
        config = ConfigParser.SafeConfigParser()
        try:
            config.read([UPTRACK_CONFIG_FILE])
        except ConfigParser.Error, e:
            raise ResultException(1, "Unable to parse config file: " + e.message)
        self.config = config
        self.setMisc()
        self.setProxy()
        self.setSSL()
        self.setRepoPaths()
        self.setCPUInfo()
        self.setModules()
        self.setVMInfo()
        self.setIP()
        self.removableModules = None
    def setCPUInfo(self):
        """Set self.cpuinfo = [sockets, total cores] from /proc/cpuinfo."""
        sockets = {}
        processors = 0
        try:
            for line in open("/proc/cpuinfo").readlines():
                # Count cores per physical package.
                if line.startswith("physical id"):
                    pid = line.split(":")[1][1:]
                    if pid in sockets:
                        sockets[pid] += 1
                    else:
                        sockets[pid] = 1
                if line.startswith("processor\t"):
                    processors += 1
        except IOError:
            logging.debug(traceback.format_exc())
            self.cpuinfo = [0, 0]
        else:
            if sockets == {}:
                # Virtual machine with no physical processors
                self.cpuinfo = [0, processors]
            else:
                self.cpuinfo = [len(sockets.keys()), sum(sockets.values())]
    def setModules(self):
        """Set self.modules to sorted [name, size] pairs from /proc/modules,
        skipping ksplice's own modules."""
        self.modules = []
        try:
            for line in open("/proc/modules").readlines():
                (name, size) = line.split()[0:2]
                if name.startswith("ksplice"):
                    continue
                self.modules.append([name, size])
        except IOError:
            logging.debug(traceback.format_exc())
        self.modules.sort()
    def newUUID(self):
        """Generate a fresh UUID with uuidgen and persist it."""
        uuid = None
        try:
            proc = subprocess.Popen(['uuidgen'], stdout=subprocess.PIPE)
            uuid = proc.communicate()[0].strip()
        except subprocess.CalledProcessError:
            raise ResultException(1, "Unable to generate a new Uptrack UUID.")
        try:
            mkdirp(os.path.dirname(UPTRACK_UUID_FILE))
            write_file(UPTRACK_UUID_FILE, uuid + "\n")
        except (IOError, OSError), e:
            raise ResultException(1, "Unable to write the Uptrack UUID file " +
                                  UPTRACK_UUID_FILE + ":\n " + str(e))
        return uuid
    def regenerateCron(self):
        """Rebuild the uptrack crontab and clear the backoff state."""
        p = subprocess.Popen(['/usr/lib/uptrack/regenerate-crontab'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        output, _ = p.communicate()
        if p.returncode != 0:
            logging.debug("Error regenerating crontab.")
            logging.debug(output)
        try:
            os.unlink(os.path.join(self.localroot, 'backoff-counter'))
            os.unlink(os.path.join(self.localroot, 'backoff'))
        except OSError:
            pass
    def updateBackoff(self, backoff):
        """Store the new backoff value; reset the random counter only when
        the backoff actually changed."""
        try: old = read_file(self.localroot+'/backoff')
        except IOError:
            old = None
        if old != str(backoff)+'\n':
            write_file(self.localroot+'/backoff', str(backoff)+'\n')
            write_file(self.localroot+'/backoff-counter',
                       str(random.randrange(0, backoff))+'\n')
    def configureHostname(self):
        """
        Adjust `hostname` if hostname_override_file is set, and set `fullhostname`.
        """
        if self.config.has_option('Settings', 'hostname_override_file'):
            hostname_override_file = self.config.get('Settings', 'hostname_override_file')
            try:
                self.fullhostname = self.hostname = read_file(hostname_override_file).strip()
                if not self.hostname:
                    logging.error("You must supply a non-empty hostname.")
                    logging.error("Please check the hostname_override_file option in /etc/uptrack/uptrack.conf.")
                    sys.exit(1)
            except (IOError, OSError):
                logging.error("Unable to read hostname from %s." % (hostname_override_file,))
                logging.error("Please check the hostname_override_file option in /etc/uptrack/uptrack.conf.")
                sys.exit(1)
        else:
            self.hostname = self.orig_hostname
            try:
                # Best-effort reverse lookup for the fully-qualified name.
                self.fullhostname = socket.gethostbyaddr(self.hostname)[0]
            except socket.error:
                self.fullhostname = ''
    def setMisc(self):
        """Read assorted simple settings (access key, feature booleans) and
        generate a per-run UUID."""
        self.lockfile = "/var/lib/uptrack/lock"
        self.accesskey = ""
        if self.config.has_option('Auth', 'accesskey'):
            self.accesskey = self.config.get('Auth', 'accesskey')
        self.uuid = None
        self.newuuid = None
        self.olduuid = None
        self.debug_to_server = getConfigBooleanOrDie(
            self.config, 'Settings', 'debug_to_server', True)
        self.use_hw_uuid = getConfigBooleanOrDie(
            self.config, 'Auth', 'use_hw_uuid', False)
        self.no_rmmod = getConfigBooleanOrDie(
            self.config, 'Settings', 'no_rmmod', False)
        self.run_uuid = None
        self.run_uuid_error = None
        try:
            p = subprocess.Popen(['uuidgen'], stdout=subprocess.PIPE)
            self.run_uuid = p.communicate()[0].strip()
        except subprocess.CalledProcessError:
            self.run_uuid_error = traceback.format_exc()
    def initWithLock(self):
        # Note! This is not called by __init__, because UptrackConfig is not
        # initialized under the repository lock. This must be called separately
        # once the lock is held.
        self.serial = 0
        self.serial_stat = None
        uuid = None
        if self.use_hw_uuid:
            # NOTE(review): vminfo.get('uuid') can be None when no hardware
            # UUID was found, which would make .lower() raise — confirm
            # use_hw_uuid deployments always have one.
            uuid = self.vminfo.get('uuid').lower()
            if uuid == '00000000-0000-0000-0000-000000000000':
                uuid = None
        if uuid is None:
            try:
                uuid = read_file(UPTRACK_UUID_FILE).strip()
                try:
                    self.serial = int(read_file(UPTRACK_SERIAL_FILE).strip())
                except ValueError:
                    # Unparsable serial file: report its stat to the server.
                    self.serial_stat = tuple(os.stat(UPTRACK_SERIAL_FILE))
            except (IOError, OSError):
                pass
        if not uuid:
            uuid = self.newUUID()
        self.setUUID(uuid)
        self.configureHostname()
    def incrementSerial(self):
        """ Increment self.serial and write the result to disk.
        Returns the previous serial number.
        """
        old = self.serial
        self.serial += 1
        try:
            # Write-then-rename for an atomic update of the serial file.
            tmp_serial_file = UPTRACK_SERIAL_FILE + ".tmp"
            write_file(tmp_serial_file, "%d\n" % (self.serial,))
            os.rename(tmp_serial_file, UPTRACK_SERIAL_FILE)
        except (IOError, OSError), e:
            logging.debug("Unable to store new serial", exc_info=True)
            raise ResultException(1,
                "Unable to increment the Uptrack serial number (%s):\n%s"
                % (UPTRACK_SERIAL_FILE, e))
        return old
    def setProxy(self):
        """ Set self.proxy based on config and the environment.
        Set self.proxy to the value of a proxy server to use to talk to the
        Uptrack server, based on the config file, the environment, and the
        global GConf database if available.
        Upon return, self.proxy will be set in one of three ways:
        - None: No proxy setting was detected. Uptrack will let pycurl attempt
          to choose a proxy based on its own defaults.
        - '': The user explicitly requested that no proxy be used. Uptrack will
          force pycurl not to use a proxy.
        - Any other string: The URL of an HTTPS proxy server to use with
          the CONNECT method.
        In order to allow the user to explicitly specify "no proxy" globally, we
        accept the value 'none' (case insensitive) in the Network.https_proxy
        setting in uptrack.conf, and translate it to self.proxy = ''. An empty
        setting is taken to be unset, and will result in self.proxy being None.
        (Note that, confusingly, this means that "Network.https_proxy = none"
        corresponds to self.proxy = '', and vice versa.)
        """
        self.proxy = None
        # 1. Explicit config-file setting wins.
        if self.config.has_option('Network', 'https_proxy'):
            proxy = self.config.get('Network', 'https_proxy').strip()
            if proxy:
                if proxy.lower() == 'none':
                    self.proxy = ''
                else:
                    self.proxy = proxy
                return
        # 2. Then the standard environment variables.
        for key in ['https_proxy', 'HTTPS_PROXY', 'http_proxy']:
            if key in os.environ:
                self.proxy = os.environ[key]
                return
        # 3. Finally GConf, if present and enabled.
        # default to True to preserve behavior of old config files
        enable_gconf = getConfigBooleanOrDie(
            self.config, 'Network', 'gconf_proxy_lookup', True)
        if not (have_gconf and enable_gconf):
            return
        try:
            client = gconf.client_get_default()
            if client.get_bool('/system/http_proxy/use_http_proxy'):
                host = client.get_string('/system/http_proxy/host')
                port = client.get_int('/system/http_proxy/port')
                self.proxy = 'http://' + host + ":" + str(port)
        except Exception:
            pass
    def setSSL(self):
        """Collect configured CA file/dir as (pycurl option, value) pairs."""
        self.ssl_ca_certs = []
        if self.config.has_option('Network', 'ssl_ca_cert_file'):
            self.ssl_ca_certs.append((pycurl.CAINFO,
                                      self.config.get('Network', 'ssl_ca_cert_file')))
        if self.config.has_option('Network', 'ssl_ca_cert_dir'):
            self.ssl_ca_certs.append((pycurl.CAPATH,
                                      self.config.get('Network', 'ssl_ca_cert_dir')))
    def setRepoPaths(self):
        """Set the local cache path (per-kernel) and the remote repo URL."""
        self.localroot = UPTRACK_CACHE_DIR
        self.local = os.path.join(self.localroot,
                                  self.sysname,
                                  self.arch,
                                  self.release,
                                  self.version)
        self.remoteroot = UPDATE_REPO_URL
        if self.config.has_option("Network", "update_repo_url"):
            remote = self.config.get("Network", "update_repo_url").strip()
            if remote:
                self.remoteroot = remote
    def setUUID(self, uuid):
        """Record the UUID and derive the per-client remote URL from it."""
        self.uuid = uuid
        self.remote = posixpath.join(self.remoteroot,
                                     urllib.quote(self.accesskey),
                                     "+uuid", urllib.quote(self.uuid))
    def setVMInfo(self):
        # Cached: getVMInfo() shells out, so only do it once.
        if not hasattr(self, 'vminfo'):
            self.vminfo = getVMInfo()
    def setIP(self):
        """
        Set localip to a dictionary of the form {"IP": "X.X.X.X"}.
        If the suppress_ip config option is enabled, set a dummy
        address. Otherwise, try to get it from the 'ip' command. Upon failure,
        set localip to an error dict of the form {"Error": "error_msg"} instead.
        """
        if getConfigBooleanOrDie(self.config, 'Settings', 'suppress_ip', False):
            self.localip = {"IP": "0.0.0.0"}
            return
        try:
            proto = rest = hostport = path = userinfo = netloc = port = host = None
            # Resolve whichever host we actually connect to: the proxy if
            # one is configured, otherwise the update server.
            uri = self.remoteroot
            if self.proxy:
                uri = self.proxy
            (proto, rest) = urllib.splittype(uri)
            # Curl accepts a proxy without leading http(s)://, which
            # requires special processing here.
            if self.proxy and not rest.startswith("//"):
                (proto, rest) = urllib.splittype("http://" + uri)
            if rest:
                (netloc, path) = urllib.splithost(rest)
            if netloc:
                (userinfo, hostport) = urllib.splituser(netloc)
            if hostport:
                (host, port) = urllib.splitport(hostport)
            if host:
                remoteip = socket.gethostbyname(host)
                # Ask the kernel which source address routes to the server.
                p = subprocess.Popen(['ip', 'route', 'get', remoteip],
                                     stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                routedata = p.communicate()[0]
                if ' src ' in routedata:
                    self.localip = {'IP': routedata[routedata.index(' src '):].split()[1]}
                else:
                    self.localip = {'Error': "Could not parse IP address from route data (%s)" % routedata}
            else:
                self.localip = {'Error': "Could not parse hostname out of remote or proxy (%s)" % uri}
        except Exception, e:
            self.localip = {'Error': "%s (host = %s, uri = %s)" % (str(e), host, uri)}
def inVirtualBox():
    """Return True if a VirtualBox virtual PCI device is present."""
    # PCI vendor ID 0x80ee is VirtualBox virtual devices
    # http://pci-ids.ucw.cz/read/PC/80ee
    try:
        for row in open('/proc/bus/pci/devices', 'r'):
            if row.split()[1].startswith('80ee'):
                return True
    except (IOError, IndexError):
        # No PCI bus exposed, or an unexpectedly short row.
        pass
    return False
def getVMInfo():
    """Find the UUID of this machine and of any VMs it is hosting.

    Returns a dict which may contain 'uuid', 'type', 'children',
    'num_containers', and '*_error' diagnostic keys.
    """
    vminfo = {}
    devnull = open('/dev/null', 'w')
    # On a Xen paravirt domU, you get the UUID from /sys/hypervisor/uuid.
    # On most other systems (dom0, HVM domU, bare hardware, most other
    # virtualization systems) you get the UUID from DMI, but accessing DMI
    # fails on a Xen paravirt domU. So we check /sys/hypervisor first.
    # Reading /sys/hypervisor/uuid hangs if xenstored hasn't started yet.
    # See https://bugzilla.redhat.com/show_bug.cgi?id=225203
    # So instead we spin off a child process to do the read, such that
    # it's okay if it hangs.
    try:
        proc = subprocess.Popen(['cat', '/sys/hypervisor/uuid'],
                                stdout=subprocess.PIPE, stderr=devnull)
    except subprocess.CalledProcessError, e:
        vminfo['xen_error'] = str(e)
    else:
        # Give the read at most one second before declaring it hung.
        if select.select([proc.stdout], [], [], 1)[0]:
            if proc.wait() == 0:
                vminfo['uuid'] = proc.stdout.read().strip()
            # else: not Xen
        else:
            vminfo['xen_error'] = 'Read of /sys/hypervisor/uuid timed out; is xenstored running?'
    if vminfo.get('uuid') == '00000000-0000-0000-0000-000000000000':
        # The all-zero UUID identifies dom0; enumerate its guests instead.
        vminfo['type'] = 'Xen dom0'
        del vminfo['uuid']
        try:
            proc = subprocess.Popen(['xenstore-list', '/vm'],
                                    stdout=subprocess.PIPE, stderr=devnull)
            vminfo['children'] = proc.communicate()[0].strip().split('\n')
            try:
                vminfo['children'].remove('00000000-0000-0000-0000-000000000000')
            except ValueError:
                pass
            if proc.wait():
                vminfo['xen_error'] = 'xenstore-list /vm returned %d' % proc.returncode
        except (IOError, OSError, subprocess.CalledProcessError), e:
            vminfo['xen_error'] = str(e)
    elif 'uuid' in vminfo:
        vminfo['type'] = 'Xen paravirt domU'
    # Checks for other virtualization systems would go here
    if 'uuid' not in vminfo:
        try:
            # Bare metal, or Xen HVM domU, or VMware, or KVM
            proc = subprocess.Popen(['dmidecode', '-t', 'system'],
                                    stdout=subprocess.PIPE, stderr=devnull)
            for line in proc.communicate()[0].split('\n'):
                s = line.split("UUID: ", 1)
                if len(s) > 1:
                    vminfo['uuid'] = s[1]
                s = line.split("Product Name: ", 1)
                if len(s) > 1:
                    # "HVM domU" is the most interesting value here, but
                    # no harm in fetching this value unconditionally (it
                    # shows up in oopses, for instance)
                    vminfo.setdefault('type', s[1])
            if proc.wait():
                vminfo['dmidecode_error'] = 'dmidecode -t system returned %d' % proc.returncode
        except (IOError, OSError, subprocess.CalledProcessError), e:
            vminfo['dmidecode_error'] = str(e)
    try:
        # OpenVZ hosts: one line per container in /proc/vz/veinfo.
        vminfo['num_containers'] = len(file("/proc/vz/veinfo").readlines())
    except Exception:
        # Fix: was a bare 'except:'; Exception preserves the best-effort
        # behavior without swallowing KeyboardInterrupt/SystemExit.
        vminfo['num_containers'] = 0
    # Fix: the /dev/null handle was previously leaked.
    devnull.close()
    return vminfo
def getMmapMinAddr():
    """Return the value of `mmap_min_addr` on this machine, or None if
    it cannot be read (e.g. the procfs entry does not exist)."""
    try:
        return read_file('/proc/sys/vm/mmap_min_addr').strip()
    except (IOError, OSError):
        # Fix: was a bare 'except:'; narrowed to the file-access errors
        # read_file can raise (matching the except IOError used around
        # read_file elsewhere in this file).
        return None
| mit |
sudheesh001/oh-mainline | vendor/packages/Django/django/contrib/gis/tests/geoapp/test_regress.py | 95 | 3512 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.contrib.gis.tests.utils import no_mysql, no_spatialite
from django.contrib.gis.shortcuts import render_to_kmz
from django.db.models import Count, Min
from django.test import TestCase
from .models import City, PennsylvaniaCity, State, Truth
class GeoRegressionTests(TestCase):
    # Regression tests for previously-fixed GeoDjango bugs; each docstring
    # cites the Trac ticket the test guards against.

    def test_update(self):
        "Testing GeoQuerySet.update(). See #10411."
        pnt = City.objects.get(name='Pueblo').point
        bak = pnt.clone()
        pnt.y += 0.005
        pnt.x += 0.005

        City.objects.filter(name='Pueblo').update(point=pnt)
        self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
        # Restore the original point so other tests see unmodified fixtures.
        City.objects.filter(name='Pueblo').update(point=bak)
        self.assertEqual(bak, City.objects.get(name='Pueblo').point)

    def test_kmz(self):
        "Testing `render_to_kmz` with non-ASCII data. See #11624."
        name = "Åland Islands"
        places = [{'name' : name,
                   'description' : name,
                   'kml' : '<Point><coordinates>5.0,23.0</coordinates></Point>'
                   }]
        # Success criterion is simply that rendering the non-ASCII place
        # name does not raise.
        kmz = render_to_kmz('gis/kml/placemarks.kml', {'places' : places})

    @no_spatialite
    @no_mysql
    def test_extent(self):
        "Testing `extent` on a table with a single point. See #11827."
        pnt = City.objects.get(name='Pueblo').point
        # For a single point the extent degenerates to (x, y, x, y).
        ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
        extent = City.objects.filter(name='Pueblo').extent()
        for ref_val, val in zip(ref_ext, extent):
            self.assertAlmostEqual(ref_val, val, 4)

    def test_unicode_date(self):
        "Testing dates are converted properly, even on SpatiaLite. See #16408."
        founded = datetime(1857, 5, 23)
        mansfield = PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
                                                    founded=founded)
        self.assertEqual(founded, PennsylvaniaCity.objects.dates('founded', 'day')[0])
        self.assertEqual(founded, PennsylvaniaCity.objects.aggregate(Min('founded'))['founded__min'])

    def test_empty_count(self):
        "Testing that PostGISAdapter.__eq__ does check empty strings. See #13670."
        # contrived example, but need a geo lookup paired with an id__in lookup
        pueblo = City.objects.get(name='Pueblo')
        state = State.objects.filter(poly__contains=pueblo.point)
        cities_within_state = City.objects.filter(id__in=state)

        # .count() should not throw TypeError in __eq__
        self.assertEqual(cities_within_state.count(), 1)

    def test_defer_or_only_with_annotate(self):
        "Regression for #16409. Make sure defer() and only() work with annotate()"
        self.assertIsInstance(list(City.objects.annotate(Count('point')).defer('name')), list)
        self.assertIsInstance(list(City.objects.annotate(Count('point')).only('name')), list)

    def test_boolean_conversion(self):
        "Testing Boolean value conversion with the spatial backend, see #15169."
        t1 = Truth.objects.create(val=True)
        t2 = Truth.objects.create(val=False)

        val1 = Truth.objects.get(pk=1).val
        val2 = Truth.objects.get(pk=2).val
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(val1, bool)
        self.assertIsInstance(val2, bool)
        # verify values
        self.assertEqual(val1, True)
        self.assertEqual(val2, False)
| agpl-3.0 |
wevoice/wesub | apps/comments/models.py | 5 | 2796 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.db import models
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.sites.models import Site
from auth.models import CustomUser as User, Awards
from django.conf import settings
from django.db.models.signals import post_save
from django.utils.html import escape, urlize
from localeurl.utils import universal_url
# Upper bound on comment body length; may be overridden in Django settings.
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000)
class Comment(models.Model):
    # Generic comment model: may be attached to any model instance via the
    # contenttypes framework (content_type + object_pk pair).
    content_type = models.ForeignKey(ContentType,
            related_name="content_type_set_for_%(class)s")
    object_pk = models.TextField('object ID')
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    user = models.ForeignKey(User)
    # Optional parent comment, enabling threaded replies.
    reply_to = models.ForeignKey('self', blank=True, null=True)
    content = models.TextField('comment', max_length=COMMENT_MAX_LENGTH)
    submit_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Newest comments first by default.
        ordering = ('-submit_date',)

    def __unicode__(self):
        return "%s: %s..." % (self.user.__unicode__(), self.content[:50])

    def get_content(self):
        # HTML-escape the raw text, auto-link URLs and convert newlines to
        # <br /> so the result can be rendered directly in templates.
        content = []
        if self.content:
            content.append(urlize(escape(self.content)).replace('\n', '<br />'))
            content.append('\n')
        return ''.join(content)

    @classmethod
    def get_for_object(self, obj):
        # Return all comments attached to ``obj``, oldest first.  An unsaved
        # object (no pk) has no comments by definition.
        # NOTE(review): first parameter of this classmethod is named ``self``
        # but actually receives the class.
        if obj.pk:
            ct = ContentType.objects.get_for_model(obj)
            return self.objects.filter(content_type=ct, object_pk=obj.pk).order_by('submit_date').select_related('user')
        else:
            return self.objects.none()
def comment_post_save_handler(sender, instance, created, **kwargs):
    # Queue an asynchronous notification whenever a comment is saved.
    # Import is local to avoid a circular import at module load time.
    from messages.tasks import send_video_comment_notification
    send_video_comment_notification.delay(instance)


post_save.connect(Awards.on_comment_save, Comment)
post_save.connect(comment_post_save_handler, Comment,
                  dispatch_uid='notifications')
| agpl-3.0 |
qiankunshe/sky_engine | build/android/gyp/pack_relocations.py | 34 | 3683 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pack relocations in a library (or copy unchanged).
If --enable-packing and --configuration-name=='Release', invoke the
relocation_packer tool to pack the .rel.dyn or .rela.dyn section in the given
library files. This step is inserted after the libraries are stripped.
If --enable-packing is zero, the script copies files verbatim, with no
attempt to pack relocations.
Any library listed in --exclude-packing-list is also copied verbatim,
irrespective of any --enable-packing setting. Typically this would be
'libchromium_android_linker.so'.
"""
import optparse
import os
import shlex
import shutil
import sys
import tempfile
from util import build_utils
def PackLibraryRelocations(android_pack_relocations, library_path, output_path):
  """Copy |library_path| to |output_path|, then pack its relocations in place
  using the relocation_packer binary."""
  shutil.copy(library_path, output_path)
  build_utils.CheckOutput([android_pack_relocations, output_path])
def CopyLibraryUnchanged(library_path, output_path):
  # Plain verbatim copy, used for excluded libraries or when packing is off.
  shutil.copy(library_path, output_path)
def main(args):
  # Expand any "@file" response-file arguments before option parsing.
  args = build_utils.ExpandFileArgs(args)
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)
  parser.add_option('--clear-dir', action='store_true',
                    help='If set, the destination directory will be deleted '
                    'before copying files to it. This is highly recommended to '
                    'ensure that no stale files are left in the directory.')
  parser.add_option('--configuration-name',
                    default='Release',
                    help='Gyp configuration name (i.e. Debug, Release)')
  parser.add_option('--enable-packing',
                    choices=['0', '1'],
                    help=('Pack relocations if 1 and configuration name is \'Release\','
                          ' otherwise plain file copy'))
  parser.add_option('--exclude-packing-list',
                    default='',
                    help='Names of any libraries explicitly not packed')
  parser.add_option('--android-pack-relocations',
                    help='Path to the relocations packer binary')
  parser.add_option('--stripped-libraries-dir',
                    help='Directory for stripped libraries')
  parser.add_option('--packed-libraries-dir',
                    help='Directory for packed libraries')
  parser.add_option('--libraries', action='append',
                    help='List of libraries')
  parser.add_option('--stamp', help='Path to touch on success')

  options, _ = parser.parse_args(args)

  # Packing is only performed for Release builds with packing enabled.
  enable_packing = (options.enable_packing == '1' and
                    options.configuration_name == 'Release')
  exclude_packing_set = set(shlex.split(options.exclude_packing_list))

  libraries = []
  for libs_arg in options.libraries:
    libraries += build_utils.ParseGypList(libs_arg)

  if options.clear_dir:
    build_utils.DeleteDirectory(options.packed_libraries_dir)

  build_utils.MakeDirectory(options.packed_libraries_dir)

  for library in libraries:
    library_path = os.path.join(options.stripped_libraries_dir, library)
    output_path = os.path.join(
        options.packed_libraries_dir, os.path.basename(library))

    if enable_packing and library not in exclude_packing_set:
      PackLibraryRelocations(options.android_pack_relocations,
                             library_path,
                             output_path)
    else:
      CopyLibraryUnchanged(library_path, output_path)

  # Record inputs for incremental builds, and touch the stamp on success.
  if options.depfile:
    build_utils.WriteDepfile(
        options.depfile,
        libraries + build_utils.GetPythonDependencies())

  if options.stamp:
    build_utils.Touch(options.stamp)

  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
Rentier/manganese-harvest-core | harvest/io.py | 1 | 1349 | import numpy as np
def constant_from_file(path):
    """Read the harvest constant C from ``path``.

    The file consists of a single line containing an integer C with
    0 < C <= 100, e.g.::

        42
    """
    with open(path, 'r') as handle:
        first_line = handle.readline()
    return int(first_line)
def positions_from_file(path):
    """Read robot positions from ``path``.

    The first line holds the number ``c`` of robots; each of the next
    ``c`` lines holds one ``x y`` pair of non-negative integers, e.g.::

        3
        0 0
        1 1
        2 1

    Returns:
        tuple: ``(n, data)`` where ``data`` is an ``(n, 2)`` int array.

    Raises:
        ValueError: if a row is malformed, or the number of coordinate
            rows does not match the declared count.  (Previously a short
            file silently returned uninitialised ``np.empty`` rows, and
            validation relied on ``assert``, which is stripped under -O.)
    """
    with open(path, 'r') as f:
        n = int(f.readline())
        data = np.empty([n, 2], dtype=int)
        rows = 0
        for i, line in enumerate(f):
            p = [int(x) for x in line.split()]
            if len(p) != 2:
                raise ValueError('expected "x y" pair, got: %r' % line)
            if i >= n:
                raise ValueError('more coordinate rows than declared (%d)' % n)
            data[i, 0], data[i, 1] = p
            rows += 1
        if rows != n:
            raise ValueError('expected %d coordinate rows, got %d' % (n, rows))
    return n, data
def positions_to_file(path, data):
    """Write robot positions to ``path``.

    Same file format as 'Read positions': a count line followed by one
    ``x y`` line per robot.
    """
    lines = ["{}\n".format(len(data))]
    lines.extend("{} {}\n".format(x, y) for x, y in data)
    with open(path, 'w') as f:
        f.writelines(lines)
def mission_to_file(path, s_time, d_time, s_trav, d_trav, s_coll, d_coll):
    """Write mission statistics to ``path``, one value per line.

    Order is the static triple (time, traversal, collisions) followed by
    the dynamic triple.
    """
    ordered = (s_time, s_trav, s_coll, d_time, d_trav, d_coll)
    with open(path, 'w') as f:
        f.write("".join("{}\n".format(value) for value in ordered))
jumping/Diamond | src/collectors/pgq/test/testpgq.py | 31 | 1968 | #!/usr/bin/python
from test import CollectorTestCase, get_collector_config
from mock import MagicMock, patch
from pgq import PgQCollector
class TestPgQCollector(CollectorTestCase):
    # Unit tests for PgQCollector with queue/consumer data mocked out.

    def setUp(self):
        config = get_collector_config('PgQCollector', {})
        self.collector = PgQCollector(config, None)

    def test_import(self):
        self.assertTrue(PgQCollector)

    @patch.object(PgQCollector, 'publish')
    @patch.object(PgQCollector, 'get_consumer_info')
    @patch.object(PgQCollector, 'get_queue_info')
    def test_collect(self, get_queue_info, get_consumer_info, publish):
        # Decorators apply bottom-up, so arguments arrive in the order
        # get_queue_info, get_consumer_info, publish.
        get_queue_info.return_value = iter([
            ('q1', {
                'ticker_lag': 1,
                'ev_per_sec': 2,
            }),
            ('q2', {
                'ticker_lag': 3,
                'ev_per_sec': 4,
            }),
        ])

        get_consumer_info.return_value = iter([
            ('q1', 'c1', {
                'lag': 1,
                'pending_events': 2,
                'last_seen': 3,
            }),
            ('q2', 'c1', {
                'lag': 4,
                'pending_events': 5,
                'last_seen': 6,
            }),
        ])

        self.collector._collect_for_instance('db1', connection=MagicMock())

        # Queue-level metrics: <instance>.<queue>.<metric>
        self.assertPublished(publish, 'db1.q1.ticker_lag', 1)
        self.assertPublished(publish, 'db1.q1.ev_per_sec', 2)
        self.assertPublished(publish, 'db1.q2.ticker_lag', 3)
        self.assertPublished(publish, 'db1.q2.ev_per_sec', 4)

        # Consumer-level metrics: <instance>.<queue>.consumers.<consumer>.<metric>
        self.assertPublished(publish, 'db1.q1.consumers.c1.lag', 1)
        self.assertPublished(publish, 'db1.q1.consumers.c1.pending_events', 2)
        self.assertPublished(publish, 'db1.q1.consumers.c1.last_seen', 3)
        self.assertPublished(publish, 'db1.q2.consumers.c1.lag', 4)
        self.assertPublished(publish, 'db1.q2.consumers.c1.pending_events', 5)
        self.assertPublished(publish, 'db1.q2.consumers.c1.last_seen', 6)
| mit |
bregman-arie/ansible | test/units/modules/network/dellos10/dellos10_module.py | 60 | 2530 | # (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
# Directory holding test fixture files, plus a cache of their parsed
# contents keyed by absolute path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Return the contents of fixture file ``name``.

    The contents are parsed as JSON when possible, otherwise returned as
    raw text.  Results are cached in ``fixture_data`` keyed by path.
    """
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON; keep the raw text.  (Previously a bare ``except:``,
        # which also masked KeyboardInterrupt/SystemExit.)
        pass

    fixture_data[path] = data
    return data
class TestDellos10Module(ModuleTestCase):
    # Shared harness for dellos10 module tests.  Subclasses are expected to
    # provide ``self.module`` and override load_fixtures() -- TODO confirm.

    def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
        """Run the module and assert on failure/changed state and, when
        ``commands`` is given, on the resulting 'updates' list."""
        self.load_fixtures(commands)

        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)

        if commands is not None:
            if sort:
                # Order-insensitive comparison by default.
                self.assertEqual(sorted(commands), sorted(result['updates']), result['updates'])
            else:
                self.assertEqual(commands, result['updates'], result['updates'])

        return result

    def failed(self):
        # Run main() and assert it exits via fail_json.
        with self.assertRaises(AnsibleFailJson) as exc:
            self.module.main()

        result = exc.exception.args[0]
        self.assertTrue(result['failed'], result)
        return result

    def changed(self, changed=False):
        # Run main() and assert it exits via exit_json with the expected
        # 'changed' flag.
        with self.assertRaises(AnsibleExitJson) as exc:
            self.module.main()

        result = exc.exception.args[0]
        self.assertEqual(result['changed'], changed, result)
        return result

    def load_fixtures(self, commands=None):
        # Hook for subclasses to install command fixtures.
        pass
| gpl-3.0 |
onshape-public/onshape-clients | python/onshape_client/oas/models/btp_expression_operator244_all_of.py | 1 | 8146 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_expression9
except ImportError:
btp_expression9 = sys.modules["onshape_client.oas.models.btp_expression9"]
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPExpressionOperator244AllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum values accepted for the 'operator' property.
    allowed_values = {
        ("operator",): {
            "NONE": "NONE",
            "PLUS": "PLUS",
            "MINUS": "MINUS",
            "TIMES": "TIMES",
            "DIVIDE": "DIVIDE",
            "MODULUS": "MODULUS",
            "POWER": "POWER",
            "NEGATE": "NEGATE",
            "OR": "OR",
            "AND": "AND",
            "NOT": "NOT",
            "EQUAL_TO": "EQUAL_TO",
            "NOT_EQUAL_TO": "NOT_EQUAL_TO",
            "GREATER": "GREATER",
            "LESS": "LESS",
            "GREATER_OR_EQUAL": "GREATER_OR_EQUAL",
            "LESS_OR_EQUAL": "LESS_OR_EQUAL",
            "CONCATENATE": "CONCATENATE",
            "CONDITIONAL": "CONDITIONAL",
        },
    }

    validations = {}

    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "for_export": (bool,),  # noqa: E501
            "global_namespace": (bool,),  # noqa: E501
            "import_microversion": (str,),  # noqa: E501
            "namespace": ([btp_identifier8.BTPIdentifier8],),  # noqa: E501
            "operand1": (btp_expression9.BTPExpression9,),  # noqa: E501
            "operand2": (btp_expression9.BTPExpression9,),  # noqa: E501
            "operand3": (btp_expression9.BTPExpression9,),  # noqa: E501
            "operator": (str,),  # noqa: E501
            "space_after_namespace": (btp_space10.BTPSpace10,),  # noqa: E501
            "space_after_operator": (btp_space10.BTPSpace10,),  # noqa: E501
            "space_before_operator": (btp_space10.BTPSpace10,),  # noqa: E501
            "written_as_function_call": (bool,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        return None

    # Maps python attribute names to their JSON keys in the API payload.
    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "for_export": "forExport",  # noqa: E501
        "global_namespace": "globalNamespace",  # noqa: E501
        "import_microversion": "importMicroversion",  # noqa: E501
        "namespace": "namespace",  # noqa: E501
        "operand1": "operand1",  # noqa: E501
        "operand2": "operand2",  # noqa: E501
        "operand3": "operand3",  # noqa: E501
        "operator": "operator",  # noqa: E501
        "space_after_namespace": "spaceAfterNamespace",  # noqa: E501
        "space_after_operator": "spaceAfterOperator",  # noqa: E501
        "space_before_operator": "spaceBeforeOperator",  # noqa: E501
        "written_as_function_call": "writtenAsFunctionCall",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        return None

    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """btp_expression_operator244_all_of.BTPExpressionOperator244AllOf - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            bt_type (str): [optional]  # noqa: E501
            for_export (bool): [optional]  # noqa: E501
            global_namespace (bool): [optional]  # noqa: E501
            import_microversion (str): [optional]  # noqa: E501
            namespace ([btp_identifier8.BTPIdentifier8]): [optional]  # noqa: E501
            operand1 (btp_expression9.BTPExpression9): [optional]  # noqa: E501
            operand2 (btp_expression9.BTPExpression9): [optional]  # noqa: E501
            operand3 (btp_expression9.BTPExpression9): [optional]  # noqa: E501
            operator (str): [optional]  # noqa: E501
            space_after_namespace (btp_space10.BTPSpace10): [optional]  # noqa: E501
            space_after_operator (btp_space10.BTPSpace10): [optional]  # noqa: E501
            space_before_operator (btp_space10.BTPSpace10): [optional]  # noqa: E501
            written_as_function_call (bool): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| mit |
Edraak/circleci-edx-platform | lms/djangoapps/django_comment_client/base/urls.py | 63 | 2810 | """
Base urls for the django_comment_client.
"""
from django.conf.urls import url, patterns
urlpatterns = patterns(
    'django_comment_client.base.views',

    url(r'upload$', 'upload', name='upload'),

    # Thread-level actions.
    url(r'threads/(?P<thread_id>[\w\-]+)/update$', 'update_thread', name='update_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/reply$', 'create_comment', name='create_comment'),
    url(r'threads/(?P<thread_id>[\w\-]+)/delete', 'delete_thread', name='delete_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/upvote$', 'vote_for_thread', {'value': 'up'}, name='upvote_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/downvote$', 'vote_for_thread', {'value': 'down'}, name='downvote_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/flagAbuse$', 'flag_abuse_for_thread', name='flag_abuse_for_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/unFlagAbuse$', 'un_flag_abuse_for_thread', name='un_flag_abuse_for_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/unvote$', 'undo_vote_for_thread', name='undo_vote_for_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/pin$', 'pin_thread', name='pin_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/unpin$', 'un_pin_thread', name='un_pin_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/follow$', 'follow_thread', name='follow_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/unfollow$', 'unfollow_thread', name='unfollow_thread'),
    url(r'threads/(?P<thread_id>[\w\-]+)/close$', 'openclose_thread', name='openclose_thread'),

    # Comment-level actions.
    url(r'comments/(?P<comment_id>[\w\-]+)/update$', 'update_comment', name='update_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/endorse$', 'endorse_comment', name='endorse_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/reply$', 'create_sub_comment', name='create_sub_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/delete$', 'delete_comment', name='delete_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/upvote$', 'vote_for_comment', {'value': 'up'}, name='upvote_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/downvote$', 'vote_for_comment', {'value': 'down'}, name='downvote_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/unvote$', 'undo_vote_for_comment', name='undo_vote_for_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/flagAbuse$', 'flag_abuse_for_comment', name='flag_abuse_for_comment'),
    url(r'comments/(?P<comment_id>[\w\-]+)/unFlagAbuse$', 'un_flag_abuse_for_comment', name='un_flag_abuse_for_comment'),

    # Commentable (topic) level actions.
    url(r'^(?P<commentable_id>[\w\-.]+)/threads/create$', 'create_thread', name='create_thread'),
    url(r'^(?P<commentable_id>[\w\-.]+)/follow$', 'follow_commentable', name='follow_commentable'),
    url(r'^(?P<commentable_id>[\w\-.]+)/unfollow$', 'unfollow_commentable', name='unfollow_commentable'),
    url(r'users$', 'users', name='users'),
)
| agpl-3.0 |
40223151/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/unittest/test/support.py | 770 | 3379 | import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
    """Used as a mixin for TestCase"""

    # Check for a valid __hash__ implementation
    def test_hash(self):
        # Objects expected to compare equal must also hash equal.
        for obj_1, obj_2 in self.eq_pairs:
            try:
                if not hash(obj_1) == hash(obj_2):
                    self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
            except KeyboardInterrupt:
                raise
            except Exception as e:
                # NOTE(review): self.fail() above raises AssertionError, which
                # is itself caught here and re-reported as a hashing problem --
                # confirm whether that message rewrite is intentional.
                self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))

        # Objects expected to compare unequal should not hash equal.
        for obj_1, obj_2 in self.ne_pairs:
            try:
                if hash(obj_1) == hash(obj_2):
                    self.fail("%s and %s hash equal, but shouldn't" %
                              (obj_1, obj_2))
            except KeyboardInterrupt:
                raise
            except Exception as e:
                self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class LoggingResult(unittest.TestResult):
    """TestResult that appends the name of each result event to a shared log.

    Used by the unittest self-tests to verify the order in which result
    methods are invoked.  All overrides delegate to the base class after
    logging.  (The original mixed zero-argument ``super()`` with explicit
    ``super(LoggingResult, self)`` calls; unified here.)
    """

    def __init__(self, log):
        # The caller retains a reference to ``log`` and inspects it later.
        self._events = log
        super().__init__()

    def startTest(self, test):
        self._events.append('startTest')
        super().startTest(test)

    def startTestRun(self):
        self._events.append('startTestRun')
        super().startTestRun()

    def stopTest(self, test):
        self._events.append('stopTest')
        super().stopTest(test)

    def stopTestRun(self):
        self._events.append('stopTestRun')
        super().stopTestRun()

    def addFailure(self, *args):
        self._events.append('addFailure')
        super().addFailure(*args)

    def addSuccess(self, *args):
        self._events.append('addSuccess')
        super().addSuccess(*args)

    def addError(self, *args):
        self._events.append('addError')
        super().addError(*args)

    def addSkip(self, *args):
        self._events.append('addSkip')
        super().addSkip(*args)

    def addExpectedFailure(self, *args):
        self._events.append('addExpectedFailure')
        super().addExpectedFailure(*args)

    def addUnexpectedSuccess(self, *args):
        self._events.append('addUnexpectedSuccess')
        super().addUnexpectedSuccess(*args)
class ResultWithNoStartTestRunStopTestRun(object):
    """An object honouring TestResult before startTestRun/stopTestRun."""

    def __init__(self):
        # Collection attributes of the pre-startTestRun TestResult protocol.
        for attr in ('failures', 'errors', 'skipped',
                     'expectedFailures', 'unexpectedSuccesses'):
            setattr(self, attr, [])
        self.testsRun = 0
        self.shouldStop = False

    # The event hooks are deliberate no-ops.
    def startTest(self, test):
        pass

    def stopTest(self, test):
        pass

    def addError(self, test):
        pass

    def addFailure(self, test):
        pass

    def addSuccess(self, test):
        pass

    def wasSuccessful(self):
        return True
| gpl-3.0 |
evansd/django | django/views/decorators/debug.py | 86 | 2569 | import functools
from django.http import HttpRequest
def sensitive_variables(*variables):
    """
    Mark local variables of the decorated function as sensitive, so that
    the error-reporting machinery can later hide their values (e.g. when
    logging unhandled exceptions).

    With explicit names, only those variables are marked:

        @sensitive_variables('user', 'password', 'credit_card')
        def my_function(user):
            ...

    With no names, every local variable is treated as sensitive:

        @sensitive_variables()
        def my_function():
            ...
    """
    def decorator(func):
        @functools.wraps(func)
        def sensitive_variables_wrapper(*func_args, **func_kwargs):
            # Record the sensitive names on the wrapper itself; the debug
            # report filter discovers this wrapper (by name) while walking
            # the traceback, so the attribute must be set before the
            # wrapped function runs.
            sensitive_variables_wrapper.sensitive_variables = variables or '__ALL__'
            return func(*func_args, **func_kwargs)
        return sensitive_variables_wrapper
    return decorator
def sensitive_post_parameters(*parameters):
    """
    Mark POST parameters of the decorated view as sensitive, so that the
    error-reporting machinery can later hide their values (e.g. when
    logging unhandled exceptions).

    With explicit names, only those parameters are marked:

        @sensitive_post_parameters('password', 'credit_card')
        def my_view(request):
            ...

    With no names, every POST parameter is treated as sensitive:

        @sensitive_post_parameters()
        def my_view(request)
            ...
    """
    def decorator(view):
        @functools.wraps(view)
        def sensitive_post_parameters_wrapper(request, *args, **kwargs):
            assert isinstance(request, HttpRequest), (
                "sensitive_post_parameters didn't receive an HttpRequest. "
                "If you are decorating a classmethod, be sure to use "
                "@method_decorator."
            )
            # Annotate the request so the debug report filter can redact
            # the named parameters (or all of them) from error reports.
            request.sensitive_post_parameters = parameters or '__ALL__'
            return view(request, *args, **kwargs)
        return sensitive_post_parameters_wrapper
    return decorator
| bsd-3-clause |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/xml/__init__.py | 75 | 1169 | """Core XML support for Python.
This package contains four sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
etree -- The ElementTree XML library. This is a subset of the full
ElementTree XML release.
"""
__all__ = ["dom", "parsers", "sax", "etree"]
# When being checked-out without options, this has the form
# "<dollar>Revision: x.y </dollar>"
# When exported using -kv, it is "x.y".
__version__ = "$Revision: 41660 $".split()[-2:][0]
_MINIMUM_XMLPLUS_VERSION = (0, 8, 4)
try:
import _xmlplus
except ImportError:
pass
else:
try:
v = _xmlplus.version_info
except AttributeError:
# _xmlplus is too old; ignore it
pass
else:
if v >= _MINIMUM_XMLPLUS_VERSION:
import sys
_xmlplus.__path__.extend(__path__)
sys.modules[__name__] = _xmlplus
else:
del v
| apache-2.0 |
sfaleron/PolygonTiles | Pmw.py | 1 | 311333 |
### Loader functions:
# Version of the statically-bundled Pmw distribution.
_VERSION = '1.3.3'
def setversion(version):
    # Only the bundled version is available in this static build; any other
    # request is an error.  (Python 2 raise syntax preserved.)
    if version != _VERSION:
        raise ValueError, 'Dynamic versioning not available'
def setalphaversions(*alpha_versions):
    # No alpha versions are bundled in this static build; only an empty
    # request is accepted.
    if alpha_versions != ():
        raise ValueError, 'Dynamic versioning not available'
def version(alpha = 0):
    # Return the alpha-version tuple (always empty here) when ``alpha`` is
    # true, otherwise the bundled version string.
    if alpha:
        return ()
    else:
        return _VERSION
def installedversions(alpha = 0):
    # Only the single bundled version is ever "installed" in this build.
    if alpha:
        return ()
    else:
        return (_VERSION,)
######################################################################
### File: PmwBase.py
# Pmw megawidget base classes.
# This module provides a foundation for building megawidgets. It
# contains the MegaArchetype class which manages component widgets and
# configuration options. Also provided are the MegaToplevel and
# MegaWidget classes, derived from the MegaArchetype class. The
# MegaToplevel class contains a Tkinter Toplevel widget to act as the
# container of the megawidget. This is used as the base class of all
# megawidgets that are contained in their own top level window, such
# as a Dialog window. The MegaWidget class contains a Tkinter Frame
# to act as the container of the megawidget. This is used as the base
# class of all other megawidgets, such as a ComboBox or ButtonBox.
#
# Megawidgets are built by creating a class that inherits from either
# the MegaToplevel or MegaWidget class.
import os
import string
import sys
import traceback
import types
import Tkinter
# Special values used in index() methods of several megawidgets.
# Each is a list so that it is a unique object, distinguishable from
# any ordinary string or integer index a caller might pass.
END = ['end']
SELECT = ['select']
DEFAULT = ['default']
# Constant used to indicate that an option can only be set by a call
# to the constructor.
INITOPT = ['initopt']
# Sentinel meaning "value not yet resolved from the Tk option database".
_DEFAULT_OPTION_VALUE = ['default_option_value']
# When true, unset option defaults are looked up in the Tk option
# database during MegaArchetype.__init__.
_useTkOptionDb = 0
# Symbolic constants for the indexes into an optionInfo list.
_OPT_DEFAULT = 0
_OPT_VALUE = 1
_OPT_FUNCTION = 2
# Stacks
_busyStack = []
# Stack which tracks nested calls to show/hidebusycursor (called
# either directly or from activate()/deactivate()). Each element
# is a dictionary containing:
# 'newBusyWindows' : List of windows which had busy_hold called
# on them during a call to showbusycursor().
# The corresponding call to hidebusycursor()
# will call busy_release on these windows.
# 'busyFocus' : The blt _Busy window which showbusycursor()
# set the focus to.
# 'previousFocus' : The focus as it was when showbusycursor()
# was called. The corresponding call to
# hidebusycursor() will restore this focus if
# the focus has not been changed from busyFocus.
_grabStack = []
# Stack of grabbed windows. It tracks calls to push/popgrab()
# (called either directly or from activate()/deactivate()). The
# window on the top of the stack is the window currently with the
# grab. Each element is a dictionary containing:
# 'grabWindow' : The window grabbed by pushgrab(). The
# corresponding call to popgrab() will release
# the grab on this window and restore the grab
# on the next window in the stack (if there is one).
# 'globalMode' : True if the grabWindow was grabbed with a
# global grab, false if the grab was local
# and 'nograb' if no grab was performed.
# 'previousFocus' : The focus as it was when pushgrab()
# was called. The corresponding call to
# popgrab() will restore this focus.
# 'deactivateFunction' :
# The function to call (usually grabWindow.deactivate) if
# popgrab() is called (usually from a deactivate() method)
# on a window which is not at the top of the stack (that is,
# does not have the grab or focus). For example, if a modal
# dialog is deleted by the window manager or deactivated by
# a timer. In this case, all dialogs above and including
# this one are deactivated, starting at the top of the
# stack.
# Note that when dealing with focus windows, the name of the Tk
# widget is used, since it may be the '_Busy' window, which has no
# python instance associated with it.
#=============================================================================
# Functions used to forward methods from a class to a component.
# Fill in a flattened method resolution dictionary for a class (attributes are
# filtered out). Flattening honours the MI method resolution rules
# (depth-first search of bases in order). The dictionary has method names
# for keys and functions for values.
def __methodDict(cls, dict):
    # Fill *dict* with every method of *cls*, honouring the classic
    # MI resolution rules: bases are visited first, in reverse order,
    # so that entries closer to *cls* overwrite inherited ones.
    for base in cls.__bases__[::-1]:
        __methodDict(base, dict)
    # The class's own methods go in last, overriding all bases.
    for name, attr in cls.__dict__.items():
        # Skip plain class attributes; keep only functions.
        if type(attr) == types.FunctionType:
            dict[name] = attr
def __methods(cls):
    # Return the names of every method provided by *cls*, including
    # those inherited from base classes (searched recursively);
    # attributes are filtered out by __methodDict.
    collected = {}
    __methodDict(cls, collected)
    return collected.keys()
# Function body to resolve a forwarding given the target method name and the
# attribute name. The resulting function requires only self, but will forward
# any other parameters.  (The commented-out apply() variant is the historical
# pre-2.3 form.)
__stringBody = (
    'def %(method)s(this, *args, **kw): return ' +
    #'apply(this.%(attribute)s.%(method)s, args, kw)')
    'this.%(attribute)s.%(method)s(*args, **kw)')
# Get a unique id
__counter = 0
def __unique():
    # Return a process-unique string id (monotonically increasing).
    global __counter
    __counter = __counter + 1
    return str(__counter)
# Function body to resolve a forwarding given the target method name and the
# index of the resolution function. The resulting function requires only self,
# but will forward any other parameters. The target instance is identified
# by invoking the resolution function.
__funcBody = (
    'def %(method)s(this, *args, **kw): return ' +
    #'apply(this.%(forwardFunc)s().%(method)s, args, kw)')
    'this.%(forwardFunc)s().%(method)s(*args, **kw)');
def forwardmethods(fromClass, toClass, toPart, exclude = ()):
    # Forward all methods from one class to another.
    # Forwarders will be created in fromClass to forward method
    # invocations to toClass. The methods to be forwarded are
    # identified by flattening the interface of toClass, and excluding
    # methods identified in the exclude list. Methods already defined
    # in fromClass, or special methods with one or more leading or
    # trailing underscores will not be forwarded.
    # For a given object of class fromClass, the corresponding toClass
    # object is identified using toPart. This can either be a String
    # denoting an attribute of fromClass objects, or a function taking
    # a fromClass object and returning a toClass object.
    # Example:
    #     class MyClass:
    #     ...
    #         def __init__(self):
    #             ...
    #             self.__target = TargetClass()
    #             ...
    #         def findtarget(self):
    #             return self.__target
    #     forwardmethods(MyClass, TargetClass, '__target', ['dangerous1', 'dangerous2'])
    #     # ...or...
    #     forwardmethods(MyClass, TargetClass, MyClass.findtarget,
    #             ['dangerous1', 'dangerous2'])
    # In both cases, all TargetClass methods will be forwarded from
    # MyClass except for dangerous1, dangerous2, special methods like
    # __str__, and pre-existing methods like findtarget.
    # Allow an attribute name (String) or a function to determine the instance
    if type(toPart) != types.StringType:
        # check that it is something like a function
        if callable(toPart):
            # If a method is passed, use the function within it
            if hasattr(toPart, 'im_func'):
                toPart = toPart.im_func
            # After this is set up, forwarders in this class will use
            # the forwarding function. The forwarding function name is
            # guaranteed to be unique, so that it can't be hidden by subclasses
            forwardName = '__fwdfunc__' + __unique()
            fromClass.__dict__[forwardName] = toPart
        # It's not a valid type
        else:
            raise TypeError, 'toPart must be attribute name, function or method'
    # get the full set of candidate methods
    dict = {}
    __methodDict(toClass, dict)
    # discard special methods (leading or trailing underscore)
    for ex in dict.keys():
        if ex[:1] == '_' or ex[-1:] == '_':
            del dict[ex]
    # discard dangerous methods supplied by the caller
    for ex in exclude:
        if dict.has_key(ex):
            del dict[ex]
    # discard methods already defined in fromClass
    for ex in __methods(fromClass):
        if dict.has_key(ex):
            del dict[ex]
    # Each forwarder is built by exec'ing a small function body template
    # (__stringBody or __funcBody) specialised for the method name.
    for method, func in dict.items():
        d = {'method': method, 'func': func}
        if type(toPart) == types.StringType:
            execString = \
                __stringBody % {'method' : method, 'attribute' : toPart}
        else:
            execString = \
                __funcBody % {'forwardFunc' : forwardName, 'method' : method}
        exec execString in d
        # this creates a method
        fromClass.__dict__[method] = d[method]
#=============================================================================
def setgeometryanddeiconify(window, geom):
    # Set the geometry of *geom* (a Tk geometry string, or None) on
    # *window* and deiconify it, working around platform-specific Tk
    # bugs.  The statement order in each branch is deliberate: to avoid
    # flashes on X and to position the window correctly on NT.
    if os.name == 'nt' or \
            (os.name == 'posix' and sys.platform[:6] == 'cygwin'):
        # Require overrideredirect trick to stop window frame
        # appearing momentarily.
        redirect = window.overrideredirect()
        if not redirect:
            window.overrideredirect(1)
        window.deiconify()
        if geom is not None:
            window.geometry(geom)
        # Call update_idletasks to ensure NT moves the window to the
        # correct position it is raised.
        window.update_idletasks()
        window.tkraise()
        if not redirect:
            window.overrideredirect(0)
    else:
        if geom is not None:
            window.geometry(geom)
        # Problem!? Which way around should the following two calls
        # go? If deiconify() is called first then I get complaints
        # from people using the enlightenment or sawfish window
        # managers that when a dialog is activated it takes about 2
        # seconds for the contents of the window to appear. But if
        # tkraise() is called first then I get complaints from people
        # using the twm window manager that when a dialog is activated
        # it appears in the top right corner of the screen and also
        # takes about 2 seconds to appear.
        #window.tkraise()
        # Call update_idletasks to ensure certain window managers (eg:
        # enlightenment and sawfish) do not cause Tk to delay for
        # about two seconds before displaying window.
        #window.update_idletasks()
        #window.deiconify()
        window.deiconify()
        if window.overrideredirect():
            # The window is not under the control of the window manager
            # and so we need to raise it ourselves.
            window.tkraise()
#=============================================================================
class MegaArchetype:
    # Megawidget abstract root class.
    # This class provides methods which are inherited by classes
    # implementing useful bases (this class doesn't provide a
    # container widget inside which the megawidget can be built).
    def __init__(self, parent = None, hullClass = None):
        # Mapping from each megawidget option to a list of information
        # about the option
        # - default value
        # - current value
        # - function to call when the option is initialised in the
        # call to initialiseoptions() in the constructor or
        # modified via configure(). If this is INITOPT, the
        # option is an initialisation option (an option that can
        # be set by the call to the constructor but can not be
        # used with configure).
        # This mapping is not initialised here, but in the call to
        # defineoptions() which precedes construction of this base class.
        #
        # self._optionInfo = {}
        # Mapping from each component name to a tuple of information
        # about the component.
        # - component widget instance
        # - configure function of widget instance
        # - the class of the widget (Frame, EntryField, etc)
        # - cget function of widget instance
        # - the name of the component group of this component, if any
        self.__componentInfo = {}
        # Mapping from alias names to the names of components or
        # sub-components.
        self.__componentAliases = {}
        # Contains information about the keywords provided to the
        # constructor. It is a mapping from the keyword to a tuple
        # containing:
        # - value of keyword
        # - a boolean indicating if the keyword has been used.
        # A keyword is used if, during the construction of a megawidget,
        # - it is defined in a call to defineoptions() or addoptions(), or
        # - it references, by name, a component of the megawidget, or
        # - it references, by group, at least one component
        # At the end of megawidget construction, a call is made to
        # initialiseoptions() which reports an error if there are
        # unused options given to the constructor.
        #
        # After megawidget construction, the dictionary contains
        # keywords which refer to a dynamic component group, so that
        # these components can be created after megawidget
        # construction and still use the group options given to the
        # constructor.
        #
        # self._constructorKeywords = {}
        # List of dynamic component groups. If a group is included in
        # this list, then it not an error if a keyword argument for
        # the group is given to the constructor or to configure(), but
        # no components with this group have been created.
        # self._dynamicGroups = ()
        if hullClass is None:
            self._hull = None
        else:
            if parent is None:
                parent = Tkinter._default_root
            # Create the hull.
            self._hull = self.createcomponent('hull',
                    (), None,
                    hullClass, (parent,))
            _hullToMegaWidget[self._hull] = self
            if _useTkOptionDb:
                # Now that a widget has been created, query the Tk
                # option database to get the default values for the
                # options which have not been set in the call to the
                # constructor. This assumes that defineoptions() is
                # called before the __init__().
                option_get = self.option_get
                _VALUE = _OPT_VALUE
                _DEFAULT = _OPT_DEFAULT
                for name, info in self._optionInfo.items():
                    value = info[_VALUE]
                    if value is _DEFAULT_OPTION_VALUE:
                        resourceClass = string.upper(name[0]) + name[1:]
                        value = option_get(name, resourceClass)
                        if value != '':
                            try:
                                # Convert the string to int/float/tuple, etc.
                                # Restricted eval: no builtins available.
                                value = eval(value, {'__builtins__': {}})
                            except:
                                pass
                            info[_VALUE] = value
                        else:
                            info[_VALUE] = info[_DEFAULT]
    def destroy(self):
        # Clean up optionInfo in case it contains circular references
        # in the function field, such as self._settitle in class
        # MegaToplevel.
        self._optionInfo = {}
        if self._hull is not None:
            del _hullToMegaWidget[self._hull]
            self._hull.destroy()
    #======================================================================
    # Methods used (mainly) during the construction of the megawidget.
    def defineoptions(self, keywords, optionDefs, dynamicGroups = ()):
        # Create options, providing the default value and the method
        # to call when the value is changed. If any option created by
        # base classes has the same name as one in <optionDefs>, the
        # base class's value and function will be overriden.
        # This should be called before the constructor of the base
        # class, so that default values defined in the derived class
        # override those in the base class.
        if not hasattr(self, '_constructorKeywords'):
            # First time defineoptions has been called.
            tmp = {}
            for option, value in keywords.items():
                tmp[option] = [value, 0]
            self._constructorKeywords = tmp
            self._optionInfo = {}
            self._initialiseoptions_counter = 0
        # The counter is decremented again in initialiseoptions(); the
        # callbacks only run once the outermost constructor finishes.
        self._initialiseoptions_counter = self._initialiseoptions_counter + 1
        if not hasattr(self, '_dynamicGroups'):
            self._dynamicGroups = ()
        self._dynamicGroups = self._dynamicGroups + tuple(dynamicGroups)
        self.addoptions(optionDefs)
    def addoptions(self, optionDefs):
        # Add additional options, providing the default value and the
        # method to call when the value is changed. See
        # "defineoptions" for more details
        # optimisations:
        optionInfo = self._optionInfo
        optionInfo_has_key = optionInfo.has_key
        keywords = self._constructorKeywords
        keywords_has_key = keywords.has_key
        FUNCTION = _OPT_FUNCTION
        for name, default, function in optionDefs:
            if '_' not in name:
                # The option will already exist if it has been defined
                # in a derived class. In this case, do not override the
                # default value of the option or the callback function
                # if it is not None.
                if not optionInfo_has_key(name):
                    if keywords_has_key(name):
                        value = keywords[name][0]
                        optionInfo[name] = [default, value, function]
                        del keywords[name]
                    else:
                        if _useTkOptionDb:
                            optionInfo[name] = \
                                    [default, _DEFAULT_OPTION_VALUE, function]
                        else:
                            optionInfo[name] = [default, default, function]
                elif optionInfo[name][FUNCTION] is None:
                    optionInfo[name][FUNCTION] = function
            else:
                # This option is of the form "component_option". If this is
                # not already defined in self._constructorKeywords add it.
                # This allows a derived class to override the default value
                # of an option of a component of a base class.
                if not keywords_has_key(name):
                    keywords[name] = [default, 0]
    def createcomponent(self, componentName, componentAliases,
            componentGroup, widgetClass, *widgetArgs, **kw):
        # Create a component (during construction or later).
        if self.__componentInfo.has_key(componentName):
            raise ValueError, 'Component "%s" already exists' % componentName
        if '_' in componentName:
            raise ValueError, \
                    'Component name "%s" must not contain "_"' % componentName
        if hasattr(self, '_constructorKeywords'):
            keywords = self._constructorKeywords
        else:
            keywords = {}
        for alias, component in componentAliases:
            # Create aliases to the component and its sub-components.
            index = string.find(component, '_')
            if index < 0:
                self.__componentAliases[alias] = (component, None)
            else:
                mainComponent = component[:index]
                subComponent = component[(index + 1):]
                self.__componentAliases[alias] = (mainComponent, subComponent)
            # Remove aliases from the constructor keyword arguments by
            # replacing any keyword arguments that begin with *alias*
            # with corresponding keys beginning with *component*.
            alias = alias + '_'
            aliasLen = len(alias)
            for option in keywords.keys():
                if len(option) > aliasLen and option[:aliasLen] == alias:
                    newkey = component + '_' + option[aliasLen:]
                    keywords[newkey] = keywords[option]
                    del keywords[option]
        componentPrefix = componentName + '_'
        nameLen = len(componentPrefix)
        for option in keywords.keys():
            if len(option) > nameLen and option[:nameLen] == componentPrefix:
                # The keyword argument refers to this component, so add
                # this to the options to use when constructing the widget.
                kw[option[nameLen:]] = keywords[option][0]
                del keywords[option]
            else:
                # Check if this keyword argument refers to the group
                # of this component. If so, add this to the options
                # to use when constructing the widget. Mark the
                # keyword argument as being used, but do not remove it
                # since it may be required when creating another
                # component.
                index = string.find(option, '_')
                if index >= 0 and componentGroup == option[:index]:
                    rest = option[(index + 1):]
                    kw[rest] = keywords[option][0]
                    keywords[option][1] = 1
        if kw.has_key('pyclass'):
            widgetClass = kw['pyclass']
            del kw['pyclass']
        if widgetClass is None:
            return None
        if len(widgetArgs) == 1 and type(widgetArgs[0]) == types.TupleType:
            # Arguments to the constructor can be specified as either
            # multiple trailing arguments to createcomponent() or as a
            # single tuple argument.
            widgetArgs = widgetArgs[0]
        widget = apply(widgetClass, widgetArgs, kw)
        componentClass = widget.__class__.__name__
        self.__componentInfo[componentName] = (widget, widget.configure,
                componentClass, widget.cget, componentGroup)
        return widget
    def destroycomponent(self, name):
        # Remove a megawidget component.
        # This command is for use by megawidget designers to destroy a
        # megawidget component.
        self.__componentInfo[name][0].destroy()
        del self.__componentInfo[name]
    def createlabel(self, parent, childCols = 1, childRows = 1):
        # Create the optional 'label' component according to the
        # 'labelpos' and 'labelmargin' megawidget options and grid it
        # around the child area (which spans childCols x childRows).
        labelpos = self['labelpos']
        labelmargin = self['labelmargin']
        if labelpos is None:
            return
        label = self.createcomponent('label',
                (), None,
                Tkinter.Label, (parent,))
        if labelpos[0] in 'ns':
            # vertical layout
            if labelpos[0] == 'n':
                row = 0
                margin = 1
            else:
                row = childRows + 3
                margin = row - 1
            label.grid(column=2, row=row, columnspan=childCols, sticky=labelpos)
            parent.grid_rowconfigure(margin, minsize=labelmargin)
        else:
            # horizontal layout
            if labelpos[0] == 'w':
                col = 0
                margin = 1
            else:
                col = childCols + 3
                margin = col - 1
            label.grid(column=col, row=2, rowspan=childRows, sticky=labelpos)
            parent.grid_columnconfigure(margin, minsize=labelmargin)
    def initialiseoptions(self, dummy = None):
        # Complete megawidget construction: once the outermost
        # constructor reaches here (counter back to zero), report any
        # unused constructor keywords and run each option's callback.
        self._initialiseoptions_counter = self._initialiseoptions_counter - 1
        if self._initialiseoptions_counter == 0:
            unusedOptions = []
            keywords = self._constructorKeywords
            for name in keywords.keys():
                used = keywords[name][1]
                if not used:
                    # This keyword argument has not been used. If it
                    # does not refer to a dynamic group, mark it as
                    # unused.
                    index = string.find(name, '_')
                    if index < 0 or name[:index] not in self._dynamicGroups:
                        unusedOptions.append(name)
            if len(unusedOptions) > 0:
                if len(unusedOptions) == 1:
                    text = 'Unknown option "'
                else:
                    text = 'Unknown options "'
                raise KeyError, text + string.join(unusedOptions, ', ') + \
                        '" for ' + self.__class__.__name__
            # Call the configuration callback function for every option.
            FUNCTION = _OPT_FUNCTION
            for info in self._optionInfo.values():
                func = info[FUNCTION]
                if func is not None and func is not INITOPT:
                    func()
    #======================================================================
    # Method used to configure the megawidget.
    def configure(self, option=None, **kw):
        # Query or configure the megawidget options.
        #
        # If not empty, *kw* is a dictionary giving new
        # values for some of the options of this megawidget or its
        # components. For options defined for this megawidget, set
        # the value of the option to the new value and call the
        # configuration callback function, if any. For options of the
        # form <component>_<option>, where <component> is a component
        # of this megawidget, call the configure method of the
        # component giving it the new value of the option. The
        # <component> part may be an alias or a component group name.
        #
        # If *option* is None, return all megawidget configuration
        # options and settings. Options are returned as standard 5
        # element tuples
        #
        # If *option* is a string, return the 5 element tuple for the
        # given configuration option.
        # First, deal with the option queries.
        if len(kw) == 0:
            # This configure call is querying the values of one or all options.
            # Return 5-tuples:
            # (optionName, resourceName, resourceClass, default, value)
            if option is None:
                rtn = {}
                for option, config in self._optionInfo.items():
                    resourceClass = string.upper(option[0]) + option[1:]
                    rtn[option] = (option, option, resourceClass,
                            config[_OPT_DEFAULT], config[_OPT_VALUE])
                return rtn
            else:
                config = self._optionInfo[option]
                resourceClass = string.upper(option[0]) + option[1:]
                return (option, option, resourceClass, config[_OPT_DEFAULT],
                        config[_OPT_VALUE])
        # optimisations:
        optionInfo = self._optionInfo
        optionInfo_has_key = optionInfo.has_key
        componentInfo = self.__componentInfo
        componentInfo_has_key = componentInfo.has_key
        componentAliases = self.__componentAliases
        componentAliases_has_key = componentAliases.has_key
        VALUE = _OPT_VALUE
        FUNCTION = _OPT_FUNCTION
        # This will contain a list of options in *kw* which
        # are known to this megawidget.
        directOptions = []
        # This will contain information about the options in
        # *kw* of the form <component>_<option>, where
        # <component> is a component of this megawidget. It is a
        # dictionary whose keys are the configure method of each
        # component and whose values are a dictionary of options and
        # values for the component.
        indirectOptions = {}
        indirectOptions_has_key = indirectOptions.has_key
        for option, value in kw.items():
            if optionInfo_has_key(option):
                # This is one of the options of this megawidget.
                # Make sure it is not an initialisation option.
                if optionInfo[option][FUNCTION] is INITOPT:
                    raise KeyError, \
                            'Cannot configure initialisation option "' \
                            + option + '" for ' + self.__class__.__name__
                optionInfo[option][VALUE] = value
                directOptions.append(option)
            else:
                index = string.find(option, '_')
                if index >= 0:
                    # This option may be of the form <component>_<option>.
                    component = option[:index]
                    componentOption = option[(index + 1):]
                    # Expand component alias
                    if componentAliases_has_key(component):
                        component, subComponent = componentAliases[component]
                        if subComponent is not None:
                            componentOption = subComponent + '_' \
                                + componentOption
                    # Expand option string to write on error
                    option = component + '_' + componentOption
                    if componentInfo_has_key(component):
                        # Configure the named component
                        componentConfigFuncs = [componentInfo[component][1]]
                    else:
                        # Check if this is a group name and configure all
                        # components in the group.
                        componentConfigFuncs = []
                        for info in componentInfo.values():
                            if info[4] == component:
                                componentConfigFuncs.append(info[1])
                        if len(componentConfigFuncs) == 0 and \
                                component not in self._dynamicGroups:
                            raise KeyError, 'Unknown option "' + option + \
                                    '" for ' + self.__class__.__name__
                    # Add the configure method(s) (may be more than
                    # one if this is configuring a component group)
                    # and option/value to dictionary.
                    for componentConfigFunc in componentConfigFuncs:
                        if not indirectOptions_has_key(componentConfigFunc):
                            indirectOptions[componentConfigFunc] = {}
                        indirectOptions[componentConfigFunc][componentOption] \
                                = value
                else:
                    raise KeyError, 'Unknown option "' + option + \
                            '" for ' + self.__class__.__name__
        # Call the configure methods for any components.
        #apply has been deprecated since python 2.3
        #map(apply, indirectOptions.keys(),
        #        ((),) * len(indirectOptions), indirectOptions.values())
        for func in indirectOptions.keys():
            func( **indirectOptions[func])
        # Call the configuration callback function for each option.
        for option in directOptions:
            info = optionInfo[option]
            func = info[_OPT_FUNCTION]
            if func is not None:
                func()
    def __setitem__(self, key, value):
        # Allow "myWidget['option'] = value" as shorthand for configure.
        #apply has been deprecated since Python 2.3
        #apply(self.configure, (), {key: value})
        self.configure(*(), **{key: value})
    #======================================================================
    # Methods used to query the megawidget.
    def component(self, name):
        # Return a component widget of the megawidget given the
        # component's name
        # This allows the user of a megawidget to access and configure
        # widget components directly.
        # Find the main component and any subcomponents
        index = string.find(name, '_')
        if index < 0:
            component = name
            remainingComponents = None
        else:
            component = name[:index]
            remainingComponents = name[(index + 1):]
        # Expand component alias
        if self.__componentAliases.has_key(component):
            component, subComponent = self.__componentAliases[component]
            if subComponent is not None:
                if remainingComponents is None:
                    remainingComponents = subComponent
                else:
                    remainingComponents = subComponent + '_' \
                            + remainingComponents
        widget = self.__componentInfo[component][0]
        if remainingComponents is None:
            return widget
        else:
            # Recurse into the sub-megawidget for the remaining path.
            return widget.component(remainingComponents)
    def interior(self):
        # Return the hull widget (the container of the megawidget).
        return self._hull
    def hulldestroyed(self):
        # True once the hull has been destroyed (removed from the
        # hull-to-megawidget registry).
        return not _hullToMegaWidget.has_key(self._hull)
    def __str__(self):
        return str(self._hull)
    def cget(self, option):
        # Get current configuration setting.
        # Return the value of an option, for example myWidget['font'].
        if self._optionInfo.has_key(option):
            return self._optionInfo[option][_OPT_VALUE]
        else:
            index = string.find(option, '_')
            if index >= 0:
                component = option[:index]
                componentOption = option[(index + 1):]
                # Expand component alias
                if self.__componentAliases.has_key(component):
                    component, subComponent = self.__componentAliases[component]
                    if subComponent is not None:
                        componentOption = subComponent + '_' + componentOption
                    # Expand option string to write on error
                    option = component + '_' + componentOption
                if self.__componentInfo.has_key(component):
                    # Call cget on the component.
                    componentCget = self.__componentInfo[component][3]
                    return componentCget(componentOption)
                else:
                    # If this is a group name, call cget for one of
                    # the components in the group.
                    for info in self.__componentInfo.values():
                        if info[4] == component:
                            componentCget = info[3]
                            return componentCget(componentOption)
        raise KeyError, 'Unknown option "' + option + \
                '" for ' + self.__class__.__name__
    __getitem__ = cget
    def isinitoption(self, option):
        # True if *option* can only be set via the constructor.
        return self._optionInfo[option][_OPT_FUNCTION] is INITOPT
    def options(self):
        # Return a sorted list of (name, default, isinit) tuples for
        # all megawidget options.
        options = []
        if hasattr(self, '_optionInfo'):
            for option, info in self._optionInfo.items():
                isinit = info[_OPT_FUNCTION] is INITOPT
                default = info[_OPT_DEFAULT]
                options.append((option, default, isinit))
            options.sort()
        return options
    def components(self):
        # Return a list of all components.
        # This list includes the 'hull' component and all widget subcomponents
        names = self.__componentInfo.keys()
        names.sort()
        return names
    def componentaliases(self):
        # Return a list of all component aliases.
        componentAliases = self.__componentAliases
        names = componentAliases.keys()
        names.sort()
        rtn = []
        for alias in names:
            (mainComponent, subComponent) = componentAliases[alias]
            if subComponent is None:
                rtn.append((alias, mainComponent))
            else:
                rtn.append((alias, mainComponent + '_' + subComponent))
        return rtn
    def componentgroup(self, name):
        # Return the group name of the given component (or None).
        return self.__componentInfo[name][4]
#=============================================================================
# The grab functions are mainly called by the activate() and
# deactivate() methods.
#
# Use pushgrab() to add a new window to the grab stack. This
# releases the grab by the window currently on top of the stack (if
# there is one) and gives the grab and focus to the new widget.
#
# To remove the grab from the window on top of the grab stack, call
# popgrab().
#
# Use releasegrabs() to release the grab and clear the grab stack.
def pushgrab(grabWindow, globalMode, deactivateFunction):
    # Push *grabWindow* onto the grab stack, transfer the grab to it
    # (via _grabtop) and give it the keyboard focus.  The focus that
    # was current beforehand is recorded so popgrab() can restore it.
    entry = {}
    entry['grabWindow'] = grabWindow
    entry['globalMode'] = globalMode
    entry['previousFocus'] = grabWindow.tk.call('focus')
    entry['deactivateFunction'] = deactivateFunction
    _grabStack.append(entry)
    _grabtop()
    grabWindow.focus_set()
def popgrab(window):
    # Return the grab to the next window in the grab stack, if any.
    # If this window is not at the top of the grab stack, then it has
    # just been deleted by the window manager or deactivated by a
    # timer. Call the deactivate method for the modal dialog above
    # this one on the stack.
    if _grabStack[-1]['grabWindow'] != window:
        for index in range(len(_grabStack)):
            if _grabStack[index]['grabWindow'] == window:
                # Deactivating the dialog above cascades: each
                # deactivate() calls popgrab() again, unwinding the
                # stack down to (and past) this window.
                _grabStack[index + 1]['deactivateFunction']()
                break
    grabInfo = _grabStack[-1]
    del _grabStack[-1]
    topWidget = grabInfo['grabWindow']
    prevFocus = grabInfo['previousFocus']
    globalMode = grabInfo['globalMode']
    if globalMode != 'nograb':
        topWidget.grab_release()
    # Restore the grab to the new top of the stack, if any.
    if len(_grabStack) > 0:
        _grabtop()
    if prevFocus != '':
        try:
            topWidget.tk.call('focus', prevFocus)
        except Tkinter.TclError:
            # Previous focus widget has been deleted. Set focus
            # to root window.
            Tkinter._default_root.focus_set()
    else:
        # Make sure that focus does not remain on the released widget.
        if len(_grabStack) > 0:
            topWidget = _grabStack[-1]['grabWindow']
            topWidget.focus_set()
        else:
            Tkinter._default_root.focus_set()
def grabstacktopwindow():
    # Return the window currently holding the grab (top of the grab
    # stack), or None when no grab is active.
    if _grabStack:
        return _grabStack[-1]['grabWindow']
    return None
def releasegrabs():
    # Drop any active pointer/keyboard grab and empty the grab stack.
    grabbed = Tkinter._default_root.grab_current()
    if grabbed is not None:
        grabbed.grab_release()
    # Clear in place so other references to the stack see the change.
    del _grabStack[:]
def _grabtop():
    # Give the grab (local or global, per 'globalMode') to the window
    # on top of the grab stack, retrying until it succeeds.
    grabInfo = _grabStack[-1]
    topWidget = grabInfo['grabWindow']
    globalMode = grabInfo['globalMode']
    if globalMode == 'nograb':
        return
    while 1:
        try:
            if globalMode:
                topWidget.grab_set_global()
            else:
                topWidget.grab_set()
            break
        except Tkinter.TclError:
            # Another application has grab. Keep trying until
            # grab can succeed.  Note: after(100) blocks this
            # application for 100ms between attempts.
            topWidget.after(100)
#=============================================================================
class MegaToplevel(MegaArchetype):
    def __init__(self, parent = None, **kw):
        # Define the options for this megawidget.
        optiondefs = (
            ('activatecommand', None, None),
            ('deactivatecommand', None, None),
            ('master', None, None),
            ('title', None, self._settitle),
            ('hull_class', self.__class__.__name__, None),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        MegaArchetype.__init__(self, parent, Tkinter.Toplevel)
        # Initialise instance.
        # Set WM_DELETE_WINDOW protocol, deleting any old callback, so
        # memory does not leak.
        if hasattr(self._hull, '_Pmw_WM_DELETE_name'):
            self._hull.tk.deletecommand(self._hull._Pmw_WM_DELETE_name)
        self._hull._Pmw_WM_DELETE_name = \
                self.register(self._userDeleteWindow, needcleanup = 0)
        self.protocol('WM_DELETE_WINDOW', self._hull._Pmw_WM_DELETE_name)
        # Initialise instance variables.
        self._firstShowing = 1
        # Used by show() to ensure window retains previous position on screen.
        # The IntVar() variable to wait on during a modal dialog.
        self._wait = None
        self._active = 0
        # Default delete handlers; replaceable via userdeletefunc() /
        # usermodaldeletefunc().
        self._userDeleteFunc = self.destroy
        self._userModalDeleteFunc = self.deactivate
        # Check keywords and initialise options.
        self.initialiseoptions()
def _settitle(self):
title = self['title']
if title is not None:
self.title(title)
def userdeletefunc(self, func=None):
if func:
self._userDeleteFunc = func
else:
return self._userDeleteFunc
def usermodaldeletefunc(self, func=None):
if func:
self._userModalDeleteFunc = func
else:
return self._userModalDeleteFunc
def _userDeleteWindow(self):
if self.active():
self._userModalDeleteFunc()
else:
self._userDeleteFunc()
    def destroy(self):
        # Allow this to be called more than once: only tear down while
        # the hull is still registered.
        if _hullToMegaWidget.has_key(self._hull):
            self.deactivate()
            # Remove circular references, so that object can get cleaned up.
            del self._userDeleteFunc
            del self._userModalDeleteFunc
            MegaArchetype.destroy(self)
    def show(self, master = None):
        # Deiconify/raise the (non-modal) window, restoring its last
        # screen position after the first showing.
        if self.state() != 'normal':
            if self._firstShowing:
                # Just let the window manager determine the window
                # position for the first time.
                geom = None
            else:
                # Position the window at the same place it was last time.
                geom = self._sameposition()
            setgeometryanddeiconify(self, geom)
        if self._firstShowing:
            self._firstShowing = 0
        else:
            if self.transient() == '':
                self.tkraise()
        # Do this last, otherwise get flashing on NT:
        if master is not None:
            if master == 'parent':
                parent = self.winfo_parent()
                # winfo_parent() should return the parent widget, but the
                # the current version of Tkinter returns a string.
                if type(parent) == types.StringType:
                    parent = self._hull._nametowidget(parent)
                master = parent.winfo_toplevel()
            self.transient(master)
        self.focus()
    def _centreonscreen(self):
        # Centre the window on the screen.  (Actually halfway across
        # and one third down.)  Returns a Tk geometry position string
        # '+X+Y'; integer (Python 2 truncating) division keeps the
        # coordinates whole pixels.
        parent = self.winfo_parent()
        if type(parent) == types.StringType:
            parent = self._hull._nametowidget(parent)
        # Find size of window.
        self.update_idletasks()
        width = self.winfo_width()
        height = self.winfo_height()
        if width == 1 and height == 1:
            # If the window has not yet been displayed, its size is
            # reported as 1x1, so use requested size.
            width = self.winfo_reqwidth()
            height = self.winfo_reqheight()
        # Place in centre of screen:
        x = (self.winfo_screenwidth() - width) / 2 - parent.winfo_vrootx()
        y = (self.winfo_screenheight() - height) / 3 - parent.winfo_vrooty()
        # Clamp to the visible screen.
        if x < 0:
            x = 0
        if y < 0:
            y = 0
        return '+%d+%d' % (x, y)
def _sameposition(self):
# Position the window at the same place it was last time.
geometry = self.geometry()
index = string.find(geometry, '+')
if index >= 0:
return geometry[index:]
else:
return None
    def activate(self, globalMode = 0, geometry = 'centerscreenfirst'):
        # Display the window as a modal dialog: position it according
        # to *geometry*, grab the pointer (application or global grab
        # depending on *globalMode*) and block in wait_variable until
        # deactivate() is called.  Returns the result passed to
        # deactivate().
        if self._active:
            raise ValueError, 'Window is already active'
        if self.state() == 'normal':
            self.withdraw()
        self._active = 1
        showbusycursor()
        # Lazily create the variable that wait_variable blocks on.
        if self._wait is None:
            self._wait = Tkinter.IntVar()
        self._wait.set(0)
        if geometry == 'centerscreenalways':
            geom = self._centreonscreen()
        elif geometry == 'centerscreenfirst':
            if self._firstShowing:
                # Centre the window the first time it is displayed.
                geom = self._centreonscreen()
            else:
                # Position the window at the same place it was last time.
                geom = self._sameposition()
        elif geometry[:5] == 'first':
            if self._firstShowing:
                geom = geometry[5:]
            else:
                # Position the window at the same place it was last time.
                geom = self._sameposition()
        else:
            geom = geometry
        self._firstShowing = 0
        setgeometryanddeiconify(self, geom)
        # Do this last, otherwise get flashing on NT:
        master = self['master']
        if master is not None:
            if master == 'parent':
                parent = self.winfo_parent()
                # winfo_parent() should return the parent widget, but
                # the current version of Tkinter returns a string.
                if type(parent) == types.StringType:
                    parent = self._hull._nametowidget(parent)
                master = parent.winfo_toplevel()
            self.transient(master)
        pushgrab(self._hull, globalMode, self.deactivate)
        command = self['activatecommand']
        if callable(command):
            command()
        # Block here until deactivate() sets the variable.
        self.wait_variable(self._wait)
        return self._result
    def deactivate(self, result=None):
        # End a modal activate() call, making *result* its return
        # value.  Safe to call when the window is not active.
        if not self._active:
            return
        self._active = 0
        # Restore the focus before withdrawing the window, since
        # otherwise the window manager may take the focus away so we
        # can't redirect it.  Also, return the grab to the next active
        # window in the stack, if any.
        popgrab(self._hull)
        command = self['deactivatecommand']
        if callable(command):
            command()
        self.withdraw()
        hidebusycursor(forceFocusRestore = 1)
        # Release activate(), which is blocked in wait_variable.
        self._result = result
        self._wait.set(1)
    def active(self):
        # Return true if the window is currently active (modal).
        return self._active
# Forward unknown MegaToplevel methods to the Tkinter.Toplevel hull.
forwardmethods(MegaToplevel, Tkinter.Toplevel, '_hull')
#=============================================================================
class MegaWidget(MegaArchetype):
    # Base class for megawidgets whose hull component is a
    # Tkinter.Frame.
    def __init__(self, parent = None, **kw):
        # Every megawidget gets a 'hull_class' option whose default is
        # the name of its own class.
        options = (
            ('hull_class', self.__class__.__name__, None),
        )
        self.defineoptions(kw, options)
        # Initialise the base class (after defining the options).
        MegaArchetype.__init__(self, parent, Tkinter.Frame)
        # Check keywords and initialise options.
        self.initialiseoptions()
# Forward unknown MegaWidget methods to the Tkinter.Frame hull.
forwardmethods(MegaWidget, Tkinter.Frame, '_hull')
#=============================================================================
# Public functions
#-----------------
# True while tracetk() tracing of calls into Tk is turned on.
_traceTk = 0
def tracetk(root = None, on = 1, withStackTrace = 0, file=None):
    # Turn tracing of calls into the Tk interpreter on or off.  Trace
    # output is written to *file* (default sys.stderr); if
    # *withStackTrace* is true a python stack trace is printed for
    # each call as well.
    global _withStackTrace
    global _traceTkFile
    global _traceTk
    if root is None:
        root = Tkinter._default_root
    _withStackTrace = withStackTrace
    _traceTk = on
    if on:
        # A _TraceTk wrapper is a python instance and so has
        # '__class__', unlike the raw tkapp object - presumably this
        # is how tracing state is detected (NOTE: verify on newer
        # interpreters where tkapp also has '__class__').
        if hasattr(root.tk, '__class__'):
            # Tracing already on
            return
        if file is None:
            _traceTkFile = sys.stderr
        else:
            _traceTkFile = file
        tk = _TraceTk(root.tk)
    else:
        if not hasattr(root.tk, '__class__'):
            # Tracing already off
            return
        tk = root.tk.getTclInterp()
    # Point every widget in the hierarchy at the (un)wrapped interp.
    _setTkInterps(root, tk)
def showbusycursor():
    # Display the busy cursor over all toplevel windows (using the
    # Blt busy command, if available) and optionally redirect keyboard
    # focus so that input is ignored while busy.  Calls nest: each
    # call pushes an entry onto _busyStack for hidebusycursor to pop.
    _addRootToToplevelBusyInfo()
    root = Tkinter._default_root
    busyInfo = {
        'newBusyWindows' : [],
        'previousFocus' : None,
        'busyFocus' : None,
    }
    _busyStack.append(busyInfo)
    if _disableKeyboardWhileBusy:
        # Remember the focus as it is now, before it is changed.
        busyInfo['previousFocus'] = root.tk.call('focus')
    if not _havebltbusy(root):
        # No busy command, so don't call busy hold on any windows.
        return
    for (window, winInfo) in _toplevelBusyInfo.items():
        if (window.state() != 'withdrawn' and not winInfo['isBusy']
                and not winInfo['excludeFromBusy']):
            busyInfo['newBusyWindows'].append(window)
            winInfo['isBusy'] = 1
            _busy_hold(window, winInfo['busyCursorName'])
            # Make sure that no events for the busy window get
            # through to Tkinter, otherwise it will crash in
            # _nametowidget with a 'KeyError: _Busy' if there is
            # a binding on the toplevel window.
            window.tk.call('bindtags', winInfo['busyWindow'], 'Pmw_Dummy_Tag')
            if _disableKeyboardWhileBusy:
                # Remember previous focus widget for this toplevel window
                # and set focus to the busy window, which will ignore all
                # keyboard events.
                winInfo['windowFocus'] = \
                        window.tk.call('focus', '-lastfor', window._w)
                window.tk.call('focus', winInfo['busyWindow'])
                busyInfo['busyFocus'] = winInfo['busyWindow']
    if len(busyInfo['newBusyWindows']) > 0:
        if os.name == 'nt':
            # NT needs an "update" before it will change the cursor.
            window.update()
        else:
            window.update_idletasks()
def hidebusycursor(forceFocusRestore = 0):
    # Undo the most recent showbusycursor() call: release the busy
    # cursor on the windows it made busy and restore keyboard focus.
    # Remember the focus as it is now, before it is changed.
    root = Tkinter._default_root
    if _disableKeyboardWhileBusy:
        currentFocus = root.tk.call('focus')
    # Pop the busy info off the stack.
    busyInfo = _busyStack[-1]
    del _busyStack[-1]
    for window in busyInfo['newBusyWindows']:
        # If this window has not been deleted, release the busy cursor.
        if _toplevelBusyInfo.has_key(window):
            winInfo = _toplevelBusyInfo[window]
            winInfo['isBusy'] = 0
            _busy_release(window)
            if _disableKeyboardWhileBusy:
                # Restore previous focus window for this toplevel window,
                # but only if is still set to the busy window (it may have
                # been changed).
                windowFocusNow = window.tk.call('focus', '-lastfor', window._w)
                if windowFocusNow == winInfo['busyWindow']:
                    try:
                        window.tk.call('focus', winInfo['windowFocus'])
                    except Tkinter.TclError:
                        # Previous focus widget has been deleted. Set focus
                        # to toplevel window instead (can't leave focus on
                        # busy window).
                        window.focus_set()
    if _disableKeyboardWhileBusy:
        # Restore the focus, depending on whether the focus had changed
        # between the calls to showbusycursor and hidebusycursor.
        if forceFocusRestore or busyInfo['busyFocus'] == currentFocus:
            # The focus had not changed, so restore it to as it was before
            # the call to showbusycursor.
            previousFocus = busyInfo['previousFocus']
            if previousFocus is not None:
                try:
                    root.tk.call('focus', previousFocus)
                except Tkinter.TclError:
                    # Previous focus widget has been deleted; forget it.
                    pass
        else:
            # The focus had changed, so restore it to what it had been
            # changed to before the call to hidebusycursor.
            root.tk.call('focus', currentFocus)
def clearbusycursor():
    # Pop every outstanding showbusycursor() entry, releasing all
    # busy cursors.
    while _busyStack:
        hidebusycursor()
def setbusycursorattributes(window, **kw):
_addRootToToplevelBusyInfo()
for name, value in kw.items():
if name == 'exclude':
_toplevelBusyInfo[window]['excludeFromBusy'] = value
elif name == 'cursorName':
_toplevelBusyInfo[window]['busyCursorName'] = value
else:
raise KeyError, 'Unknown busycursor attribute "' + name + '"'
def _addRootToToplevelBusyInfo():
    # Include the Tk root window in the list of toplevels.  This must
    # not be called before Tkinter has had a chance to be initialised by
    # the application.
    root = Tkinter._default_root
    # Use an identity test: '== None' would invoke rich/classic
    # comparison and is never the right way to test for None.
    if root is None:
        root = Tkinter.Tk()
    if not _toplevelBusyInfo.has_key(root):
        _addToplevelBusyInfo(root)
def busycallback(command, updateFunction = None):
if not callable(command):
raise ValueError, \
'cannot register non-command busy callback %s %s' % \
(repr(command), type(command))
wrapper = _BusyWrapper(command, updateFunction)
return wrapper.callback
# File object to which background errors are written, or None to use
# the interactive error window (see reporterrorstofile/displayerror).
_errorReportFile = None
# The single _ErrorWindow instance, created lazily by displayerror().
_errorWindow = None
def reporterrorstofile(file = None):
    # Direct background error reports to *file* instead of the error
    # window; pass None to restore the error window behaviour.
    global _errorReportFile
    _errorReportFile = file
def displayerror(text):
    # Report a background error: either append it to the error report
    # file, or show it in the (lazily created) error window and echo
    # it to standard error.
    global _errorWindow
    if _errorReportFile is not None:
        _errorReportFile.write(text + '\n')
    else:
        # Print error on standard error as well as to error window.
        # Useful if error window fails to be displayed, for example
        # when exception is triggered in a <Destroy> binding for root
        # window.
        sys.stderr.write(text + '\n')
        if _errorWindow is None:
            # The error window has not yet been created.
            _errorWindow = _ErrorWindow()
        _errorWindow.showerror(text)
# The Tk root window passed to (or created by) the last initialise().
_root = None
# Whether show/hidebusycursor should also redirect keyboard focus so
# that keystrokes are ignored while busy.
_disableKeyboardWhileBusy = 1
def initialise(
        root = None,
        size = None,
        fontScheme = None,
        useTkOptionDb = 0,
        noBltBusy = 0,
        disableKeyboardWhileBusy = None,
):
    # Initialise Pmw for the given (or default, or newly created) Tk
    # root window, install the Tkinter hooks Pmw relies on, and set
    # the application font scheme.  Returns the root window.
    # Remember if show/hidebusycursor should ignore keyboard events.
    global _disableKeyboardWhileBusy
    if disableKeyboardWhileBusy is not None:
        _disableKeyboardWhileBusy = disableKeyboardWhileBusy
    # Do not use blt busy command if noBltBusy is set.  Otherwise,
    # use blt busy if it is available.
    global _haveBltBusy
    if noBltBusy:
        _haveBltBusy = 0
    # Save flag specifying whether the Tk option database should be
    # queried when setting megawidget option default values.
    global _useTkOptionDb
    _useTkOptionDb = useTkOptionDb
    # If we haven't been given a root window, use the default or
    # create one.
    if root is None:
        if Tkinter._default_root is None:
            root = Tkinter.Tk()
        else:
            root = Tkinter._default_root
    # If this call is initialising a different Tk interpreter than the
    # last call, then re-initialise all global variables.  Assume the
    # last interpreter has been destroyed - ie: Pmw does not (yet)
    # support multiple simultaneous interpreters.
    global _root
    if _root is not None and _root != root:
        global _busyStack
        global _errorWindow
        global _grabStack
        global _hullToMegaWidget
        global _toplevelBusyInfo
        _busyStack = []
        _errorWindow = None
        _grabStack = []
        _hullToMegaWidget = {}
        _toplevelBusyInfo = {}
    _root = root
    # Trap Tkinter Toplevel constructors so that a list of Toplevels
    # can be maintained.
    Tkinter.Toplevel.title = __TkinterToplevelTitle
    # Trap Tkinter widget destruction so that megawidgets can be
    # destroyed when their hull widget is destroyed and the list of
    # Toplevels can be pruned.
    Tkinter.Toplevel.destroy = __TkinterToplevelDestroy
    Tkinter.Widget.destroy = __TkinterWidgetDestroy
    # Modify Tkinter's CallWrapper class to improve the display of
    # errors which occur in callbacks.
    Tkinter.CallWrapper = __TkinterCallWrapper
    # Make sure we get to know when the window manager deletes the
    # root window.  Only do this if the protocol has not yet been set.
    # This is required if there is a modal dialog displayed and the
    # window manager deletes the root window.  Otherwise the
    # application will not exit, even though there are no windows.
    if root.protocol('WM_DELETE_WINDOW') == '':
        root.protocol('WM_DELETE_WINDOW', root.destroy)
    # Set the base font size for the application and set the
    # Tk option database font resources.
    _font_initialise(root, size, fontScheme)
    return root
def alignlabels(widgets, sticky = None):
    # Adjust the widths of the label columns of *widgets* so that
    # their child sites line up vertically.  Assumes each widget's
    # label is gridded in column 0 (see the grid_bbox(0, 1) call).
    if len(widgets) == 0:
        return
    widgets[0].update_idletasks()
    # Determine the size of the maximum length label string.
    maxLabelWidth = 0
    for iwid in widgets:
        labelWidth = iwid.grid_bbox(0, 1)[2]
        if labelWidth > maxLabelWidth:
            maxLabelWidth = labelWidth
    # Adjust the margins for the labels such that the child sites and
    # labels line up.
    for iwid in widgets:
        if sticky is not None:
            iwid.component('label').grid(sticky=sticky)
        iwid.grid_columnconfigure(0, minsize = maxLabelWidth)
#=============================================================================
# Private routines
#-----------------
# State shared between _TraceTk.call and __TkinterCallWrapper.__call__
# so that nested trace output lines are indented and terminated
# correctly.
_callToTkReturned = 1
_recursionCounter = 1
class _TraceTk:
    # Transparent wrapper around a Tcl interpreter (tkapp) object
    # which logs every call made into Tk, with indentation showing
    # recursive callback depth.  Installed/removed by tracetk().
    def __init__(self, tclInterp):
        self.tclInterp = tclInterp
    def getTclInterp(self):
        # Return the wrapped raw interpreter (used to switch off
        # tracing).
        return self.tclInterp
    # Calling from python into Tk.
    def call(self, *args, **kw):
        global _callToTkReturned
        global _recursionCounter
        _callToTkReturned = 0
        if len(args) == 1 and type(args[0]) == types.TupleType:
            argStr = str(args[0])
        else:
            argStr = str(args)
        _traceTkFile.write('CALL  TK> %d:%s%s' %
                (_recursionCounter, '  ' * _recursionCounter, argStr))
        _recursionCounter = _recursionCounter + 1
        try:
            result = apply(self.tclInterp.call, args, kw)
        except Tkinter.TclError, errorString:
            # Log the Tk error at the correct depth, then re-raise it
            # unchanged for the caller.
            _callToTkReturned = 1
            _recursionCounter = _recursionCounter - 1
            _traceTkFile.write('\nTK ERROR> %d:%s-> %s\n' %
                    (_recursionCounter, '  ' * _recursionCounter,
                            repr(errorString)))
            if _withStackTrace:
                _traceTkFile.write('CALL  TK> stack:\n')
                traceback.print_stack()
            raise Tkinter.TclError, errorString
        _recursionCounter = _recursionCounter - 1
        if _callToTkReturned:
            # A nested call (or callback) intervened, so start a new
            # line for the return value.
            _traceTkFile.write('CALL RTN> %d:%s-> %s' %
                    (_recursionCounter, '  ' * _recursionCounter, repr(result)))
        else:
            _callToTkReturned = 1
            if result:
                _traceTkFile.write(' -> %s' % repr(result))
        _traceTkFile.write('\n')
        if _withStackTrace:
            _traceTkFile.write('CALL  TK> stack:\n')
            traceback.print_stack()
        _traceTkFile.flush()
        return result
    def __getattr__(self, key):
        # Forward everything else to the real interpreter.
        return getattr(self.tclInterp, key)
def _setTkInterps(window, tk):
    # Point *window* and every widget beneath it at the Tcl
    # interpreter object *tk* (used to switch Tk call tracing on and
    # off).  Iterative traversal instead of recursion; the resulting
    # attribute assignments are identical.
    pending = [window]
    while pending:
        widget = pending.pop()
        widget.tk = tk
        pending.extend(widget.children.values())
#=============================================================================
# Functions to display a busy cursor. Keep a list of all toplevels
# and display the busy cursor over them. The list will contain the Tk
# root toplevel window as well as all other toplevel windows.
# Also keep a list of the widget which last had focus for each
# toplevel.
# Map from toplevel windows to a dictionary of busy-cursor state:
#   {'isBusy', 'windowFocus', 'busyWindow',
#    'excludeFromBusy', 'busyCursorName'}
_toplevelBusyInfo = {}
# Pmw needs to know all toplevel windows, so that it can call blt busy
# on them.  This is a hack so we get notified when a Tk toplevel is
# created. Ideally, the __init__ 'method' should be overridden, but
# it is a 'read-only special attribute'. Luckily, title() is always
# called from the Tkinter Toplevel constructor.
def _addToplevelBusyInfo(window):
    # Register *window* in the busy-window bookkeeping map, recording
    # the Tk path name of the Blt busy window that will cover it.
    if window._w == '.':
        busyWindowPath = '._Busy'
    else:
        busyWindowPath = window._w + '._Busy'
    _toplevelBusyInfo[window] = {
        'isBusy' : 0,
        'windowFocus' : None,
        'busyWindow' : busyWindowPath,
        'excludeFromBusy' : 0,
        'busyCursorName' : None,
    }
def __TkinterToplevelTitle(self, *args):
    # Replacement for Tkinter.Toplevel.title, installed by
    # initialise().  title() is always called from the Toplevel
    # constructor, so this is where new toplevels are registered.
    # If this is being called from the constructor, include this
    # Toplevel in the list of toplevels and set the initial
    # WM_DELETE_WINDOW protocol to destroy() so that we get to know
    # about it.
    if not _toplevelBusyInfo.has_key(self):
        _addToplevelBusyInfo(self)
        self._Pmw_WM_DELETE_name = self.register(self.destroy, None, 0)
        self.protocol('WM_DELETE_WINDOW', self._Pmw_WM_DELETE_name)
    return apply(Tkinter.Wm.title, (self,) + args)
# Cached result of _havebltbusy(); None means "not yet determined".
_haveBltBusy = None
def _havebltbusy(window):
    # Return whether the Blt busy command is available, determining it
    # (and binding the _busy_hold/_busy_release helpers) on first call.
    global _busy_hold, _busy_release, _haveBltBusy
    if _haveBltBusy is None:
        import PmwBlt
        _haveBltBusy = PmwBlt.havebltbusy(window)
        _busy_hold = PmwBlt.busy_hold
        if os.name == 'nt':
            # There is a bug in Blt 2.4i on NT where the busy window
            # does not follow changes in the children of a window.
            # Using forget works around the problem.
            _busy_release = PmwBlt.busy_forget
        else:
            _busy_release = PmwBlt.busy_release
    return _haveBltBusy
class _BusyWrapper:
    # Display the busy cursor around invocations of a wrapped
    # callback (see busycallback()).
    def __init__(self, command, updateFunction):
        self._func = command
        self._update = updateFunction
    def callback(self, *args):
        # Invoke the wrapped command with the busy cursor displayed.
        showbusycursor()
        result = apply(self._func, args)
        # Flush any events that occurred over the busy windows before
        # they are hidden.
        if callable(self._update):
            self._update()
        hidebusycursor()
        return result
#=============================================================================
def drawarrow(canvas, color, direction, tag, baseOffset = 0.25, edgeOffset = 0.15):
    # Draw a solid triangular arrow on *canvas*, pointing *direction*
    # ('up', 'down', 'left' or 'right'), tagged with *tag* (any
    # previous item with that tag is deleted first).  *baseOffset* and
    # *edgeOffset* are fractions of the canvas size giving the margins
    # around the arrow.  Integer (Python 2 truncating) division keeps
    # the coordinates whole pixels.
    canvas.delete(tag)
    bw = (string.atoi(canvas['borderwidth']) +
            string.atoi(canvas['highlightthickness']))
    width = string.atoi(canvas['width'])
    height = string.atoi(canvas['height'])
    if direction in ('up', 'down'):
        majorDimension = height
        minorDimension = width
    else:
        majorDimension = width
        minorDimension = height
    offset = round(baseOffset * majorDimension)
    if direction in ('down', 'right'):
        base = bw + offset
        apex = bw + majorDimension - offset
    else:
        base = bw + majorDimension - offset
        apex = bw + offset
    if minorDimension > 3 and minorDimension % 2 == 0:
        # Use an odd minor dimension so the apex lies on a pixel row.
        minorDimension = minorDimension - 1
    half = int(minorDimension * (1 - 2 * edgeOffset)) / 2
    low = round(bw + edgeOffset * minorDimension)
    middle = low + half
    high = low + 2 * half
    if direction in ('up', 'down'):
        coords = (low, base, high, base, middle, apex)
    else:
        coords = (base, low, base, high, apex, middle)
    kw = {'fill' : color, 'outline' : color, 'tag' : tag}
    apply(canvas.create_polygon, coords, kw)
#=============================================================================
# Modify the Tkinter destroy methods so that it notifies us when a Tk
# toplevel or frame is destroyed.
# A map from the 'hull' component of a megawidget to the megawidget.
# This is used to clean up a megawidget when its hull is destroyed
# (see the __Tkinter*Destroy replacements below).
_hullToMegaWidget = {}
def __TkinterToplevelDestroy(tkWidget):
    # Replacement for Tkinter.Toplevel.destroy, installed by
    # initialise(): destroy the owning megawidget (if any) and prune
    # the busy-window bookkeeping.
    if _hullToMegaWidget.has_key(tkWidget):
        mega = _hullToMegaWidget[tkWidget]
        try:
            mega.destroy()
        except:
            _reporterror(mega.destroy, ())
    else:
        # Delete the busy info structure for this toplevel (if the
        # window was created before initialise() was called, it
        # will not have any).
        if _toplevelBusyInfo.has_key(tkWidget):
            del _toplevelBusyInfo[tkWidget]
        if hasattr(tkWidget, '_Pmw_WM_DELETE_name'):
            tkWidget.tk.deletecommand(tkWidget._Pmw_WM_DELETE_name)
            del tkWidget._Pmw_WM_DELETE_name
        Tkinter.BaseWidget.destroy(tkWidget)
def __TkinterWidgetDestroy(tkWidget):
    # Replacement for Tkinter.Widget.destroy, installed by
    # initialise(): if the widget is a megawidget hull, destroy the
    # megawidget instead.
    if not _hullToMegaWidget.has_key(tkWidget):
        Tkinter.BaseWidget.destroy(tkWidget)
        return
    mega = _hullToMegaWidget[tkWidget]
    try:
        mega.destroy()
    except:
        # Never let a broken destroy propagate into Tkinter; report
        # it through the usual background-error machinery.
        _reporterror(mega.destroy, ())
#=============================================================================
# Add code to Tkinter to improve the display of errors which occur in
# callbacks.
class __TkinterCallWrapper:
    # Replacement for Tkinter.CallWrapper, installed by initialise():
    # logs callbacks when tracing is on and reports callback
    # exceptions via _reporterror instead of Tkinter's default.
    def __init__(self, func, subst, widget):
        self.func = func
        self.subst = subst
        self.widget = widget
    # Calling back from Tk into python.
    def __call__(self, *args):
        try:
            if self.subst:
                args = apply(self.subst, args)
            if _traceTk:
                if not _callToTkReturned:
                    _traceTkFile.write('\n')
                if hasattr(self.func, 'im_class'):
                    name = self.func.im_class.__name__ + '.' + \
                        self.func.__name__
                else:
                    name = self.func.__name__
                if len(args) == 1 and hasattr(args[0], 'type'):
                    # The argument to the callback is an event.
                    eventName = _eventTypeToName[string.atoi(args[0].type)]
                    if eventName in ('KeyPress', 'KeyRelease',):
                        argStr = '(%s %s Event: %s)' % \
                            (eventName, args[0].keysym, args[0].widget)
                    else:
                        argStr = '(%s Event, %s)' % (eventName, args[0].widget)
                else:
                    argStr = str(args)
                _traceTkFile.write('CALLBACK> %d:%s%s%s\n' %
                    (_recursionCounter, '  ' * _recursionCounter, name, argStr))
                _traceTkFile.flush()
            return apply(self.func, args)
        except SystemExit, msg:
            # Let explicit application exits through untouched.
            raise SystemExit, msg
        except:
            _reporterror(self.func, args)
# Map from X event type numbers to human-readable names, used when
# tracing callbacks and formatting callback error reports.
_eventTypeToName = {
    2 : 'KeyPress', 15 : 'VisibilityNotify', 28 : 'PropertyNotify',
    3 : 'KeyRelease', 16 : 'CreateNotify', 29 : 'SelectionClear',
    4 : 'ButtonPress', 17 : 'DestroyNotify', 30 : 'SelectionRequest',
    5 : 'ButtonRelease', 18 : 'UnmapNotify', 31 : 'SelectionNotify',
    6 : 'MotionNotify', 19 : 'MapNotify', 32 : 'ColormapNotify',
    7 : 'EnterNotify', 20 : 'MapRequest', 33 : 'ClientMessage',
    8 : 'LeaveNotify', 21 : 'ReparentNotify', 34 : 'MappingNotify',
    9 : 'FocusIn', 22 : 'ConfigureNotify', 35 : 'VirtualEvents',
    10 : 'FocusOut', 23 : 'ConfigureRequest', 36 : 'ActivateNotify',
    11 : 'KeymapNotify', 24 : 'GravityNotify', 37 : 'DeactivateNotify',
    12 : 'Expose', 25 : 'ResizeRequest', 38 : 'MouseWheelEvent',
    13 : 'GraphicsExpose', 26 : 'CirculateNotify',
    14 : 'NoExpose', 27 : 'CirculateRequest',
}
def _reporterror(func, args):
    # Format the exception currently being handled as a report
    # describing the failed callback *func* (called with *args*) and
    # display it via displayerror().  Called from within 'except'
    # clauses in the Tk callback wrappers, so it must never raise.
    # Fetch current exception values.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    # Give basic information about the callback exception.
    if type(exc_type) == types.ClassType:
        # Handle python 1.5 class exceptions.
        exc_type = exc_type.__name__
    msg = str(exc_type) + ' Exception in Tk callback\n'
    msg = msg + ' Function: %s (type: %s)\n' % (repr(func), type(func))
    msg = msg + ' Args: %s\n' % str(args)
    if type(args) == types.TupleType and len(args) > 0 and \
            hasattr(args[0], 'type'):
        eventArg = 1
    else:
        eventArg = 0
    # If the argument to the callback is an event, add the event type.
    if eventArg:
        eventNum = string.atoi(args[0].type)
        # Use has_key rather than 'in ....keys()', which would build a
        # throwaway list just to do a linear membership test.
        if _eventTypeToName.has_key(eventNum):
            msg = msg + ' Event type: %s (type num: %d)\n' % \
                (_eventTypeToName[eventNum], eventNum)
        else:
            msg = msg + ' Unknown event type (type num: %d)\n' % eventNum
    # Add the traceback.
    msg = msg + 'Traceback (innermost last):\n'
    for tr in traceback.extract_tb(exc_traceback):
        msg = msg + ' File "%s", line %s, in %s\n' % (tr[0], tr[1], tr[2])
        msg = msg + ' %s\n' % tr[3]
    msg = msg + '%s: %s\n' % (exc_type, exc_value)
    # If the argument to the callback is an event, add the event contents.
    if eventArg:
        msg = msg + '\n================================================\n'
        msg = msg + ' Event contents:\n'
        keys = args[0].__dict__.keys()
        keys.sort()
        for key in keys:
            msg = msg + ' %s: %s\n' % (key, args[0].__dict__[key])
    # Remove any busy cursors so the user can interact with the error
    # window; displayerror itself must never be allowed to raise here.
    clearbusycursor()
    try:
        displayerror(msg)
    except:
        pass
class _ErrorWindow:
    # Toplevel window used by displayerror() to show background
    # errors one at a time, queueing any that arrive while it is open.
    def __init__(self):
        # Queue of error texts waiting to be shown, count of errors
        # seen so far, whether the window is currently displayed, and
        # whether it has ever been displayed (for positioning).
        self._errorQueue = []
        self._errorCount = 0
        self._open = 0
        self._firstShowing = 1
        # Create the toplevel window
        self._top = Tkinter.Toplevel()
        self._top.protocol('WM_DELETE_WINDOW', self._hide)
        self._top.title('Error in background function')
        self._top.iconname('Background error')
        # Create the text widget and scrollbar in a frame
        upperframe = Tkinter.Frame(self._top)
        scrollbar = Tkinter.Scrollbar(upperframe, orient='vertical')
        scrollbar.pack(side = 'right', fill = 'y')
        self._text = Tkinter.Text(upperframe, yscrollcommand=scrollbar.set)
        self._text.pack(fill = 'both', expand = 1)
        scrollbar.configure(command=self._text.yview)
        # Create the buttons and label in a frame
        lowerframe = Tkinter.Frame(self._top)
        ignore = Tkinter.Button(lowerframe,
                text = 'Ignore remaining errors', command = self._hide)
        ignore.pack(side='left')
        self._nextError = Tkinter.Button(lowerframe,
                text = 'Show next error', command = self._next)
        self._nextError.pack(side='left')
        self._label = Tkinter.Label(lowerframe, relief='ridge')
        self._label.pack(side='left', fill='x', expand=1)
        # Pack the lower frame first so that it does not disappear
        # when the window is resized.
        lowerframe.pack(side = 'bottom', fill = 'x')
        upperframe.pack(side = 'bottom', fill = 'both', expand = 1)
    def showerror(self, text):
        # Display *text*, or queue it if an error is already showing.
        if self._open:
            self._errorQueue.append(text)
        else:
            self._display(text)
            self._open = 1
        # Display the error window in the same place it was before.
        if self._top.state() == 'normal':
            # If update_idletasks is not called here, the window may
            # be placed partially off the screen.  Also, if it is not
            # called and many errors are generated quickly in
            # succession, the error window may not display errors
            # until the last one is generated and the interpreter
            # becomes idle.
            # XXX: remove this, since it causes omppython to go into an
            # infinite loop if an error occurs in an omp callback.
            # self._top.update_idletasks()
            pass
        else:
            if self._firstShowing:
                geom = None
            else:
                # Restore the position part of the last geometry.
                geometry = self._top.geometry()
                index = string.find(geometry, '+')
                if index >= 0:
                    geom = geometry[index:]
                else:
                    geom = None
            setgeometryanddeiconify(self._top, geom)
        if self._firstShowing:
            self._firstShowing = 0
        else:
            self._top.tkraise()
        self._top.focus()
        self._updateButtons()
        # Release any grab, so that buttons in the error window work.
        releasegrabs()
    def _hide(self):
        # Withdraw the window, discarding any queued errors.
        self._errorCount = self._errorCount + len(self._errorQueue)
        self._errorQueue = []
        self._top.withdraw()
        self._open = 0
    def _next(self):
        # Display the next error in the queue.
        text = self._errorQueue[0]
        del self._errorQueue[0]
        self._display(text)
        self._updateButtons()
    def _display(self, text):
        # Replace the text widget contents with the numbered error.
        self._errorCount = self._errorCount + 1
        text = 'Error: %d\n%s' % (self._errorCount, text)
        self._text.delete('1.0', 'end')
        self._text.insert('end', text)
    def _updateButtons(self):
        # Enable/disable the 'Show next error' button and update the
        # queued-error count label.
        numQueued = len(self._errorQueue)
        if numQueued > 0:
            self._label.configure(text='%d more errors' % numQueued)
            self._nextError.configure(state='normal')
        else:
            self._label.configure(text='No more errors')
            self._nextError.configure(state='disabled')
# NOTE(review): these flags appear to belong to the consolidated Pmw
# loader; presumably they record that PmwBlt is bundled and whether the
# Blt busy command has been verified - confirm against PmwLoader.
_bltImported = 1
_bltbusyOK = 0
######################################################################
### File: PmwDialog.py
# Based on iwidgets2.2.0/dialog.itk and iwidgets2.2.0/dialogshell.itk code.
# Convention:
# Each dialog window should have one of these as the rightmost button:
# Close Close a window which only displays information.
# Cancel Close a window which may be used to change the state of
# the application.
import sys
import types
import Tkinter
# A Toplevel with a ButtonBox and child site.
# A Toplevel with a ButtonBox and child site.
class Dialog(MegaToplevel):
    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('buttonbox_hull_borderwidth', 1, None),
            ('buttonbox_hull_relief', 'raised', None),
            ('buttonboxpos', 's', INITOPT),
            ('buttons', ('OK',), self._buttons),
            ('command', None, None),
            ('dialogchildsite_borderwidth', 1, None),
            ('dialogchildsite_relief', 'raised', None),
            ('defaultbutton', None, self._defaultButton),
            ('master', 'parent', None),
            ('separatorwidth', 0, INITOPT),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        MegaToplevel.__init__(self, parent)
        # Create the components.
        oldInterior = MegaToplevel.interior(self)
        # Set up pack options according to the position of the button box.
        pos = self['buttonboxpos']
        if pos not in 'nsew':
            raise ValueError, \
                'bad buttonboxpos option "%s":  should be n, s, e, or w' \
                    % pos
        if pos in 'ns':
            orient = 'horizontal'
            fill = 'x'
            if pos == 'n':
                side = 'top'
            else:
                side = 'bottom'
        else:
            orient = 'vertical'
            fill = 'y'
            if pos == 'w':
                side = 'left'
            else:
                side = 'right'
        # Create the button box.
        self._buttonBox = self.createcomponent('buttonbox',
                (), None,
                ButtonBox, (oldInterior,), orient = orient)
        self._buttonBox.pack(side = side, fill = fill)
        # Create the separating line.
        width = self['separatorwidth']
        if width > 0:
            self._separator = self.createcomponent('separator',
                    (), None,
                    Tkinter.Frame, (oldInterior,), relief = 'sunken',
                    height = width, width = width, borderwidth = width / 2)
            self._separator.pack(side = side, fill = fill)
        # Create the child site.
        self.__dialogChildSite = self.createcomponent('dialogchildsite',
                (), None,
                Tkinter.Frame, (oldInterior,))
        self.__dialogChildSite.pack(side=side, fill='both', expand=1)
        self.oldButtons = ()
        self.oldDefault = None
        # <Return> invokes the default button; deleting the window
        # behaves like invoking the dialog with no button name.
        self.bind('<Return>', self._invokeDefault)
        self.userdeletefunc(self._doCommand)
        self.usermodaldeletefunc(self._doCommand)
        # Check keywords and initialise options.
        self.initialiseoptions()
    def interior(self):
        # Return the frame in which callers create dialog contents.
        return self.__dialogChildSite
    def invoke(self, index = DEFAULT):
        # Invoke the command of the button at *index* (default: the
        # default button).
        return self._buttonBox.invoke(index)
    def _invokeDefault(self, event):
        # <Return> binding: invoke the default button, if one is set.
        try:
            self._buttonBox.index(DEFAULT)
        except ValueError:
            return
        self._buttonBox.invoke()
    def _doCommand(self, name = None):
        # Called when a button is pressed or the window is deleted;
        # *name* is the button name, or None for a window delete.
        if name is not None and self.active() and \
                grabstacktopwindow() != self.component('hull'):
            # This is a modal dialog but is not on the top of the grab
            # stack (ie: should not have the grab), so ignore this
            # event.  This seems to be a bug in Tk and may occur in
            # nested modal dialogs.
            #
            # An example is the PromptDialog demonstration.  To
            # trigger the problem, start the demo, then move the mouse
            # to the main window, hit <TAB> and then <TAB> again.  The
            # highlight border of the "Show prompt dialog" button
            # should now be displayed.  Now hit <SPACE>, <RETURN>,
            # <RETURN> rapidly several times.  Eventually, hitting the
            # return key invokes the password dialog "OK" button even
            # though the confirm dialog is active (and therefore
            # should have the keyboard focus).  Observed under Solaris
            # 2.5.1, python 1.5.2 and Tk8.0.
            # TODO: Give focus to the window on top of the grabstack.
            return
        command = self['command']
        if callable(command):
            return command(name)
        else:
            # Default behaviour: deactivate (modal) or withdraw.
            if self.active():
                self.deactivate(name)
            else:
                self.withdraw()
    def _buttons(self):
        # Configuration callback for the 'buttons' option: rebuild the
        # button box to match the new tuple of button names.
        buttons = self['buttons']
        if type(buttons) != types.TupleType and type(buttons) != types.ListType:
            raise ValueError, \
                'bad buttons option "%s": should be a tuple' % str(buttons)
        if self.oldButtons == buttons:
            return
        self.oldButtons = buttons
        for index in range(self._buttonBox.numbuttons()):
            self._buttonBox.delete(0)
        for name in buttons:
            self._buttonBox.add(name,
                command=lambda self=self, name=name: self._doCommand(name))
        if len(buttons) > 0:
            defaultbutton = self['defaultbutton']
            if defaultbutton is None:
                self._buttonBox.setdefault(None)
            else:
                try:
                    self._buttonBox.index(defaultbutton)
                except ValueError:
                    # Named default is not one of the buttons; leave
                    # the default unchanged.
                    pass
                else:
                    self._buttonBox.setdefault(defaultbutton)
        self._buttonBox.alignbuttons()
    def _defaultButton(self):
        # Configuration callback for the 'defaultbutton' option.
        defaultbutton = self['defaultbutton']
        if self.oldDefault == defaultbutton:
            return
        self.oldDefault = defaultbutton
        if len(self['buttons']) > 0:
            if defaultbutton is None:
                self._buttonBox.setdefault(None)
            else:
                try:
                    self._buttonBox.index(defaultbutton)
                except ValueError:
                    # Named default is not one of the buttons.
                    pass
                else:
                    self._buttonBox.setdefault(defaultbutton)
######################################################################
### File: PmwTimeFuncs.py
# Functions for dealing with dates and times.
import re
import string
def timestringtoseconds(text, separator = ':'):
inputList = string.split(string.strip(text), separator)
if len(inputList) != 3:
raise ValueError, 'invalid value: ' + text
sign = 1
if len(inputList[0]) > 0 and inputList[0][0] in ('+', '-'):
if inputList[0][0] == '-':
sign = -1
inputList[0] = inputList[0][1:]
if re.search('[^0-9]', string.join(inputList, '')) is not None:
raise ValueError, 'invalid value: ' + text
hour = string.atoi(inputList[0])
minute = string.atoi(inputList[1])
second = string.atoi(inputList[2])
if minute >= 60 or second >= 60:
raise ValueError, 'invalid value: ' + text
return sign * (hour * 60 * 60 + minute * 60 + second)
# Two-digit years no greater than _year_pivot are placed in century
# _century; later two-digit years in the century before it.  See
# setyearpivot() and datestringtojdn().
_year_pivot = 50
_century = 2000
def setyearpivot(pivot, century = None):
    # Set the pivot used to expand two-digit years (and optionally
    # the century they expand into).  Returns the previous
    # (pivot, century) pair.
    global _year_pivot
    global _century
    previous = (_year_pivot, _century)
    _year_pivot = pivot
    if century is not None:
        _century = century
    return previous
def datestringtojdn(text, format = 'ymd', separator = '/'):
inputList = string.split(string.strip(text), separator)
if len(inputList) != 3:
raise ValueError, 'invalid value: ' + text
if re.search('[^0-9]', string.join(inputList, '')) is not None:
raise ValueError, 'invalid value: ' + text
formatList = list(format)
day = string.atoi(inputList[formatList.index('d')])
month = string.atoi(inputList[formatList.index('m')])
year = string.atoi(inputList[formatList.index('y')])
if _year_pivot is not None:
if year >= 0 and year < 100:
if year <= _year_pivot:
year = year + _century
else:
year = year + _century - 100
jdn = ymdtojdn(year, month, day)
if jdntoymd(jdn) != (year, month, day):
raise ValueError, 'invalid value: ' + text
return jdn
def _cdiv(a, b):
    # Return a / b as calculated by most C language implementations
    # (truncation toward zero), assuming both a and b are integers.
    if a * b > 0:
        return a / b
    return -(abs(a) / abs(b))
def ymdtojdn(year, month, day, julian = -1, papal = 1):
    # Convert a calendar date to a Julian Day Number.  With julian=-1
    # the Julian/Gregorian calendar is chosen automatically from the
    # date, switching at Pope Gregory XIII's decree (papal=1) or at
    # the later British-American adoption (papal=0).
    # set Julian flag if auto set
    if julian < 0:
        if papal:                          # Pope Gregory XIII's decree
            lastJulianDate = 15821004L     # last day to use Julian calendar
        else:                              # British-American usage
            lastJulianDate = 17520902L     # last day to use Julian calendar
        julian = ((year * 100L) + month) * 100 + day <= lastJulianDate
    if year < 0:
        # Adjust BC year
        year = year + 1
    # Standard astronomical conversion formulae; _cdiv emulates C's
    # truncating integer division, on which they depend.
    if julian:
        return 367L * year - _cdiv(7 * (year + 5001L + _cdiv((month - 9), 7)), 4) + \
            _cdiv(275 * month, 9) + day + 1729777L
    else:
        return (day - 32076L) + \
            _cdiv(1461L * (year + 4800L + _cdiv((month - 14), 12)), 4) + \
            _cdiv(367 * (month - 2 - _cdiv((month - 14), 12) * 12), 12) - \
            _cdiv((3 * _cdiv((year + 4900L + _cdiv((month - 14), 12)), 100)), 4) + \
            1            # correction by rdg
def jdntoymd(jdn, julian = -1, papal = 1):
    """Return the (year, month, day) tuple for Julian day number *jdn*.

    Inverse of ymdtojdn().  *julian* and *papal* select the calendar
    as described there (automatic switch when *julian* is negative).
    Years at or before 1 BC are returned as negative numbers.
    """
    # set Julian flag if auto set
    if julian < 0:
        if papal: # Pope Gregory XIII's decree
            lastJulianJdn = 2299160L # last jdn to use Julian calendar
        else: # British-American usage
            lastJulianJdn = 2361221L # last jdn to use Julian calendar
        julian = (jdn <= lastJulianJdn);

    x = jdn + 68569L
    if julian:
        # Julian-calendar variant of the conversion constants.
        x = x + 38
        daysPer400Years = 146100L
        fudgedDaysPer4000Years = 1461000L + 1
    else:
        daysPer400Years = 146097L
        fudgedDaysPer4000Years = 1460970L + 31

    # Standard inverse day-number algorithm; _cdiv is C-style
    # truncating division, which the formulas assume.
    z = _cdiv(4 * x, daysPer400Years)
    x = x - _cdiv((daysPer400Years * z + 3), 4)
    y = _cdiv(4000 * (x + 1), fudgedDaysPer4000Years)
    x = x - _cdiv(1461 * y, 4) + 31
    m = _cdiv(80 * x, 2447)
    d = x - _cdiv(2447 * m, 80)
    x = _cdiv(m, 11)
    m = m + 2 - 12 * x
    y = 100 * (z - 49) + y + x

    # Convert from longs to integers.
    yy = int(y)
    mm = int(m)
    dd = int(d)

    if yy <= 0:
        # Adjust BC years (skip year 0).
        yy = yy - 1

    return (yy, mm, dd)
def stringtoreal(text, separator = '.'):
    # Convert *text* to a float, treating *separator* as the decimal
    # point character.  When a custom separator is in use, a literal
    # '.' in the input is rejected.
    if separator != '.':
        if text.find('.') >= 0:
            raise ValueError('invalid value: ' + text)
        pos = text.find(separator)
        if pos >= 0:
            text = text[:pos] + '.' + text[pos + 1:]
    return float(text)
######################################################################
### File: PmwBalloon.py
import os
import string
import Tkinter
class Balloon(MegaToplevel):
    """Balloon help megawidget.

    An undecorated toplevel containing a single label.  After the
    mouse has rested over a bound widget (or canvas/text item) for
    'initwait' milliseconds the label is popped up near it; help text
    may also be routed to a status line via 'statuscommand'.
    """

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('initwait', 500, None), # milliseconds
            ('label_background', 'lightyellow', None),
            ('label_foreground', 'black', None),
            ('label_justify', 'left', None),
            ('master', 'parent', None),
            ('relmouse', 'none', self._relmouse),
            ('state', 'both', self._state),
            ('statuscommand', None, None),
            ('xoffset', 20, None), # pixels
            ('yoffset', 1, None), # pixels
            ('hull_highlightthickness', 1, None),
            ('hull_highlightbackground', 'black', None),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaToplevel.__init__(self, parent)

        # Start hidden and unmanaged by the window manager (no
        # decorations, no focus).
        self.withdraw()
        self.overrideredirect(1)

        # Create the components.
        interior = self.interior()
        self._label = self.createcomponent('label',
                (), None,
                Tkinter.Label, (interior,))
        self._label.pack()

        # The default hull configuration options give a black border
        # around the balloon, but avoids a black 'flash' when the
        # balloon is deiconified, before the text appears.
        if not kw.has_key('hull_background'):
            self.configure(hull_background = \
                    str(self._label.cget('background')))

        # Initialise instance variables.
        # Id of the pending after() callback that will show the
        # balloon, or None if no popup is scheduled.
        self._timer = None

        # The widget or item that is currently triggering the balloon.
        # It is None if the balloon is not being displayed.  It is a
        # one-tuple if the balloon is being displayed in response to a
        # widget binding (value is the widget).  It is a two-tuple
        # (widget, item) for a canvas item binding and a three-tuple
        # (widget, x, y) for a text item binding.
        self._currentTrigger = None

        # Check keywords and initialise options.
        self.initialiseoptions()

    def destroy(self):
        """Cancel any pending popup timer, then destroy the toplevel."""
        if self._timer is not None:
            self.after_cancel(self._timer)
            self._timer = None
        MegaToplevel.destroy(self)

    def bind(self, widget, balloonHelp, statusHelp = None):
        """Bind balloon help *balloonHelp* and status line help
        *statusHelp* to *widget*.  If both are None the widget is just
        unbound; if only *statusHelp* is None it defaults to
        *balloonHelp*.
        """
        # If a previous bind for this widget exists, remove it.
        self.unbind(widget)

        if balloonHelp is None and statusHelp is None:
            return

        if statusHelp is None:
            statusHelp = balloonHelp
        enterId = widget.bind('<Enter>',
                lambda event, self = self, w = widget,
                        sHelp = statusHelp, bHelp = balloonHelp:
                    self._enter(event, w, sHelp, bHelp, 0))

        # Set Motion binding so that if the pointer remains at rest
        # within the widget until the status line removes the help and
        # then the pointer moves again, then redisplay the help in the
        # status line.
        # Note: The Motion binding only works for basic widgets, and
        # the hull of megawidgets but not for other megawidget components.
        motionId = widget.bind('<Motion>',
                lambda event = None, self = self, statusHelp = statusHelp:
                    self.showstatus(statusHelp))

        leaveId = widget.bind('<Leave>', self._leave)
        buttonId = widget.bind('<ButtonPress>', self._buttonpress)

        # Set Destroy binding so that the balloon can be withdrawn and
        # the timer can be cancelled if the widget is destroyed.
        destroyId = widget.bind('<Destroy>', self._destroy)

        # Use the None item in the widget's private Pmw dictionary to
        # store the widget's bind callbacks, for later clean up.
        if not hasattr(widget, '_Pmw_BalloonBindIds'):
            widget._Pmw_BalloonBindIds = {}
        widget._Pmw_BalloonBindIds[None] = \
                (enterId, motionId, leaveId, buttonId, destroyId)

    def unbind(self, widget):
        """Remove the balloon/status bindings added by bind() and, if
        *widget* is the current trigger, withdraw the balloon.
        """
        if hasattr(widget, '_Pmw_BalloonBindIds'):
            if widget._Pmw_BalloonBindIds.has_key(None):
                (enterId, motionId, leaveId, buttonId, destroyId) = \
                        widget._Pmw_BalloonBindIds[None]
                # Need to pass in old bindings, so that Tkinter can
                # delete the commands.  Otherwise, memory is leaked.
                widget.unbind('<Enter>', enterId)
                widget.unbind('<Motion>', motionId)
                widget.unbind('<Leave>', leaveId)
                widget.unbind('<ButtonPress>', buttonId)
                widget.unbind('<Destroy>', destroyId)
                del widget._Pmw_BalloonBindIds[None]

        if self._currentTrigger is not None and len(self._currentTrigger) == 1:
            # The balloon is currently being displayed and the current
            # trigger is a widget.
            triggerWidget = self._currentTrigger[0]
            if triggerWidget == widget:
                if self._timer is not None:
                    self.after_cancel(self._timer)
                    self._timer = None
                self.withdraw()
                self.clearstatus()
                self._currentTrigger = None

    def tagbind(self, widget, tagOrItem, balloonHelp, statusHelp = None):
        """Like bind(), but for a canvas or text widget tag/item."""
        # If a previous bind for this widget's tagOrItem exists, remove it.
        self.tagunbind(widget, tagOrItem)

        if balloonHelp is None and statusHelp is None:
            return

        if statusHelp is None:
            statusHelp = balloonHelp
        enterId = widget.tag_bind(tagOrItem, '<Enter>',
                lambda event, self = self, w = widget,
                        sHelp = statusHelp, bHelp = balloonHelp:
                    self._enter(event, w, sHelp, bHelp, 1))
        motionId = widget.tag_bind(tagOrItem, '<Motion>',
                lambda event = None, self = self, statusHelp = statusHelp:
                    self.showstatus(statusHelp))
        leaveId = widget.tag_bind(tagOrItem, '<Leave>', self._leave)
        buttonId = widget.tag_bind(tagOrItem, '<ButtonPress>', self._buttonpress)

        # Use the tagOrItem item in the widget's private Pmw dictionary to
        # store the tagOrItem's bind callbacks, for later clean up.
        if not hasattr(widget, '_Pmw_BalloonBindIds'):
            widget._Pmw_BalloonBindIds = {}
        widget._Pmw_BalloonBindIds[tagOrItem] = \
                (enterId, motionId, leaveId, buttonId)

    def tagunbind(self, widget, tagOrItem):
        """Remove the bindings added by tagbind() and, if the tag/item
        is the current trigger, withdraw the balloon.
        """
        if hasattr(widget, '_Pmw_BalloonBindIds'):
            if widget._Pmw_BalloonBindIds.has_key(tagOrItem):
                (enterId, motionId, leaveId, buttonId) = \
                        widget._Pmw_BalloonBindIds[tagOrItem]
                widget.tag_unbind(tagOrItem, '<Enter>', enterId)
                widget.tag_unbind(tagOrItem, '<Motion>', motionId)
                widget.tag_unbind(tagOrItem, '<Leave>', leaveId)
                widget.tag_unbind(tagOrItem, '<ButtonPress>', buttonId)
                del widget._Pmw_BalloonBindIds[tagOrItem]

        if self._currentTrigger is None:
            # The balloon is not currently being displayed.
            return

        if len(self._currentTrigger) == 1:
            # The current trigger is a widget.
            return

        if len(self._currentTrigger) == 2:
            # The current trigger is a canvas item.
            (triggerWidget, triggerItem) = self._currentTrigger
            if triggerWidget == widget and triggerItem == tagOrItem:
                if self._timer is not None:
                    self.after_cancel(self._timer)
                    self._timer = None
                self.withdraw()
                self.clearstatus()
                self._currentTrigger = None
        else: # The current trigger is a text item.
            (triggerWidget, x, y) = self._currentTrigger
            if triggerWidget == widget:
                currentPos = widget.index('@%d,%d' % (x, y))
                currentTags = widget.tag_names(currentPos)
                if tagOrItem in currentTags:
                    if self._timer is not None:
                        self.after_cancel(self._timer)
                        self._timer = None
                    self.withdraw()
                    self.clearstatus()
                    self._currentTrigger = None

    def showstatus(self, statusHelp):
        """Pass *statusHelp* to the 'statuscommand' callback, if the
        'state' option includes status help.
        """
        if self['state'] in ('status', 'both'):
            cmd = self['statuscommand']
            if callable(cmd):
                cmd(statusHelp)

    def clearstatus(self):
        """Clear the status line by sending it None."""
        self.showstatus(None)

    def _state(self):
        # Option validator for the 'state' configuration option.
        if self['state'] not in ('both', 'balloon', 'status', 'none'):
            raise ValueError, 'bad state option ' + repr(self['state']) + \
                ': should be one of \'both\', \'balloon\', ' + \
                '\'status\' or \'none\''

    def _relmouse(self):
        # Option validator for the 'relmouse' configuration option.
        if self['relmouse'] not in ('both', 'x', 'y', 'none'):
            raise ValueError, 'bad relmouse option ' + repr(self['relmouse'])+ \
                ': should be one of \'both\', \'x\', ' + '\'y\' or \'none\''

    def _enter(self, event, widget, statusHelp, balloonHelp, isItem):
        # <Enter> handler: schedule the balloon popup after 'initwait'
        # milliseconds, record what triggered it and show status help.

        # Do not display balloon if mouse button is pressed.  This
        # will only occur if the button was pressed inside a widget,
        # then the mouse moved out of and then back into the widget,
        # with the button still held down.  The number 0x1f00 is the
        # button mask for the 5 possible buttons in X.
        buttonPressed = (event.state & 0x1f00) != 0
        if not buttonPressed and balloonHelp is not None and \
                self['state'] in ('balloon', 'both'):
            if self._timer is not None:
                self.after_cancel(self._timer)
                self._timer = None

            self._timer = self.after(self['initwait'],
                    lambda self = self, widget = widget, help = balloonHelp,
                            isItem = isItem:
                        self._showBalloon(widget, help, isItem))

        if isItem:
            if hasattr(widget, 'canvasx'):
                # The widget is a canvas.
                item = widget.find_withtag('current')
                if len(item) > 0:
                    item = item[0]
                else:
                    item = None
                self._currentTrigger = (widget, item)
            else:
                # The widget is a text widget.
                self._currentTrigger = (widget, event.x, event.y)
        else:
            self._currentTrigger = (widget,)

        self.showstatus(statusHelp)

    def _leave(self, event):
        # <Leave> handler: cancel any scheduled popup and hide the
        # balloon and status help.
        if self._timer is not None:
            self.after_cancel(self._timer)
            self._timer = None
        self.withdraw()
        self.clearstatus()
        self._currentTrigger = None

    def _destroy(self, event):
        # Only withdraw the balloon and cancel the timer if the widget
        # being destroyed is the widget that triggered the balloon.
        # Note that in a Tkinter Destroy event, the widget field is a
        # string and not a widget as usual.

        if self._currentTrigger is None:
            # The balloon is not currently being displayed
            return

        if len(self._currentTrigger) == 1:
            # The current trigger is a widget (not an item)
            triggerWidget = self._currentTrigger[0]
            if str(triggerWidget) == event.widget:
                if self._timer is not None:
                    self.after_cancel(self._timer)
                    self._timer = None
                self.withdraw()
                self.clearstatus()
                self._currentTrigger = None

    def _buttonpress(self, event):
        # Any button press hides the balloon and cancels the pending
        # popup (status help is left as is).
        if self._timer is not None:
            self.after_cancel(self._timer)
            self._timer = None
        self.withdraw()
        self._currentTrigger = None

    def _showBalloon(self, widget, balloonHelp, isItem):
        # Timer callback: position the balloon near the triggering
        # widget or item and deiconify it.

        self._label.configure(text = balloonHelp)

        # First, display the balloon offscreen to get dimensions.
        screenWidth = self.winfo_screenwidth()
        screenHeight = self.winfo_screenheight()
        self.geometry('+%d+0' % (screenWidth + 1))
        self.update_idletasks()

        if isItem:
            # Get the bounding box of the current item.
            bbox = widget.bbox('current')
            if bbox is None:
                # The item that triggered the balloon has disappeared,
                # perhaps by a user's timer event that occured between
                # the <Enter> event and the 'initwait' timer calling
                # this method.
                return

            # The widget is either a text or canvas.  The meaning of
            # the values returned by the bbox method is different for
            # each, so use the existence of the 'canvasx' method to
            # distinguish between them.
            if hasattr(widget, 'canvasx'):
                # The widget is a canvas.  Place balloon under canvas
                # item.  The positions returned by bbox are relative
                # to the entire canvas, not just the visible part, so
                # need to convert to window coordinates.
                leftrel = bbox[0] - widget.canvasx(0)
                toprel = bbox[1] - widget.canvasy(0)
                bottomrel = bbox[3] - widget.canvasy(0)
            else:
                # The widget is a text widget.  Place balloon under
                # the character closest to the mouse.  The positions
                # returned by bbox are relative to the text widget
                # window (ie the visible part of the text only).
                leftrel = bbox[0]
                toprel = bbox[1]
                bottomrel = bbox[1] + bbox[3]
        else:
            leftrel = 0
            toprel = 0
            bottomrel = widget.winfo_height()

        xpointer, ypointer = widget.winfo_pointerxy() # -1 if off screen

        if xpointer >= 0 and self['relmouse'] in ('both', 'x'):
            x = xpointer
        else:
            x = leftrel + widget.winfo_rootx()
        x = x + self['xoffset']

        if ypointer >= 0 and self['relmouse'] in ('both', 'y'):
            y = ypointer
        else:
            y = bottomrel + widget.winfo_rooty()
        y = y + self['yoffset']

        edges = (string.atoi(str(self.cget('hull_highlightthickness'))) +
            string.atoi(str(self.cget('hull_borderwidth')))) * 2
        if x + self._label.winfo_reqwidth() + edges > screenWidth:
            x = screenWidth - self._label.winfo_reqwidth() - edges

        if y + self._label.winfo_reqheight() + edges > screenHeight:
            # Flip the balloon above the trigger instead.
            if ypointer >= 0 and self['relmouse'] in ('both', 'y'):
                y = ypointer
            else:
                y = toprel + widget.winfo_rooty()
            y = y - self._label.winfo_reqheight() - self['yoffset'] - edges

        setgeometryanddeiconify(self, '+%d+%d' % (x, y))
######################################################################
### File: PmwButtonBox.py
# Based on iwidgets2.2.0/buttonbox.itk code.
import types
import Tkinter
class ButtonBox(MegaWidget):
    """Manager megawidget for a horizontal or vertical row of buttons.

    Based on iwidgets2.2.0/buttonbox.itk code.  One button may be
    nominated as the default (drawn with an active default ring) and
    all buttons can be aligned to a common width.
    """

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('orient', 'horizontal', INITOPT),
            ('padx', 3, INITOPT),
            ('pady', 3, INITOPT),
        )
        self.defineoptions(kw, optiondefs, dynamicGroups = ('Button',))

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        interior = self.interior()
        if self['labelpos'] is None:
            # No label: grid the buttons directly into the hull.
            self._buttonBoxFrame = self._hull
            columnOrRow = 0
        else:
            self._buttonBoxFrame = self.createcomponent('frame',
                    (), None,
                    Tkinter.Frame, (interior,))
            self._buttonBoxFrame.grid(column=2, row=2, sticky='nsew')
            columnOrRow = 2

            self.createlabel(interior)

        orient = self['orient']
        if orient == 'horizontal':
            interior.grid_columnconfigure(columnOrRow, weight = 1)
        elif orient == 'vertical':
            interior.grid_rowconfigure(columnOrRow, weight = 1)
        else:
            raise ValueError, 'bad orient option ' + repr(orient) + \
                ': must be either \'horizontal\' or \'vertical\''

        # Initialise instance variables.

        # List of tuples describing the buttons:
        #   - name
        #   - button widget
        self._buttonList = []

        # The index of the default button.
        self._defaultButton = None

        # Id of the pending alignbuttons() idle callback, if any.
        self._timerId = None

        # Check keywords and initialise options.
        self.initialiseoptions()

    def destroy(self):
        """Cancel any pending alignbuttons() callback, then destroy."""
        if self._timerId:
            self.after_cancel(self._timerId)
            self._timerId = None
        MegaWidget.destroy(self)

    def numbuttons(self):
        """Return the number of buttons in the button box."""
        return len(self._buttonList)

    def index(self, index, forInsert = 0):
        """Return the numerical index for *index*, which may be a
        number, a button name, END or DEFAULT.  When *forInsert* is
        true, one past the last button is also accepted.
        """
        listLength = len(self._buttonList)
        if type(index) == types.IntType:
            if forInsert and index <= listLength:
                return index
            elif not forInsert and index < listLength:
                return index
            else:
                raise ValueError, 'index "%s" is out of range' % index
        elif index is END:
            if forInsert:
                return listLength
            elif listLength > 0:
                return listLength - 1
            else:
                raise ValueError, 'ButtonBox has no buttons'
        elif index is DEFAULT:
            if self._defaultButton is not None:
                return self._defaultButton
            raise ValueError, 'ButtonBox has no default'
        else:
            names = map(lambda t: t[0], self._buttonList)
            if index in names:
                return names.index(index)
            validValues = 'a name, a number, END or DEFAULT'
            raise ValueError, \
                    'bad index "%s": must be %s' % (index, validValues)

    def insert(self, componentName, beforeComponent = 0, **kw):
        """Create a new button named *componentName* and insert it
        before *beforeComponent*.  Extra keywords are passed to the
        Tkinter.Button constructor.  Returns the new button widget.
        """
        if componentName in self.components():
            raise ValueError, 'button "%s" already exists' % componentName
        if not kw.has_key('text'):
            kw['text'] = componentName
        kw['default'] = 'normal'
        button = apply(self.createcomponent, (componentName,
                (), 'Button',
                Tkinter.Button, (self._buttonBoxFrame,)), kw)

        index = self.index(beforeComponent, 1)
        horizontal = self['orient'] == 'horizontal'
        numButtons = len(self._buttonList)

        # Shift buttons up one position.  Buttons occupy odd grid
        # positions; the even ones are spacers.
        for i in range(numButtons - 1, index - 1, -1):
            widget = self._buttonList[i][1]
            pos = i * 2 + 3
            if horizontal:
                widget.grid(column = pos, row = 0)
            else:
                widget.grid(column = 0, row = pos)

        # Display the new button.
        if horizontal:
            button.grid(column = index * 2 + 1, row = 0, sticky = 'ew',
                    padx = self['padx'], pady = self['pady'])
            self._buttonBoxFrame.grid_columnconfigure(
                    numButtons * 2 + 2, weight = 1)
        else:
            button.grid(column = 0, row = index * 2 + 1, sticky = 'ew',
                    padx = self['padx'], pady = self['pady'])
            self._buttonBoxFrame.grid_rowconfigure(
                    numButtons * 2 + 2, weight = 1)

        self._buttonList.insert(index, (componentName, button))

        return button

    def add(self, componentName, **kw):
        """Append a new button at the end of the button box."""
        return apply(self.insert, (componentName, len(self._buttonList)), kw)

    def delete(self, index):
        """Destroy the button at *index* and close up the gap."""
        index = self.index(index)
        (name, widget) = self._buttonList[index]
        widget.grid_forget()
        self.destroycomponent(name)

        numButtons = len(self._buttonList)

        # Shift buttons down one position.
        horizontal = self['orient'] == 'horizontal'
        for i in range(index + 1, numButtons):
            widget = self._buttonList[i][1]
            pos = i * 2 - 1
            if horizontal:
                widget.grid(column = pos, row = 0)
            else:
                widget.grid(column = 0, row = pos)

        if horizontal:
            self._buttonBoxFrame.grid_columnconfigure(numButtons * 2 - 1,
                    minsize = 0)
            self._buttonBoxFrame.grid_columnconfigure(numButtons * 2, weight = 0)
        else:
            self._buttonBoxFrame.grid_rowconfigure(numButtons * 2, weight = 0)

        del self._buttonList[index]

    def setdefault(self, index):
        """Make the button at *index* the default button, or remove
        the default when *index* is None.
        """
        # Turn off the default ring around the current default button.
        if self._defaultButton is not None:
            button = self._buttonList[self._defaultButton][1]
            button.configure(default = 'normal')
            self._defaultButton = None

        # Turn on the default ring around the new default button.
        if index is not None:
            index = self.index(index)
            self._defaultButton = index
            button = self._buttonList[index][1]
            button.configure(default = 'active')

    def invoke(self, index = DEFAULT, noFlash = 0):
        """Invoke the callback associated with the *index* button.  If
        *noFlash* is not set, flash the button to indicate to the
        user that something happened.  Returns the callback's result.
        """
        button = self._buttonList[self.index(index)][1]
        if not noFlash:
            state = button.cget('state')
            relief = button.cget('relief')
            button.configure(state = 'active', relief = 'sunken')
            self.update_idletasks()
            self.after(100)
            button.configure(state = state, relief = relief)
        return button.invoke()

    def button(self, buttonIndex):
        """Return the button widget at *buttonIndex*."""
        return self._buttonList[self.index(buttonIndex)][1]

    def alignbuttons(self, when = 'later'):
        """Set every button to the width of the widest one.  By
        default the work is deferred to an idle callback so that the
        geometry information is up to date.
        """
        if when == 'later':
            if not self._timerId:
                self._timerId = self.after_idle(self.alignbuttons, 'now')
            return
        self.update_idletasks()
        self._timerId = None

        # Determine the width of the maximum length button.
        max = 0
        horizontal = (self['orient'] == 'horizontal')
        for index in range(len(self._buttonList)):
            gridIndex = index * 2 + 1
            if horizontal:
                width = self._buttonBoxFrame.grid_bbox(gridIndex, 0)[2]
            else:
                width = self._buttonBoxFrame.grid_bbox(0, gridIndex)[2]
            if width > max:
                max = width

        # Set the width of all the buttons to be the same.
        if horizontal:
            for index in range(len(self._buttonList)):
                self._buttonBoxFrame.grid_columnconfigure(index * 2 + 1,
                        minsize = max)
        else:
            self._buttonBoxFrame.grid_columnconfigure(0, minsize = max)
######################################################################
### File: PmwEntryField.py
# Based on iwidgets2.2.0/entryfield.itk code.
import re
import string
import types
import Tkinter
# Possible return values of validation functions.
OK = 1          # text is a complete, valid value
ERROR = 0       # text is invalid
PARTIAL = -1    # text is incomplete but could become valid
class EntryField(MegaWidget):
    """An entry megawidget with optional label and input validation.

    Based on iwidgets2.2.0/entryfield.itk code.  Each change to the
    entry is checked by a validator function returning OK, ERROR or
    PARTIAL; on ERROR the previous text is restored and the
    'invalidcommand' callback is called.
    """

    # The Tkinter interpreter for which the EntryFieldPre/Post class
    # bindings have been created (0 before the first instance).
    _classBindingsDefinedFor = 0

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('command', None, None),
            ('errorbackground', 'pink', None),
            ('invalidcommand', self.bell, None),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('modifiedcommand', None, None),
            ('sticky', 'ew', INITOPT),
            ('validate', None, self._validate),
            ('extravalidators', {}, None),
            ('value', '', INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        interior = self.interior()
        self._entryFieldEntry = self.createcomponent('entry',
                (), None,
                Tkinter.Entry, (interior,))
        self._entryFieldEntry.grid(column=2, row=2, sticky=self['sticky'])
        if self['value'] != '':
            self.__setEntry(self['value'])
        interior.grid_columnconfigure(2, weight=1)
        interior.grid_rowconfigure(2, weight=1)

        self.createlabel(interior)

        # Initialise instance variables.
        # Original entry background, saved while the error background
        # is showing; None while the entry is valid.
        self.normalBackground = None
        # Entry text before the current edit; used to restore on ERROR.
        self._previousText = None

        # Initialise instance.

        _registerEntryField(self._entryFieldEntry, self)

        # Establish the special class bindings if not already done.
        # Also create bindings if the Tkinter default interpreter has
        # changed.  Use Tkinter._default_root to create class
        # bindings, so that a reference to root is created by
        # bind_class rather than a reference to self, which would
        # prevent object cleanup.
        if EntryField._classBindingsDefinedFor != Tkinter._default_root:
            tagList = self._entryFieldEntry.bindtags()
            root  = Tkinter._default_root

            # Bind pre and post class bindings for every event
            # sequence already bound anywhere in the entry's bindtags.
            allSequences = {}
            for tag in tagList:
                sequences = root.bind_class(tag)
                if type(sequences) is types.StringType:
                    # In old versions of Tkinter, bind_class returns a string
                    sequences = root.tk.splitlist(sequences)
                for sequence in sequences:
                    allSequences[sequence] = None
            for sequence in allSequences.keys():
                root.bind_class('EntryFieldPre', sequence, _preProcess)
                root.bind_class('EntryFieldPost', sequence, _postProcess)

            EntryField._classBindingsDefinedFor = root

        self._entryFieldEntry.bindtags(('EntryFieldPre',) +
                self._entryFieldEntry.bindtags() + ('EntryFieldPost',))
        self._entryFieldEntry.bind('<Return>', self._executeCommand)

        # Check keywords and initialise options.
        self.initialiseoptions()

    def destroy(self):
        """Deregister the entry from the module cache, then destroy."""
        _deregisterEntryField(self._entryFieldEntry)
        MegaWidget.destroy(self)

    def _getValidatorFunc(self, validator, index):
        # Search the extra and standard validator lists for the
        # given 'validator'.  If 'validator' is an alias, then
        # continue the search using the alias.  Make sure that
        # self-referencial aliases do not cause infinite loops.
        extraValidators = self['extravalidators']
        traversedValidators = []

        while 1:
            traversedValidators.append(validator)
            if extraValidators.has_key(validator):
                validator = extraValidators[validator][index]
            elif _standardValidators.has_key(validator):
                validator = _standardValidators[validator][index]
            else:
                return validator
            if validator in traversedValidators:
                return validator

    def _validate(self):
        # Option handler for the 'validate' option: normalise it into
        # a dictionary of validator, stringtovalue, min/max limits and
        # strictness flags, then re-check the current entry text.
        dict = {
            'validator' : None,
            'min' : None,
            'max' : None,
            'minstrict' : 1,
            'maxstrict' : 1,
        }
        opt = self['validate']
        if type(opt) is types.DictionaryType:
            dict.update(opt)
        else:
            dict['validator'] = opt

        # Look up validator maps and replace 'validator' field with
        # the corresponding function.
        validator = dict['validator']
        valFunction = self._getValidatorFunc(validator, 0)
        self._checkValidateFunction(valFunction, 'validate', validator)
        dict['validator'] = valFunction

        # Look up validator maps and replace 'stringtovalue' field
        # with the corresponding function.
        if dict.has_key('stringtovalue'):
            stringtovalue = dict['stringtovalue']
            strFunction = self._getValidatorFunc(stringtovalue, 1)
            self._checkValidateFunction(
                    strFunction, 'stringtovalue', stringtovalue)
        else:
            strFunction = self._getValidatorFunc(validator, 1)
            if strFunction == validator:
                strFunction = len
        dict['stringtovalue'] = strFunction

        self._validationInfo = dict

        # Any remaining keys are extra keyword arguments to pass to
        # the validator and stringtovalue functions.
        args = dict.copy()
        del args['validator']
        del args['min']
        del args['max']
        del args['minstrict']
        del args['maxstrict']
        del args['stringtovalue']
        self._validationArgs = args
        self._previousText = None

        # String limits are converted to values using stringtovalue.
        if type(dict['min']) == types.StringType and strFunction is not None:
            dict['min'] = apply(strFunction, (dict['min'],), args)
        if type(dict['max']) == types.StringType and strFunction is not None:
            dict['max'] = apply(strFunction, (dict['max'],), args)

        self._checkValidity()

    def _checkValidateFunction(self, function, option, validator):
        # Raise an error if 'function' is not a function or None.

        if function is not None and not callable(function):
            extraValidators = self['extravalidators']
            extra = extraValidators.keys()
            extra.sort()
            extra = tuple(extra)
            standard = _standardValidators.keys()
            standard.sort()
            standard = tuple(standard)
            msg = 'bad %s value "%s":  must be a function or one of ' \
                'the standard validators %s or extra validators %s'
            raise ValueError, msg % (option, validator, standard, extra)

    def _executeCommand(self, event = None):
        # Invoke the 'command' callback (bound to <Return>; also used
        # by invoke()).
        cmd = self['command']
        if callable(cmd):
            if event is None:
                # Return result of command for invoke() method.
                return cmd()
            else:
                cmd()

    def _preProcess(self):
        # Snapshot the entry state before an event is processed, so
        # that _checkValidity() can restore it if the edit is invalid.

        self._previousText = self._entryFieldEntry.get()
        self._previousICursor = self._entryFieldEntry.index('insert')
        self._previousXview = self._entryFieldEntry.index('@0')
        if self._entryFieldEntry.selection_present():
            self._previousSel= (self._entryFieldEntry.index('sel.first'),
                self._entryFieldEntry.index('sel.last'))
        else:
            self._previousSel = None

    def _postProcess(self):
        # Validate the entry after an event has been processed and
        # call 'modifiedcommand' if the text changed.

        # No need to check if text has not changed.
        previousText = self._previousText
        if previousText == self._entryFieldEntry.get():
            return self.valid()

        valid = self._checkValidity()
        if self.hulldestroyed():
            # The invalidcommand called by _checkValidity() destroyed us.
            return valid

        cmd = self['modifiedcommand']
        if callable(cmd) and previousText != self._entryFieldEntry.get():
            cmd()
        return valid

    def checkentry(self):
        """Re-validate the current entry text and return the result.

        If there is a variable specified by the entry_textvariable
        option, checkentry() should be called after the set() method
        of the variable is called.
        """
        self._previousText = None
        return self._postProcess()

    def _getValidity(self):
        # Return OK, ERROR or PARTIAL for the current entry text,
        # applying the validator function and then the min/max limits.
        text = self._entryFieldEntry.get()
        dict = self._validationInfo
        args = self._validationArgs

        if dict['validator'] is not None:
            status = apply(dict['validator'], (text,), args)
            if status != OK:
                return status

        # Check for out of (min, max) range.
        if dict['stringtovalue'] is not None:
            min = dict['min']
            max = dict['max']
            if min is None and max is None:
                return OK
            val = apply(dict['stringtovalue'], (text,), args)
            if min is not None and val < min:
                if dict['minstrict']:
                    return ERROR
                else:
                    return PARTIAL
            if max is not None and val > max:
                if dict['maxstrict']:
                    return ERROR
                else:
                    return PARTIAL
        return OK

    def _checkValidity(self):
        # Check the entry's validity; on ERROR call 'invalidcommand'
        # and restore the previous text, and set the entry background
        # to 'errorbackground' while the (possibly restored) text is
        # still not OK.
        valid = self._getValidity()
        oldValidity = valid

        if valid == ERROR:
            # The entry is invalid.
            cmd = self['invalidcommand']
            if callable(cmd):
                cmd()
            if self.hulldestroyed():
                # The invalidcommand destroyed us.
                return oldValidity

            # Restore the entry to its previous value.
            if self._previousText is not None:
                self.__setEntry(self._previousText)
                self._entryFieldEntry.icursor(self._previousICursor)
                self._entryFieldEntry.xview(self._previousXview)
                if self._previousSel is not None:
                    self._entryFieldEntry.selection_range(self._previousSel[0],
                        self._previousSel[1])

                # Check if the saved text is valid as well.
                valid = self._getValidity()

        self._valid = valid

        if self.hulldestroyed():
            # The validator or stringtovalue commands called by
            # _checkValidity() destroyed us.
            return oldValidity

        if valid == OK:
            if self.normalBackground is not None:
                self._entryFieldEntry.configure(
                        background = self.normalBackground)
                self.normalBackground = None
        else:
            if self.normalBackground is None:
                self.normalBackground = self._entryFieldEntry.cget('background')
                self._entryFieldEntry.configure(
                        background = self['errorbackground'])

        return oldValidity

    def invoke(self):
        """Invoke the 'command' callback and return its result."""
        return self._executeCommand()

    def valid(self):
        """Return true if the current entry text is valid (OK)."""
        return self._valid == OK

    def clear(self):
        """Delete all text from the entry (with validation)."""
        self.setentry('')

    def __setEntry(self, text):
        # Replace the entry's text, temporarily enabling the widget if
        # it is disabled or readonly.
        oldState = str(self._entryFieldEntry.cget('state'))
        if oldState != 'normal':
            self._entryFieldEntry.configure(state='normal')
        self._entryFieldEntry.delete(0, 'end')
        self._entryFieldEntry.insert(0, text)
        if oldState != 'normal':
            self._entryFieldEntry.configure(state=oldState)

    def setentry(self, text):
        """Set the entry's text to *text*, running the normal
        validation cycle; returns the resulting validity.
        """
        self._preProcess()
        self.__setEntry(text)
        return self._postProcess()

    def getvalue(self):
        """Return the current entry text."""
        return self._entryFieldEntry.get()

    def setvalue(self, text):
        """Alias for setentry()."""
        return self.setentry(text)
forwardmethods(EntryField, Tkinter.Entry, '_entryFieldEntry')
# ======================================================================
# Entry field validation functions

# Whole-string patterns used by the standard 'numeric', 'alphabetic'
# and 'alphanumeric' validators (each also matches the empty string).
_numericregex = re.compile('^[0-9]*$')
_alphabeticregex = re.compile('^[a-z]*$', re.IGNORECASE)
_alphanumericregex = re.compile('^[0-9a-z]*$', re.IGNORECASE)
def numericvalidator(text):
    # Standard 'numeric' validator: a run of ASCII digits is OK; an
    # empty entry is PARTIAL (the user may still be typing).
    if text == '':
        return PARTIAL
    if _numericregex.match(text) is None:
        return ERROR
    return OK
def integervalidator(text):
    # Standard 'integer' validator: an optionally signed decimal
    # integer is OK; an empty string or a bare sign is PARTIAL.
    if text in ('', '-', '+'):
        return PARTIAL
    try:
        string.atol(text)
    except ValueError:
        return ERROR
    return OK
def alphabeticvalidator(text):
    # Standard 'alphabetic' validator: any (possibly empty) run of
    # ASCII letters is OK.
    if _alphabeticregex.match(text) is not None:
        return OK
    return ERROR
def alphanumericvalidator(text):
    # Standard 'alphanumeric' validator: any (possibly empty) run of
    # ASCII letters and digits is OK.
    if _alphanumericregex.match(text) is not None:
        return OK
    return ERROR
def hexadecimalvalidator(text):
    # Standard 'hexadecimal' validator: an optionally signed base-16
    # number is OK; a bare sign and/or '0x' prefix is PARTIAL.
    if text in ('', '0x', '0X', '+', '+0x', '+0X', '-', '-0x', '-0X'):
        return PARTIAL
    try:
        string.atol(text, 16)
    except ValueError:
        return ERROR
    return OK
def realvalidator(text, separator = '.'):
    # Standard 'real' validator.  *separator* is the decimal point
    # character; when it is not '.', a literal '.' in the input is an
    # error and the separator is translated before conversion.
    if separator != '.':
        if text.find('.') >= 0:
            return ERROR
        pos = text.find(separator)
        if pos >= 0:
            text = text[:pos] + '.' + text[pos + 1:]
    try:
        float(text)
        return OK
    except ValueError:
        # Not a complete number.  Decide between PARTIAL and ERROR by
        # checking whether appending a digit could make it valid
        # eg ('-', '+', '.', '-.', '+.', '1.23e', '1E-').
        if len(text) == 0:
            return PARTIAL
        if text[-1] in string.digits:
            return ERROR
        try:
            float(text + '0')
            return PARTIAL
        except ValueError:
            return ERROR
def timevalidator(text, separator = ':'):
    # Standard 'time' validator: anything timestringtoseconds()
    # accepts is OK; otherwise a fragment consisting only of an
    # optional sign, digits and separators is PARTIAL.
    try:
        timestringtoseconds(text, separator)
        return OK
    except ValueError:
        pass
    if len(text) > 0 and text[0] in ('+', '-'):
        text = text[1:]
    if re.search('[^0-9' + separator + ']', text) is not None:
        return ERROR
    return PARTIAL
def datevalidator(text, format = 'ymd', separator = '/'):
    # Standard 'date' validator: anything datestringtojdn() accepts
    # is OK; otherwise a fragment consisting only of digits and
    # separators is PARTIAL.
    try:
        datestringtojdn(text, format, separator)
        return OK
    except ValueError:
        pass
    if re.search('[^0-9' + separator + ']', text) is not None:
        return ERROR
    return PARTIAL
# Map from standard validator name to a (validator, stringtovalue)
# pair, as looked up by EntryField._getValidatorFunc().
_standardValidators = {
    'numeric' : (numericvalidator, string.atol),
    'integer' : (integervalidator, string.atol),
    'hexadecimal' : (hexadecimalvalidator, lambda s: string.atol(s, 16)),
    'real' : (realvalidator, stringtoreal),
    'alphabetic' : (alphabeticvalidator, len),
    'alphanumeric' : (alphanumericvalidator, len),
    'time' : (timevalidator, timestringtoseconds),
    'date' : (datevalidator, datestringtojdn),
}

# Map from a Tkinter.Entry component to its owning EntryField, used by
# the EntryFieldPre/Post class bindings to find the megawidget.
_entryCache = {}
def _registerEntryField(entry, entryField):
    # Register an EntryField widget for an Entry widget, so the class
    # bindings can map the event widget back to its megawidget.
    _entryCache[entry] = entryField
def _deregisterEntryField(entry):
    # Deregister an Entry widget (called when its EntryField is
    # destroyed); raises KeyError if the entry was never registered.
    del _entryCache[entry]
def _preProcess(event):
    # Forward preprocess events for an Entry to its EntryField.
    entryField = _entryCache[event.widget]
    entryField._preProcess()
def _postProcess(event):
    # Forward postprocess events for an Entry to its EntryField.
    # The function specified by the 'command' option may have
    # destroyed the megawidget in a binding earlier in bindtags, so
    # look the widget up defensively.
    entryField = _entryCache.get(event.widget)
    if entryField is not None:
        entryField._postProcess()
######################################################################
### File: PmwGroup.py
import string
import Tkinter
def aligngrouptags(groups):
    # Adjust the y position of the tags in /groups/ so that they all
    # sit as if every tag were as tall as the tallest one, making a
    # row of Group megawidgets line up.
    tallest = 0
    for group in groups:
        if group._tag is None:
            # No tag label: its effective height is the ring border.
            height = (int(str(group._ring.cget('borderwidth'))) +
                    int(str(group._ring.cget('highlightthickness'))))
        else:
            height = group._tag.winfo_reqheight()
        tallest = max(tallest, height)

    for group in groups:
        ringBorder = (int(str(group._ring.cget('borderwidth'))) +
                int(str(group._ring.cget('highlightthickness'))))
        topBorder = tallest / 2 - ringBorder / 2
        group._hull.grid_rowconfigure(0, minsize = topBorder)
        group._ring.grid_rowconfigure(0,
                minsize = tallest - topBorder - ringBorder)
        if group._tag is not None:
            group._tag.place(y = tallest / 2)
class Group( MegaWidget ):
    """Labelled frame megawidget.

    A Group is an interior frame (the 'groupchildsite' component)
    surrounded by a ring (a grooved Frame by default) with an optional
    tag Label laid over the ring's top edge.  The group can be
    collapsed so that only the ring and tag remain visible.
    """

    def __init__(self, parent = None, **kw):

        # Define the megawidget options.
        #TODO rename collapsedsize to collapsedheight
        #after adding collapsedwitdh (Pmw 1.3.3)
        #will both stay in place for compatibility...
        optiondefs = (
            ('collapsedsize',    6,        INITOPT),
            ('collapsedheight',  6,        INITOPT),
            ('collapsedwidth',   20,       INITOPT),
            ('ring_borderwidth', 2,        None),
            ('ring_relief',      'groove', None),
            ('tagindent',        10,       INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        interior = MegaWidget.interior(self)

        # Ring frame drawing the border around the child site.
        self._ring = self.createcomponent(
            'ring',
            (), None,
            Tkinter.Frame, (interior,),
            )

        self._groupChildSite = self.createcomponent(
            'groupchildsite',
            (), None,
            Tkinter.Frame, (self._ring,)
            )

        # Tag label; may be None if the component was suppressed
        # (e.g. created with tag_pyclass = None).
        self._tag = self.createcomponent(
            'tag',
            (), None,
            Tkinter.Label, (interior,),
            )

        ringBorder = (string.atoi(str(self._ring.cget('borderwidth'))) +
                string.atoi(str(self._ring.cget('highlightthickness'))))
        if self._tag is None:
            tagHeight = ringBorder
        else:
            tagHeight = self._tag.winfo_reqheight()
            # Centre the tag vertically over the top edge of the ring.
            self._tag.place(
                    x = ringBorder + self['tagindent'],
                    y = tagHeight / 2,
                    anchor = 'w')

        topBorder = tagHeight / 2 - ringBorder / 2
        self._ring.grid(column = 0, row = 1, sticky = 'nsew')
        interior.grid_columnconfigure(0, weight = 1)
        interior.grid_rowconfigure(1, weight = 1)
        # Reserve space above the ring for the top half of the tag.
        interior.grid_rowconfigure(0, minsize = topBorder)

        self._groupChildSite.grid(column = 0, row = 1, sticky = 'nsew')
        self._ring.grid_columnconfigure(0, weight = 1)
        self._ring.grid_rowconfigure(1, weight = 1)
        self._ring.grid_rowconfigure(0,
                minsize = tagHeight - topBorder - ringBorder)

        # Whether the child site is currently shown.
        self.showing = 1

        # Check keywords and initialise options.
        self.initialiseoptions()

    def toggle(self):
        # Flip between the expanded and collapsed states.
        if self.showing:
            self.collapse()
        else:
            self.expand()
        self.showing = not self.showing

    def expand(self):
        # Show the child site again.
        self._groupChildSite.grid(column = 0, row = 1, sticky = 'nsew')

    def collapse(self):
        # Hide the child site, shrinking the ring to just fit the tag
        # plus the configured collapsed margins.
        self._groupChildSite.grid_forget()
        #Tracker item 1096289
        if self._tag is None:
            # Bug fix: the tag width was previously queried even when
            # the tag component had been suppressed, raising
            # AttributeError on None.  Use zero in that case.
            tagHeight = 0
            tagWidth = 0
        else:
            tagHeight = self._tag.winfo_reqheight()
            tagWidth = self._tag.winfo_reqwidth()
        self._ring.configure(height=(tagHeight / 2) + self['collapsedheight'],
                width = tagWidth + self['collapsedwidth'])

    def interior(self):
        # Frame into which callers should pack their widgets.
        return self._groupChildSite
######################################################################
### File: PmwLabeledWidget.py
import Tkinter
class LabeledWidget(MegaWidget):
    """A megawidget consisting of a child site and an optional label.

    The label is created by MegaWidget.createlabel() as directed by
    the 'labelpos' and 'labelmargin' options.
    """

    def __init__(self, parent = None, **kw):

        # Define the megawidget options.
        optiondefs = (
            ('labelmargin', 0,      INITOPT),
            ('labelpos',    None,   INITOPT),
            ('sticky',      'nsew', INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the child site and let it fill cell (2, 2) of the hull.
        hull = MegaWidget.interior(self)
        childSite = self.createcomponent('labelchildsite',
                (), None,
                Tkinter.Frame, (hull,))
        childSite.grid(column=2, row=2, sticky=self['sticky'])
        hull.grid_columnconfigure(2, weight=1)
        hull.grid_rowconfigure(2, weight=1)
        self._labelChildSite = childSite

        # Create the label component, if requested.
        self.createlabel(hull)

        # Check keywords and initialise options.
        self.initialiseoptions()

    def interior(self):
        # Frame into which callers should pack their widgets.
        return self._labelChildSite
######################################################################
### File: PmwMainMenuBar.py
# Main menubar
import string
import types
import Tkinter
class MainMenuBar(MegaArchetype):
    # Main menubar megawidget.  Unlike MenuBar (which packs Menubutton
    # widgets into a Frame), the hull here is a native Tkinter.Menu,
    # suitable for installing as a toplevel window's 'menu' option.

    def __init__(self, parent = None, **kw):

        # Define the megawidget options.
        optiondefs = (
            ('balloon',      None,       None),
            ('hotkeys',      1,          INITOPT),
            ('hull_tearoff', 0,          None),
        )
        self.defineoptions(kw, optiondefs, dynamicGroups = ('Menu',))

        # Initialise the base class (after defining the options).
        MegaArchetype.__init__(self, parent, Tkinter.Menu)

        self._menuInfo = {}
        self._menuInfo[None] = (None, [])
        # Map from a menu name to a tuple of information about the menu.
        # The first item in the tuple is the name of the parent menu (for
        # toplevel menus this is None).  The second item in the tuple is
        # a list of status help messages for each item in the menu.
        # The key for the information for the main menubar is None.

        # Bindings used to display status help as the mouse moves over
        # the menubar itself.
        self._menu = self.interior()
        self._menu.bind('<Leave>', self._resetHelpmessage)
        self._menu.bind('<Motion>',
            lambda event=None, self=self: self._menuHelp(event, None))

        # Check keywords and initialise options.
        self.initialiseoptions()

    def deletemenuitems(self, menuName, start, end = None):
        # Delete items [start, end] of menu <menuName>, along with
        # their recorded status help messages.
        self.component(menuName).delete(start, end)
        if end is None:
            del self._menuInfo[menuName][1][start]
        else:
            self._menuInfo[menuName][1][start:end+1] = []

    def deletemenu(self, menuName):
        """Delete should be called for cascaded menus before main menus.
        """
        parentName = self._menuInfo[menuName][0]
        del self._menuInfo[menuName]
        if parentName is None:
            parentMenu = self._menu
        else:
            parentMenu = self.component(parentName)

        # Find and delete the cascade entry in the parent menu that
        # refers to this menu (and its help message).
        menu = self.component(menuName)
        menuId = str(menu)
        for item in range(parentMenu.index('end') + 1):
            if parentMenu.type(item) == 'cascade':
                itemMenu = str(parentMenu.entrycget(item, 'menu'))
                if itemMenu == menuId:
                    parentMenu.delete(item)
                    del self._menuInfo[parentName][1][item]
                    break

        self.destroycomponent(menuName)

    def disableall(self):
        # Disable every toplevel menubar entry.
        for index in range(len(self._menuInfo[None][1])):
            self.entryconfigure(index, state = 'disabled')

    def enableall(self):
        # Enable every toplevel menubar entry.
        for index in range(len(self._menuInfo[None][1])):
            self.entryconfigure(index, state = 'normal')

    def addmenu(self, menuName, balloonHelp, statusHelp = None,
            traverseSpec = None, **kw):
        # Add a toplevel menu.  The balloon help text is used as the
        # status help if none is supplied.
        if statusHelp is None:
            statusHelp = balloonHelp
        self._addmenu(None, menuName, balloonHelp, statusHelp,
            traverseSpec, kw)

    def addcascademenu(self, parentMenuName, menuName, statusHelp='',
            traverseSpec = None, **kw):
        # Add a cascade menu under an existing menu.
        self._addmenu(parentMenuName, menuName, None, statusHelp,
            traverseSpec, kw)

    def _addmenu(self, parentMenuName, menuName, balloonHelp, statusHelp,
            traverseSpec, kw):
        # Common implementation for addmenu() and addcascademenu().
        if (menuName) in self.components():
            raise ValueError, 'menu "%s" already exists' % menuName

        # Separate the options destined for the menu component
        # ('tearoff' and 'name') from those for the cascade entry.
        menukw = {}
        if kw.has_key('tearoff'):
            menukw['tearoff'] = kw['tearoff']
            del kw['tearoff']
        else:
            menukw['tearoff'] = 0
        if kw.has_key('name'):
            menukw['name'] = kw['name']
            del kw['name']

        if not kw.has_key('label'):
            kw['label'] = menuName

        self._addHotkeyToOptions(parentMenuName, kw, traverseSpec)

        if parentMenuName is None:
            parentMenu = self._menu
            balloon = self['balloon']
            # Bug in Tk: balloon help not implemented
            # if balloon is not None:
            #     balloon.mainmenubind(parentMenu, balloonHelp, statusHelp)
        else:
            parentMenu = self.component(parentMenuName)

        # Add the cascade entry, then create the menu component and
        # attach it to that entry.
        apply(parentMenu.add_cascade, (), kw)

        menu = apply(self.createcomponent, (menuName,
                (), 'Menu',
                Tkinter.Menu, (parentMenu,)), menukw)
        parentMenu.entryconfigure('end', menu = menu)

        # Record bookkeeping info and set up status-help bindings.
        self._menuInfo[parentMenuName][1].append(statusHelp)
        self._menuInfo[menuName] = (parentMenuName, [])

        menu.bind('<Leave>', self._resetHelpmessage)
        menu.bind('<Motion>',
            lambda event=None, self=self, menuName=menuName:
                    self._menuHelp(event, menuName))

    def addmenuitem(self, menuName, itemType, statusHelp = '',
            traverseSpec = None, **kw):
        # Add an item of the given type to menu <menuName>.
        menu = self.component(menuName)
        if itemType != 'separator':
            self._addHotkeyToOptions(menuName, kw, traverseSpec)

        # Dispatch to the appropriate Tkinter.Menu add_* method.
        if itemType == 'command':
            command = menu.add_command
        elif itemType == 'separator':
            command = menu.add_separator
        elif itemType == 'checkbutton':
            command = menu.add_checkbutton
        elif itemType == 'radiobutton':
            command = menu.add_radiobutton
        elif itemType == 'cascade':
            command = menu.add_cascade
        else:
            raise ValueError, 'unknown menuitem type "%s"' % itemType

        self._menuInfo[menuName][1].append(statusHelp)
        apply(command, (), kw)

    def _addHotkeyToOptions(self, menuName, kw, traverseSpec):
        # Choose an 'underline' index for the new entry's label, unless
        # hotkeys are disabled or the caller supplied one.
        if (not self['hotkeys'] or kw.has_key('underline') or
                not kw.has_key('label')):
            return

        # An integer traverseSpec is used directly as the index.
        if type(traverseSpec) == types.IntType:
            kw['underline'] = traverseSpec
            return

        if menuName is None:
            menu = self._menu
        else:
            menu = self.component(menuName)

        # Collect the hotkey characters already used by this menu's
        # existing entries.
        hotkeyList = []
        end = menu.index('end')
        if end is not None:
            for item in range(end + 1):
                if menu.type(item) not in ('separator', 'tearoff'):
                    underline = \
                            string.atoi(str(menu.entrycget(item, 'underline')))
                    if underline != -1:
                        label = str(menu.entrycget(item, 'label'))
                        if underline < len(label):
                            hotkey = string.lower(label[underline])
                            if hotkey not in hotkeyList:
                                hotkeyList.append(hotkey)

        name = kw['label']

        if type(traverseSpec) == types.StringType:
            # A string traverseSpec names the character to underline,
            # provided it occurs in the label and is not already used.
            lowerLetter = string.lower(traverseSpec)
            if traverseSpec in name and lowerLetter not in hotkeyList:
                kw['underline'] = string.index(name, traverseSpec)
        else:
            # Otherwise pick the first unused alphanumeric character.
            targets = string.digits + string.letters
            lowerName = string.lower(name)
            for letter_index in range(len(name)):
                letter = lowerName[letter_index]
                if letter in targets and letter not in hotkeyList:
                    kw['underline'] = letter_index
                    break

    def _menuHelp(self, event, menuName):
        # <Motion> binding: show status help for the entry under the
        # mouse pointer in the balloon, if a balloon is configured.
        if menuName is None:
            menu = self._menu
            index = menu.index('@%d'% event.x)
        else:
            menu = self.component(menuName)
            index = menu.index('@%d'% event.y)

        balloon = self['balloon']
        if balloon is not None:
            if index is None:
                balloon.showstatus('')
            else:
                if str(menu.cget('tearoff')) == '1':
                    # Account for the tearoff entry at index 0.
                    index = index - 1
                if index >= 0:
                    help = self._menuInfo[menuName][1][index]
                    balloon.showstatus(help)

    def _resetHelpmessage(self, event=None):
        # <Leave> binding: clear any status help being displayed.
        balloon = self['balloon']
        if balloon is not None:
            balloon.clearstatus()

# Forward the Tkinter.Menu methods through the hull component.
forwardmethods(MainMenuBar, Tkinter.Menu, '_hull')
######################################################################
### File: PmwMenuBar.py
# Manager widget for menus.
import string
import types
import Tkinter
class MenuBar(MegaWidget):
    # Manager widget for menus: a Frame hull holding Menubutton
    # components ('<name>-button') for toplevel menus, each posting a
    # Menu component ('<name>-menu').

    def __init__(self, parent = None, **kw):

        # Define the megawidget options.
        optiondefs = (
            ('balloon',      None,       None),
            ('hotkeys',      1,          INITOPT),
            ('padx',         0,          INITOPT),
        )
        self.defineoptions(kw, optiondefs, dynamicGroups = ('Menu', 'Button'))

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        self._menuInfo = {}
        # Map from a menu name to a tuple of information about the menu.
        # The first item in the tuple is the name of the parent menu (for
        # toplevel menus this is None).  The second item in the tuple is
        # a list of status help messages for each item in the menu.
        # The third item in the tuple is the id of the binding used
        # to detect mouse motion to display status help.
        # Information for the toplevel menubuttons is not stored here.

        self._mydeletecommand = self.component('hull').tk.deletecommand
        # Cache this method for use later.

        # Check keywords and initialise options.
        self.initialiseoptions()

    def deletemenuitems(self, menuName, start, end = None):
        # Delete items [start, end] of menu <menuName>, along with
        # their recorded status help messages.
        self.component(menuName + '-menu').delete(start, end)
        if end is None:
            del self._menuInfo[menuName][1][start]
        else:
            self._menuInfo[menuName][1][start:end+1] = []

    def deletemenu(self, menuName):
        """Delete should be called for cascaded menus before main menus.
        """
        # Clean up binding for this menu.
        parentName = self._menuInfo[menuName][0]
        bindId = self._menuInfo[menuName][2]
        _bindtag = 'PmwMenuBar' + str(self) + menuName
        self.unbind_class(_bindtag, '<Motion>')
        self._mydeletecommand(bindId) # unbind_class does not clean up
        del self._menuInfo[menuName]

        if parentName is None:
            self.destroycomponent(menuName + '-button')
        else:
            # Find and delete the cascade entry in the parent menu that
            # refers to this menu (and its help message).
            parentMenu = self.component(parentName + '-menu')

            menu = self.component(menuName + '-menu')
            menuId = str(menu)
            for item in range(parentMenu.index('end') + 1):
                if parentMenu.type(item) == 'cascade':
                    itemMenu = str(parentMenu.entrycget(item, 'menu'))
                    if itemMenu == menuId:
                        parentMenu.delete(item)
                        del self._menuInfo[parentName][1][item]
                        break

        self.destroycomponent(menuName + '-menu')

    def disableall(self):
        # Disable every toplevel menubutton.
        for menuName in self._menuInfo.keys():
            if self._menuInfo[menuName][0] is None:
                menubutton = self.component(menuName + '-button')
                menubutton.configure(state = 'disabled')

    def enableall(self):
        # Enable every toplevel menubutton.
        for menuName in self._menuInfo.keys():
            if self._menuInfo[menuName][0] is None:
                menubutton = self.component(menuName + '-button')
                menubutton.configure(state = 'normal')

    def addmenu(self, menuName, balloonHelp, statusHelp = None,
            side = 'left', traverseSpec = None, **kw):
        # Add a toplevel menu (a Menubutton plus its Menu).
        self._addmenu(None, menuName, balloonHelp, statusHelp,
            traverseSpec, side, 'text', kw)

    def addcascademenu(self, parentMenuName, menuName, statusHelp = '',
            traverseSpec = None, **kw):
        # Add a cascade menu under an existing menu.
        self._addmenu(parentMenuName, menuName, None, statusHelp,
            traverseSpec, None, 'label', kw)

    def _addmenu(self, parentMenuName, menuName, balloonHelp, statusHelp,
            traverseSpec, side, textKey, kw):
        # Common implementation for addmenu() and addcascademenu().
        # <textKey> is 'text' for Menubuttons, 'label' for cascade
        # entries.
        if (menuName + '-menu') in self.components():
            raise ValueError, 'menu "%s" already exists' % menuName

        # Separate the 'tearoff' option, which configures the menu
        # component rather than the button/cascade entry.
        menukw = {}
        if kw.has_key('tearoff'):
            menukw['tearoff'] = kw['tearoff']
            del kw['tearoff']
        else:
            menukw['tearoff'] = 0

        if not kw.has_key(textKey):
            kw[textKey] = menuName

        self._addHotkeyToOptions(parentMenuName, kw, textKey, traverseSpec)

        if parentMenuName is None:
            # Toplevel menu: create and pack a Menubutton.
            button = apply(self.createcomponent, (menuName + '-button',
                    (), 'Button',
                    Tkinter.Menubutton, (self.interior(),)), kw)
            button.pack(side=side, padx = self['padx'])
            balloon = self['balloon']
            if balloon is not None:
                balloon.bind(button, balloonHelp, statusHelp)
            parentMenu = button
        else:
            # Cascade menu: add an entry to the parent menu.
            parentMenu = self.component(parentMenuName + '-menu')
            apply(parentMenu.add_cascade, (), kw)
            self._menuInfo[parentMenuName][1].append(statusHelp)

        # Create the menu component and attach it.
        menu = apply(self.createcomponent, (menuName + '-menu',
                (), 'Menu',
                Tkinter.Menu, (parentMenu,)), menukw)
        if parentMenuName is None:
            button.configure(menu = menu)
        else:
            parentMenu.entryconfigure('end', menu = menu)

        # Need to put this binding after the class bindings so that
        # menu.index() does not lag behind.
        _bindtag = 'PmwMenuBar' + str(self) + menuName
        bindId = self.bind_class(_bindtag, '<Motion>',
            lambda event=None, self=self, menuName=menuName:
                    self._menuHelp(menuName))
        menu.bindtags(menu.bindtags() + (_bindtag,))
        menu.bind('<Leave>', self._resetHelpmessage)

        self._menuInfo[menuName] = (parentMenuName, [], bindId)

    def addmenuitem(self, menuName, itemType, statusHelp = '',
            traverseSpec = None, **kw):
        # Add an item of the given type to menu <menuName>.
        menu = self.component(menuName + '-menu')
        if itemType != 'separator':
            self._addHotkeyToOptions(menuName, kw, 'label', traverseSpec)

        # Dispatch to the appropriate Tkinter.Menu add_* method.
        if itemType == 'command':
            command = menu.add_command
        elif itemType == 'separator':
            command = menu.add_separator
        elif itemType == 'checkbutton':
            command = menu.add_checkbutton
        elif itemType == 'radiobutton':
            command = menu.add_radiobutton
        elif itemType == 'cascade':
            command = menu.add_cascade
        else:
            raise ValueError, 'unknown menuitem type "%s"' % itemType

        self._menuInfo[menuName][1].append(statusHelp)
        apply(command, (), kw)

    def _addHotkeyToOptions(self, menuName, kw, textKey, traverseSpec):
        # Choose an 'underline' index for the new entry's label, unless
        # hotkeys are disabled or the caller supplied one.
        if (not self['hotkeys'] or kw.has_key('underline') or
                not kw.has_key(textKey)):
            return

        # An integer traverseSpec is used directly as the index.
        if type(traverseSpec) == types.IntType:
            kw['underline'] = traverseSpec
            return

        # Collect the hotkeys already in use by the siblings: the
        # toplevel menubuttons when adding a toplevel menu, otherwise
        # the entries of the parent menu.
        hotkeyList = []
        if menuName is None:
            for menuName in self._menuInfo.keys():
                if self._menuInfo[menuName][0] is None:
                    menubutton = self.component(menuName + '-button')
                    underline = string.atoi(str(menubutton.cget('underline')))
                    if underline != -1:
                        label = str(menubutton.cget(textKey))
                        if underline < len(label):
                            hotkey = string.lower(label[underline])
                            if hotkey not in hotkeyList:
                                hotkeyList.append(hotkey)
        else:
            menu = self.component(menuName + '-menu')
            end = menu.index('end')
            if end is not None:
                for item in range(end + 1):
                    if menu.type(item) not in ('separator', 'tearoff'):
                        underline = string.atoi(
                            str(menu.entrycget(item, 'underline')))
                        if underline != -1:
                            label = str(menu.entrycget(item, textKey))
                            if underline < len(label):
                                hotkey = string.lower(label[underline])
                                if hotkey not in hotkeyList:
                                    hotkeyList.append(hotkey)

        name = kw[textKey]

        if type(traverseSpec) == types.StringType:
            # A string traverseSpec names the character to underline,
            # provided it occurs in the label and is not already used.
            lowerLetter = string.lower(traverseSpec)
            if traverseSpec in name and lowerLetter not in hotkeyList:
                kw['underline'] = string.index(name, traverseSpec)
        else:
            # Otherwise pick the first unused alphanumeric character.
            targets = string.digits + string.letters
            lowerName = string.lower(name)
            for letter_index in range(len(name)):
                letter = lowerName[letter_index]
                if letter in targets and letter not in hotkeyList:
                    kw['underline'] = letter_index
                    break

    def _menuHelp(self, menuName):
        # <Motion> binding: show status help for the active entry in
        # the balloon, if a balloon is configured.
        menu = self.component(menuName + '-menu')
        index = menu.index('active')

        balloon = self['balloon']
        if balloon is not None:
            if index is None:
                balloon.showstatus('')
            else:
                if str(menu.cget('tearoff')) == '1':
                    # Account for the tearoff entry at index 0.
                    index = index - 1
                if index >= 0:
                    help = self._menuInfo[menuName][1][index]
                    balloon.showstatus(help)

    def _resetHelpmessage(self, event=None):
        # <Leave> binding: clear any status help being displayed.
        balloon = self['balloon']
        if balloon is not None:
            balloon.clearstatus()
######################################################################
### File: PmwMessageBar.py
# Class to display messages in an information line.
import string
import Tkinter
class MessageBar(MegaWidget):
    # Class to display messages in an information line.
    #
    # Messages are prioritised by type: only the highest-priority
    # active message is displayed, and each type may be configured to
    # time out, ring the bell and (eventually) be logged.

    def __init__(self, parent = None, **kw):

        # Define the megawidget options.
        defaultMessageTypes = {
                           # (priority, showtime, bells, logmessage)
            'systemerror'  : (5, 10, 2, 1),
            'usererror'    : (4, 5, 1, 0),
            'busy'         : (3, 0, 0, 0),
            'systemevent'  : (2, 5, 0, 0),
            'userevent'    : (2, 5, 0, 0),
            'help'         : (1, 5, 0, 0),
            'state'        : (0, 0, 0, 0),
        }
        optiondefs = (
            ('labelmargin',   0,                   INITOPT),
            ('labelpos',      None,                INITOPT),
            ('messagetypes',  defaultMessageTypes, INITOPT),
            ('silent',        0,                   None),
            ('sticky',        'ew',                INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        interior = self.interior()
        self._messageBarEntry = self.createcomponent('entry',
                (), None,
                Tkinter.Entry, (interior,))

        # Can't always use 'disabled', since this greys out text in Tk 8.4.2
        try:
            self._messageBarEntry.configure(state = 'readonly')
        except Tkinter.TclError:
            self._messageBarEntry.configure(state = 'disabled')

        self._messageBarEntry.grid(column=2, row=2, sticky=self['sticky'])
        interior.grid_columnconfigure(2, weight=1)
        interior.grid_rowconfigure(2, weight=1)

        self.createlabel(interior)

        # Initialise instance variables.
        # The number of priority levels is one more than the highest
        # priority used by any configured message type.
        self._numPriorities = 0
        for info in self['messagetypes'].values():
            if self._numPriorities < info[0]:
                self._numPriorities = info[0]
        self._numPriorities = self._numPriorities + 1

        # Per-priority state: pending 'after' timer id, last message
        # text and whether a message at that priority is active.
        self._timer = [None] * self._numPriorities
        self._messagetext = [''] * self._numPriorities
        self._activemessage = [0] * self._numPriorities

        # Check keywords and initialise options.
        self.initialiseoptions()

    def destroy(self):
        # Cancel any outstanding timers before destroying the widget.
        for timerId in self._timer:
            if timerId is not None:
                self.after_cancel(timerId)
        self._timer = [None] * self._numPriorities
        MegaWidget.destroy(self)

    def message(self, type, text):
        # Display a message in the message bar.
        (priority, showtime, bells, logmessage) = self['messagetypes'][type]

        if not self['silent']:
            # Ring the bell <bells> times, pausing 100ms between rings.
            for i in range(bells):
                if i != 0:
                    self.after(100)
                self.bell()

        self._activemessage[priority] = 1
        if text is None:
            text = ''
        # Newlines cannot be displayed in a single-line Entry widget.
        self._messagetext[priority] = string.replace(text, '\n', ' ')
        self._redisplayInfoMessage()

        if logmessage:
            # Should log this text to a text widget.
            pass

        if showtime > 0:
            if self._timer[priority] is not None:
                self.after_cancel(self._timer[priority])

            # Define a callback to clear this message after a time.
            def _clearmessage(self=self, priority=priority):
                self._clearActivemessage(priority)

            mseconds = int(showtime * 1000)
            self._timer[priority] = self.after(mseconds, _clearmessage)

    def helpmessage(self, text):
        # Convenience wrapper: show a 'help' type message, or clear it
        # when <text> is None.
        if text is None:
            self.resetmessages('help')
        else:
            self.message('help', text)

    def resetmessages(self, type):
        # Clear the message of the given type, together with any
        # lower-priority messages that have a timeout (non-zero
        # showtime).
        priority = self['messagetypes'][type][0]
        self._clearActivemessage(priority)
        for messagetype, info in self['messagetypes'].items():
            thisPriority = info[0]
            showtime = info[1]
            if thisPriority < priority and showtime != 0:
                self._clearActivemessage(thisPriority)

    def _clearActivemessage(self, priority):
        # Deactivate the message at <priority>, cancel its timer and
        # redisplay whichever message remains active.
        self._activemessage[priority] = 0
        if self._timer[priority] is not None:
            self.after_cancel(self._timer[priority])
            self._timer[priority] = None
        self._redisplayInfoMessage()

    def _redisplayInfoMessage(self):
        # Show the text of the highest-priority active message, or
        # clear the entry if none is active.
        text = ''
        for priority in range(self._numPriorities - 1, -1, -1):
            if self._activemessage[priority]:
                text = self._messagetext[priority]
                break
        self._messageBarEntry.configure(state = 'normal')
        self._messageBarEntry.delete(0, 'end')
        self._messageBarEntry.insert('end', text)

        # Can't always use 'disabled', since this greys out text in Tk 8.4.2
        try:
            self._messageBarEntry.configure(state = 'readonly')
        except Tkinter.TclError:
            self._messageBarEntry.configure(state = 'disabled')

# Forward the Tkinter.Entry methods through the entry component.
forwardmethods(MessageBar, Tkinter.Entry, '_messageBarEntry')
######################################################################
### File: PmwMessageDialog.py
# Based on iwidgets2.2.0/messagedialog.itk code.
import Tkinter
class MessageDialog(Dialog):
    # Dialog window displaying a message label, optionally with an
    # icon placed beside or above/below it.
    # Based on iwidgets2.2.0/messagedialog.itk code.

    def __init__(self, parent = None, **kw):

        # Define the megawidget options.
        optiondefs = (
            ('borderx',     20,    INITOPT),
            ('bordery',     20,    INITOPT),
            ('iconmargin',  20,    INITOPT),
            ('iconpos',     None,  INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        Dialog.__init__(self, parent)

        # Create the components.
        interior = self.interior()

        self._message = self.createcomponent('message',
                (), None,
                Tkinter.Label, (interior,))

        iconpos = self['iconpos']
        iconmargin = self['iconmargin']
        borderx = self['borderx']
        bordery = self['bordery']
        # Grid index of the empty border column/row on the far side;
        # grows to 4 when the icon adds a margin column/row.
        border_right = 2
        border_bottom = 2
        if iconpos is None:
            self._message.grid(column = 1, row = 1)
        else:
            self._icon = self.createcomponent('icon',
                    (), None,
                    Tkinter.Label, (interior,))
            if iconpos not in 'nsew':
                raise ValueError, \
                    'bad iconpos option "%s": should be n, s, e, or w' \
                        % iconpos

            # Icon before the message for 'n'/'w', after for 's'/'e'.
            if iconpos in 'nw':
                icon = 1
                message = 3
            else:
                icon = 3
                message = 1

            if iconpos in 'ns':
                # vertical layout
                self._icon.grid(column = 1, row = icon)
                self._message.grid(column = 1, row = message)
                interior.grid_rowconfigure(2, minsize = iconmargin)
                border_bottom = 4
            else:
                # horizontal layout
                self._icon.grid(column = icon, row = 1)
                self._message.grid(column = message, row = 1)
                interior.grid_columnconfigure(2, minsize = iconmargin)
                border_right = 4

        # Surround the contents with empty border rows and columns.
        interior.grid_columnconfigure(0, minsize = borderx)
        interior.grid_rowconfigure(0, minsize = bordery)
        interior.grid_columnconfigure(border_right, minsize = borderx)
        interior.grid_rowconfigure(border_bottom, minsize = bordery)

        # Check keywords and initialise options.
        self.initialiseoptions()
######################################################################
### File: PmwNoteBook.py
import string
import types
import Tkinter
class NoteBook(MegaArchetype):
def __init__(self, parent = None, **kw):
# Define the megawidget options.
optiondefs = (
('hull_highlightthickness', 0, None),
('hull_borderwidth', 0, None),
('arrownavigation', 1, INITOPT),
('borderwidth', 2, INITOPT),
('createcommand', None, None),
('lowercommand', None, None),
('pagemargin', 4, INITOPT),
('raisecommand', None, None),
('tabpos', 'n', INITOPT),
)
self.defineoptions(kw, optiondefs, dynamicGroups = ('Page', 'Tab'))
# Initialise the base class (after defining the options).
MegaArchetype.__init__(self, parent, Tkinter.Canvas)
self.bind('<Map>', self._handleMap)
self.bind('<Configure>', self._handleConfigure)
tabpos = self['tabpos']
if tabpos is not None and tabpos != 'n':
raise ValueError, \
'bad tabpos option %s: should be n or None' % repr(tabpos)
self._withTabs = (tabpos is not None)
self._pageMargin = self['pagemargin']
self._borderWidth = self['borderwidth']
# Use a dictionary as a set of bits indicating what needs to
# be redisplayed the next time _layout() is called. If
# dictionary contains 'topPage' key, the value is the new top
# page to be displayed. None indicates that all pages have
# been deleted and that _layout() should draw a border under where
# the tabs should be.
self._pending = {}
self._pending['size'] = 1
self._pending['borderColor'] = 1
self._pending['topPage'] = None
if self._withTabs:
self._pending['tabs'] = 1
self._canvasSize = None # This gets set by <Configure> events
# Set initial height of space for tabs
if self._withTabs:
self.tabBottom = 35
else:
self.tabBottom = 0
self._lightBorderColor, self._darkBorderColor = \
Color.bordercolors(self, self['hull_background'])
self._pageNames = [] # List of page names
# Map from page name to page info. Each item is itself a
# dictionary containing the following items:
# page the Tkinter.Frame widget for the page
# created set to true the first time the page is raised
# tabbutton the Tkinter.Button widget for the button (if any)
# tabreqwidth requested width of the tab
# tabreqheight requested height of the tab
# tabitems the canvas items for the button: the button
# window item, the lightshadow and the darkshadow
# left the left and right canvas coordinates of the tab
# right
self._pageAttrs = {}
# Name of page currently on top (actually displayed, using
# create_window, not pending). Ignored if current top page
# has been deleted or new top page is pending. None indicates
# no pages in notebook.
self._topPageName = None
# Canvas items used:
# Per tab:
# top and left shadow
# right shadow
# button
# Per notebook:
# page
# top page
# left shadow
# bottom and right shadow
# top (one or two items)
# Canvas tags used:
# lighttag - top and left shadows of tabs and page
# darktag - bottom and right shadows of tabs and page
# (if no tabs then these are reversed)
# (used to color the borders by recolorborders)
# Create page border shadows.
if self._withTabs:
self._pageLeftBorder = self.create_polygon(0, 0, 0, 0, 0, 0,
fill = self._lightBorderColor, tags = 'lighttag')
self._pageBottomRightBorder = self.create_polygon(0, 0, 0, 0, 0, 0,
fill = self._darkBorderColor, tags = 'darktag')
self._pageTop1Border = self.create_polygon(0, 0, 0, 0, 0, 0,
fill = self._darkBorderColor, tags = 'lighttag')
self._pageTop2Border = self.create_polygon(0, 0, 0, 0, 0, 0,
fill = self._darkBorderColor, tags = 'lighttag')
else:
self._pageLeftBorder = self.create_polygon(0, 0, 0, 0, 0, 0,
fill = self._darkBorderColor, tags = 'darktag')
self._pageBottomRightBorder = self.create_polygon(0, 0, 0, 0, 0, 0,
fill = self._lightBorderColor, tags = 'lighttag')
self._pageTopBorder = self.create_polygon(0, 0, 0, 0, 0, 0,
fill = self._darkBorderColor, tags = 'darktag')
# Check keywords and initialise options.
self.initialiseoptions()
def insert(self, pageName, before = 0, **kw):
if self._pageAttrs.has_key(pageName):
msg = 'Page "%s" already exists.' % pageName
raise ValueError, msg
# Do this early to catch bad <before> spec before creating any items.
beforeIndex = self.index(before, 1)
pageOptions = {}
if self._withTabs:
# Default tab button options.
tabOptions = {
'text' : pageName,
'borderwidth' : 0,
}
# Divide the keyword options into the 'page_' and 'tab_' options.
for key in kw.keys():
if key[:5] == 'page_':
pageOptions[key[5:]] = kw[key]
del kw[key]
elif self._withTabs and key[:4] == 'tab_':
tabOptions[key[4:]] = kw[key]
del kw[key]
else:
raise KeyError, 'Unknown option "' + key + '"'
# Create the frame to contain the page.
page = apply(self.createcomponent, (pageName,
(), 'Page',
Tkinter.Frame, self._hull), pageOptions)
attributes = {}
attributes['page'] = page
attributes['created'] = 0
if self._withTabs:
# Create the button for the tab.
def raiseThisPage(self = self, pageName = pageName):
self.selectpage(pageName)
tabOptions['command'] = raiseThisPage
tab = apply(self.createcomponent, (pageName + '-tab',
(), 'Tab',
Tkinter.Button, self._hull), tabOptions)
if self['arrownavigation']:
# Allow the use of the arrow keys for Tab navigation:
def next(event, self = self, pageName = pageName):
self.nextpage(pageName)
def prev(event, self = self, pageName = pageName):
self.previouspage(pageName)
tab.bind('<Left>', prev)
tab.bind('<Right>', next)
attributes['tabbutton'] = tab
attributes['tabreqwidth'] = tab.winfo_reqwidth()
attributes['tabreqheight'] = tab.winfo_reqheight()
# Create the canvas item to manage the tab's button and the items
# for the tab's shadow.
windowitem = self.create_window(0, 0, window = tab, anchor = 'nw')
lightshadow = self.create_polygon(0, 0, 0, 0, 0, 0,
tags = 'lighttag', fill = self._lightBorderColor)
darkshadow = self.create_polygon(0, 0, 0, 0, 0, 0,
tags = 'darktag', fill = self._darkBorderColor)
attributes['tabitems'] = (windowitem, lightshadow, darkshadow)
self._pending['tabs'] = 1
self._pageAttrs[pageName] = attributes
self._pageNames.insert(beforeIndex, pageName)
# If this is the first page added, make it the new top page
# and call the create and raise callbacks.
if self.getcurselection() is None:
self._pending['topPage'] = pageName
self._raiseNewTop(pageName)
self._layout()
return page
def add(self, pageName, **kw):
return apply(self.insert, (pageName, len(self._pageNames)), kw)
def delete(self, *pageNames):
newTopPage = 0
for page in pageNames:
pageIndex = self.index(page)
pageName = self._pageNames[pageIndex]
pageInfo = self._pageAttrs[pageName]
if self.getcurselection() == pageName:
if len(self._pageNames) == 1:
newTopPage = 0
self._pending['topPage'] = None
elif pageIndex == len(self._pageNames) - 1:
newTopPage = 1
self._pending['topPage'] = self._pageNames[pageIndex - 1]
else:
newTopPage = 1
self._pending['topPage'] = self._pageNames[pageIndex + 1]
if self._topPageName == pageName:
self._hull.delete(self._topPageItem)
self._topPageName = None
if self._withTabs:
self.destroycomponent(pageName + '-tab')
apply(self._hull.delete, pageInfo['tabitems'])
self.destroycomponent(pageName)
del self._pageAttrs[pageName]
del self._pageNames[pageIndex]
# If the old top page was deleted and there are still pages
# left in the notebook, call the create and raise callbacks.
if newTopPage:
pageName = self._pending['topPage']
self._raiseNewTop(pageName)
if self._withTabs:
self._pending['tabs'] = 1
self._layout()
def page(self, pageIndex):
pageName = self._pageNames[self.index(pageIndex)]
return self._pageAttrs[pageName]['page']
def pagenames(self):
return list(self._pageNames)
def getcurselection(self):
if self._pending.has_key('topPage'):
return self._pending['topPage']
else:
return self._topPageName
def tab(self, pageIndex):
if self._withTabs:
pageName = self._pageNames[self.index(pageIndex)]
return self._pageAttrs[pageName]['tabbutton']
else:
return None
def index(self, index, forInsert = 0):
listLength = len(self._pageNames)
if type(index) == types.IntType:
if forInsert and index <= listLength:
return index
elif not forInsert and index < listLength:
return index
else:
raise ValueError, 'index "%s" is out of range' % index
elif index is END:
if forInsert:
return listLength
elif listLength > 0:
return listLength - 1
else:
raise ValueError, 'NoteBook has no pages'
elif index is SELECT:
if listLength == 0:
raise ValueError, 'NoteBook has no pages'
return self._pageNames.index(self.getcurselection())
else:
if index in self._pageNames:
return self._pageNames.index(index)
validValues = 'a name, a number, END or SELECT'
raise ValueError, \
'bad index "%s": must be %s' % (index, validValues)
def selectpage(self, page):
pageName = self._pageNames[self.index(page)]
oldTopPage = self.getcurselection()
if pageName != oldTopPage:
self._pending['topPage'] = pageName
if oldTopPage == self._topPageName:
self._hull.delete(self._topPageItem)
cmd = self['lowercommand']
if cmd is not None:
cmd(oldTopPage)
self._raiseNewTop(pageName)
self._layout()
# Set focus to the tab of new top page:
if self._withTabs and self['arrownavigation']:
self._pageAttrs[pageName]['tabbutton'].focus_set()
def previouspage(self, pageIndex = None):
if pageIndex is None:
curpage = self.index(SELECT)
else:
curpage = self.index(pageIndex)
if curpage > 0:
self.selectpage(curpage - 1)
def nextpage(self, pageIndex = None):
if pageIndex is None:
curpage = self.index(SELECT)
else:
curpage = self.index(pageIndex)
if curpage < len(self._pageNames) - 1:
self.selectpage(curpage + 1)
def setnaturalsize(self, pageNames = None):
self.update_idletasks()
maxPageWidth = 1
maxPageHeight = 1
if pageNames is None:
pageNames = self.pagenames()
for pageName in pageNames:
pageInfo = self._pageAttrs[pageName]
page = pageInfo['page']
w = page.winfo_reqwidth()
h = page.winfo_reqheight()
if maxPageWidth < w:
maxPageWidth = w
if maxPageHeight < h:
maxPageHeight = h
pageBorder = self._borderWidth + self._pageMargin
width = maxPageWidth + pageBorder * 2
height = maxPageHeight + pageBorder * 2
if self._withTabs:
maxTabHeight = 0
for pageInfo in self._pageAttrs.values():
if maxTabHeight < pageInfo['tabreqheight']:
maxTabHeight = pageInfo['tabreqheight']
height = height + maxTabHeight + self._borderWidth * 1.5
# Note that, since the hull is a canvas, the width and height
# options specify the geometry *inside* the borderwidth and
# highlightthickness.
self.configure(hull_width = width, hull_height = height)
    def recolorborders(self):
        """Schedule a recomputation of the border colors; _layout()
        picks the flag up from self._pending."""
        self._pending['borderColor'] = 1
        self._layout()
    def _handleMap(self, event):
        # <Map> event handler: apply any pending layout work now that
        # the widget is visible (_layout is a no-op while unmapped).
        self._layout()
    def _handleConfigure(self, event):
        # <Configure> event handler: remember the new canvas size and
        # redraw everything that depends on it.
        self._canvasSize = (event.width, event.height)
        self._pending['size'] = 1
        self._layout()
def _raiseNewTop(self, pageName):
if not self._pageAttrs[pageName]['created']:
self._pageAttrs[pageName]['created'] = 1
cmd = self['createcommand']
if cmd is not None:
cmd(pageName)
cmd = self['raisecommand']
if cmd is not None:
cmd(pageName)
# This is the vertical layout of the notebook, from top (assuming
# tabpos is 'n'):
# hull highlightthickness (top)
# hull borderwidth (top)
# borderwidth (top border of tabs)
# borderwidth * 0.5 (space for bevel)
# tab button (maximum of requested height of all tab buttons)
# borderwidth (border between tabs and page)
# pagemargin (top)
# the page itself
# pagemargin (bottom)
# borderwidth (border below page)
# hull borderwidth (bottom)
# hull highlightthickness (bottom)
#
# canvasBorder is sum of top two elements.
# tabBottom is sum of top five elements.
#
# Horizontal layout (and also vertical layout when tabpos is None):
# hull highlightthickness
# hull borderwidth
# borderwidth
# pagemargin
# the page itself
# pagemargin
# borderwidth
# hull borderwidth
# hull highlightthickness
#
    def _layout(self):
        """Recompute and redraw the notebook: tabs, border shadows and
        the top page window item.

        Work is driven by the self._pending dictionary (possible keys:
        'topPage', 'tabs', 'size', 'borderColor'), which is cleared on
        exit.  Does nothing until the widget is mapped and the first
        <Configure> event has supplied the canvas size.
        """
        if not self.winfo_ismapped() or self._canvasSize is None:
            # Don't layout if the window is not displayed, or we
            # haven't yet received a <Configure> event.
            return
        hullWidth, hullHeight = self._canvasSize
        borderWidth = self._borderWidth
        canvasBorder = string.atoi(self._hull['borderwidth']) + \
            string.atoi(self._hull['highlightthickness'])
        if not self._withTabs:
            self.tabBottom = canvasBorder
        # Remember the old tab-row bottom so we can tell whether the
        # page area moved and must be re-positioned below.
        oldTabBottom = self.tabBottom
        if self._pending.has_key('borderColor'):
            self._lightBorderColor, self._darkBorderColor = \
                Color.bordercolors(self, self['hull_background'])
        # Draw all the tabs.
        if self._withTabs and (self._pending.has_key('tabs') or
            self._pending.has_key('size')):
            # Find total requested width and maximum requested height
            # of tabs.
            sumTabReqWidth = 0
            maxTabHeight = 0
            for pageInfo in self._pageAttrs.values():
                sumTabReqWidth = sumTabReqWidth + pageInfo['tabreqwidth']
                if maxTabHeight < pageInfo['tabreqheight']:
                    maxTabHeight = pageInfo['tabreqheight']
            if maxTabHeight != 0:
                # Add the top tab border plus a bit for the angled corners
                self.tabBottom = canvasBorder + maxTabHeight + borderWidth * 1.5
            # Prepare for drawing the border around each tab button.
            tabTop = canvasBorder
            tabTop2 = tabTop + borderWidth
            tabTop3 = tabTop + borderWidth * 1.5
            tabBottom2 = self.tabBottom
            tabBottom = self.tabBottom + borderWidth
            numTabs = len(self._pageNames)
            availableWidth = hullWidth - 2 * canvasBorder - \
                numTabs * 2 * borderWidth
            x = canvasBorder
            cumTabReqWidth = 0
            cumTabWidth = 0
            # Position all the tabs.
            for pageName in self._pageNames:
                pageInfo = self._pageAttrs[pageName]
                (windowitem, lightshadow, darkshadow) = pageInfo['tabitems']
                if sumTabReqWidth <= availableWidth:
                    tabwidth = pageInfo['tabreqwidth']
                else:
                    # This ugly calculation ensures that, when the
                    # notebook is not wide enough for the requested
                    # widths of the tabs, the total width given to
                    # the tabs exactly equals the available width,
                    # without rounding errors.
                    # NOTE(review): relies on Python 2 integer division.
                    cumTabReqWidth = cumTabReqWidth + pageInfo['tabreqwidth']
                    tmp = (2*cumTabReqWidth*availableWidth + sumTabReqWidth) \
                        / (2 * sumTabReqWidth)
                    tabwidth = tmp - cumTabWidth
                    cumTabWidth = tmp
                # Position the tab's button canvas item.
                self.coords(windowitem, x + borderWidth, tabTop3)
                self.itemconfigure(windowitem,
                    width = tabwidth, height = maxTabHeight)
                # Make a beautiful border around the tab.
                left = x
                left2 = left + borderWidth
                left3 = left + borderWidth * 1.5
                right = left + tabwidth + 2 * borderWidth
                right2 = left + tabwidth + borderWidth
                right3 = left + tabwidth + borderWidth * 0.5
                self.coords(lightshadow,
                    left, tabBottom2, left, tabTop2, left2, tabTop,
                    right2, tabTop, right3, tabTop2, left3, tabTop2,
                    left2, tabTop3, left2, tabBottom,
                    )
                self.coords(darkshadow,
                    right2, tabTop, right, tabTop2, right, tabBottom2,
                    right2, tabBottom, right2, tabTop3, right3, tabTop2,
                    )
                pageInfo['left'] = left
                pageInfo['right'] = right
                x = x + tabwidth + 2 * borderWidth
        # Redraw shadow under tabs so that it appears that tab for old
        # top page is lowered and that tab for new top page is raised.
        if self._withTabs and (self._pending.has_key('topPage') or
            self._pending.has_key('tabs') or self._pending.has_key('size')):
            if self.getcurselection() is None:
                # No pages, so draw line across top of page area.
                self.coords(self._pageTop1Border,
                    canvasBorder, self.tabBottom,
                    hullWidth - canvasBorder, self.tabBottom,
                    hullWidth - canvasBorder - borderWidth,
                    self.tabBottom + borderWidth,
                    borderWidth + canvasBorder, self.tabBottom + borderWidth,
                    )
                # Ignore second top border.
                self.coords(self._pageTop2Border, 0, 0, 0, 0, 0, 0)
            else:
                # Draw two lines, one on each side of the tab for the
                # top page, so that the tab appears to be raised.
                pageInfo = self._pageAttrs[self.getcurselection()]
                left = pageInfo['left']
                right = pageInfo['right']
                self.coords(self._pageTop1Border,
                    canvasBorder, self.tabBottom,
                    left, self.tabBottom,
                    left + borderWidth, self.tabBottom + borderWidth,
                    canvasBorder + borderWidth, self.tabBottom + borderWidth,
                    )
                self.coords(self._pageTop2Border,
                    right, self.tabBottom,
                    hullWidth - canvasBorder, self.tabBottom,
                    hullWidth - canvasBorder - borderWidth,
                    self.tabBottom + borderWidth,
                    right - borderWidth, self.tabBottom + borderWidth,
                    )
            # Prevent bottom of dark border of tabs appearing over
            # page top border.
            self.tag_raise(self._pageTop1Border)
            self.tag_raise(self._pageTop2Border)
        # Position the page border shadows.
        if self._pending.has_key('size') or oldTabBottom != self.tabBottom:
            self.coords(self._pageLeftBorder,
                canvasBorder, self.tabBottom,
                borderWidth + canvasBorder,
                self.tabBottom + borderWidth,
                borderWidth + canvasBorder,
                hullHeight - canvasBorder - borderWidth,
                canvasBorder, hullHeight - canvasBorder,
                )
            self.coords(self._pageBottomRightBorder,
                hullWidth - canvasBorder, self.tabBottom,
                hullWidth - canvasBorder, hullHeight - canvasBorder,
                canvasBorder, hullHeight - canvasBorder,
                borderWidth + canvasBorder,
                hullHeight - canvasBorder - borderWidth,
                hullWidth - canvasBorder - borderWidth,
                hullHeight - canvasBorder - borderWidth,
                hullWidth - canvasBorder - borderWidth,
                self.tabBottom + borderWidth,
                )
            if not self._withTabs:
                self.coords(self._pageTopBorder,
                    canvasBorder, self.tabBottom,
                    hullWidth - canvasBorder, self.tabBottom,
                    hullWidth - canvasBorder - borderWidth,
                    self.tabBottom + borderWidth,
                    borderWidth + canvasBorder, self.tabBottom + borderWidth,
                    )
        # Color borders.
        if self._pending.has_key('borderColor'):
            self.itemconfigure('lighttag', fill = self._lightBorderColor)
            self.itemconfigure('darktag', fill = self._darkBorderColor)
        newTopPage = self._pending.get('topPage')
        pageBorder = borderWidth + self._pageMargin
        # Raise new top page.
        if newTopPage is not None:
            self._topPageName = newTopPage
            self._topPageItem = self.create_window(
                pageBorder + canvasBorder, self.tabBottom + pageBorder,
                window = self._pageAttrs[newTopPage]['page'],
                anchor = 'nw',
                )
        # Change position of top page if tab height has changed.
        if self._topPageName is not None and oldTabBottom != self.tabBottom:
            self.coords(self._topPageItem,
                pageBorder + canvasBorder, self.tabBottom + pageBorder)
        # Change size of top page if,
        # 1) there is a new top page.
        # 2) canvas size has changed, but not if there is no top
        #    page (eg: initially or when all pages deleted).
        # 3) tab height has changed, due to difference in the height of a tab
        if (newTopPage is not None or \
            self._pending.has_key('size') and self._topPageName is not None
            or oldTabBottom != self.tabBottom):
            self.itemconfigure(self._topPageItem,
                width = hullWidth - 2 * canvasBorder - pageBorder * 2,
                height = hullHeight - 2 * canvasBorder - pageBorder * 2 -
                    (self.tabBottom - canvasBorder),
                )
        # All pending work has been applied.
        self._pending = {}
# Forward unknown method calls (pack, grid, etc.) from NoteBook to its
# hull canvas.  Unfortunately this means that all the other canvas
# methods are also forwarded.
forwardmethods(NoteBook, Tkinter.Canvas, '_hull')
######################################################################
### File: PmwOptionMenu.py
import types
import Tkinter
import sys
class OptionMenu(MegaWidget):
    # A labelled Menubutton with an attached drop-down Menu, similar to
    # Tk's tk_optionMenu: the user picks one item from a list and the
    # chosen item becomes the menubutton's text (or the value of the
    # menubutton's 'textvariable', if one is set).
    def __init__(self, parent = None, **kw):
        """Create the option menu and populate it from the 'items' and
        'initialitem' initialisation options."""
        # Define the megawidget options.
        optiondefs = (
            ('command', None, None),
            ('items', (), INITOPT),
            ('initialitem', None, INITOPT),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('sticky', 'ew', INITOPT),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)
        # Create the components.
        interior = self.interior()
        self._menubutton = self.createcomponent('menubutton',
            (), None,
            Tkinter.Menubutton, (interior,),
            borderwidth = 2,
            indicatoron = 1,
            relief = 'raised',
            anchor = 'c',
            highlightthickness = 2,
            direction = 'flush',
            takefocus = 1,
        )
        self._menubutton.grid(column = 2, row = 2, sticky = self['sticky'])
        self._menu = self.createcomponent('menu',
            (), None,
            Tkinter.Menu, (self._menubutton,),
            tearoff=0
        )
        self._menubutton.configure(menu = self._menu)
        interior.grid_columnconfigure(2, weight = 1)
        interior.grid_rowconfigure(2, weight = 1)
        # Create the label.
        self.createlabel(interior)
        # Add the items specified by the initialisation option.
        self._itemList = []
        self.setitems(self['items'], self['initialitem'])
        # Check keywords and initialise options.
        self.initialiseoptions()
    def setitems(self, items, index = None):
        """Replace the menu entries with *items* and select *index*.

        If *index* is None the current selection is kept when still
        valid; otherwise the first item is selected.
        """
        # Cleaning up old items is only required for Python < 2.5.4;
        # later versions delete the Tcl callback commands themselves.
        if sys.version_info < (2, 5, 4):
            # Clean up old items and callback commands.
            for oldIndex in range(len(self._itemList)):
                tclCommandName = str(self._menu.entrycget(oldIndex, 'command'))
                if tclCommandName != '':
                    self._menu.deletecommand(tclCommandName)
        self._menu.delete(0, 'end')
        self._itemList = list(items)
        # Set the items in the menu component.
        for item in items:
            # Bind item as a lambda default so each entry keeps its own
            # value (avoids the late-binding closure pitfall).
            self._menu.add_command(label = item,
                command = lambda self = self, item = item: self._invoke(item))
        # Set the currently selected value.
        if index is None:
            var = str(self._menubutton.cget('textvariable'))
            if var != '':
                # None means do not change text variable.
                return
            if len(items) == 0:
                text = ''
            elif str(self._menubutton.cget('text')) in items:
                # Do not change selection if it is still valid
                return
            else:
                text = items[0]
        else:
            index = self.index(index)
            text = self._itemList[index]
        self.setvalue(text)
    def getcurselection(self):
        """Return the currently selected item (from the textvariable if
        one is configured, else from the menubutton's text)."""
        var = str(self._menubutton.cget('textvariable'))
        if var == '':
            return str(self._menubutton.cget('text'))
        else:
            return self._menu.tk.globalgetvar(var)
    def getvalue(self):
        """Alias for getcurselection()."""
        return self.getcurselection()
    def setvalue(self, text):
        """Display *text* as the selection (via the textvariable if one
        is configured, else directly on the menubutton)."""
        var = str(self._menubutton.cget('textvariable'))
        if var == '':
            self._menubutton.configure(text = text)
        else:
            self._menu.tk.globalsetvar(var, text)
    def index(self, index):
        """Convert *index* (an item name, an integer, END or SELECT)
        into an integer index into the item list.  Raises ValueError
        for an unknown or out-of-range index."""
        listLength = len(self._itemList)
        if type(index) == types.IntType:
            # NOTE(review): negative in-range integers are accepted and
            # index from the end - confirm this is intended.
            if index < listLength:
                return index
            else:
                raise ValueError, 'index "%s" is out of range' % index
        elif index is END:
            if listLength > 0:
                return listLength - 1
            else:
                raise ValueError, 'OptionMenu has no items'
        else:
            if index is SELECT:
                if listLength > 0:
                    index = self.getcurselection()
                else:
                    raise ValueError, 'OptionMenu has no items'
            if index in self._itemList:
                return self._itemList.index(index)
            raise ValueError, \
                'bad index "%s": must be a ' \
                'name, a number, END or SELECT' % (index,)
    def invoke(self, index = SELECT):
        """Select the item at *index* and run the 'command' callback."""
        index = self.index(index)
        text = self._itemList[index]
        return self._invoke(text)
    def _invoke(self, text):
        # Internal: set the displayed value, then call the user's
        # 'command' callback (if any) with the selected text.
        self.setvalue(text)
        command = self['command']
        if callable(command):
            return command(text)
######################################################################
### File: PmwPanedWidget.py
# PanedWidget
# a frame which may contain several resizable sub-frames
import string
import sys
import types
import Tkinter
class PanedWidget(MegaWidget):
    # A frame containing several resizable sub-frames (panes), separated
    # by draggable separator lines with grab handles.  Pane sizes may be
    # given as absolute pixels (int) or as a fraction of the major
    # dimension (float); both forms are tracked in parallel in the
    # _size/_min/_max and _relsize/_relmin/_relmax dictionaries.
    def __init__(self, parent = None, **kw):
        """Create an empty paned widget; panes are added with add()/insert()."""
        # Define the megawidget options.
        optiondefs = (
            ('command', None, None),
            ('orient', 'vertical', INITOPT),
            ('separatorrelief', 'sunken', INITOPT),
            ('separatorthickness', 2, INITOPT),
            ('handlesize', 8, INITOPT),
            ('hull_width', 400, None),
            ('hull_height', 400, None),
        )
        self.defineoptions(kw, optiondefs,
            dynamicGroups = ('Frame', 'Separator', 'Handle'))
        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)
        self.bind('<Configure>', self._handleConfigure)
        if self['orient'] not in ('horizontal', 'vertical'):
            raise ValueError, 'bad orient option ' + repr(self['orient']) + \
                ': must be either \'horizontal\' or \'vertical\''
        self._separatorThickness = self['separatorthickness']
        self._handleSize = self['handlesize']
        self._paneNames = []            # List of pane names
        self._paneAttrs = {}            # Map from pane name to pane info
        self._timerId = None            # Pending after_idle id (see _btnMove)
        self._frame = {}                # Map from pane name to its Frame
        self._separator = []            # Separator widgets, by pane index
        self._button = []               # Handle widgets, by pane index
        self._totalSize = 0             # Sum of pane sizes plus separators
        self._movePending = 0           # True while a drag redraw is queued
        self._relsize = {}              # Relative (fractional) sizes
        self._relmin = {}
        self._relmax = {}
        self._size = {}                 # Absolute (pixel) sizes
        self._min = {}
        self._max = {}
        self._rootp = None              # Last motion event (for compression)
        self._curSize = None            # Separator position during a drag
        self._beforeLimit = None        # Drag limits (see _getMotionLimit)
        self._afterLimit = None
        self._buttonIsDown = 0
        self._majorSize = 100           # Size along the resizable axis
        self._minorSize = 100           # Size across the resizable axis
        # Check keywords and initialise options.
        self.initialiseoptions()
    def insert(self, name, before = 0, **kw):
        """Create a new pane called *name* before pane *before*.

        Keyword arguments 'size', 'min' and 'max' may be ints (pixels)
        or floats (fraction of the widget).  Returns the pane's Frame.
        """
        # Parse <kw> for options.
        self._initPaneOptions(name)
        self._parsePaneOptions(name, kw)
        insertPos = self._nameToIndex(before)
        atEnd = (insertPos == len(self._paneNames))
        # Add the frame.
        self._paneNames[insertPos:insertPos] = [name]
        self._frame[name] = self.createcomponent(name,
            (), 'Frame',
            Tkinter.Frame, (self.interior(),))
        # Add separator, if necessary.
        if len(self._paneNames) > 1:
            self._addSeparator()
        else:
            self._separator.append(None)
            self._button.append(None)
        # Add the new frame and adjust the PanedWidget
        if atEnd:
            size = self._size[name]
            if size > 0 or self._relsize[name] is not None:
                if self['orient'] == 'vertical':
                    self._frame[name].place(x=0, relwidth=1,
                                            height=size, y=self._totalSize)
                else:
                    self._frame[name].place(y=0, relheight=1,
                                            width=size, x=self._totalSize)
            else:
                if self['orient'] == 'vertical':
                    self._frame[name].place(x=0, relwidth=1,
                                            y=self._totalSize)
                else:
                    self._frame[name].place(y=0, relheight=1,
                                            x=self._totalSize)
        else:
            self._updateSizes()
        self._totalSize = self._totalSize + self._size[name]
        return self._frame[name]
    def add(self, name, **kw):
        """Append a new pane called *name* (see insert())."""
        return apply(self.insert, (name, len(self._paneNames)), kw)
    def delete(self, name):
        """Destroy pane *name* (or index) and its trailing separator/handle."""
        deletePos = self._nameToIndex(name)
        name = self._paneNames[deletePos]
        self.destroycomponent(name)
        del self._paneNames[deletePos]
        del self._frame[name]
        del self._size[name]
        del self._min[name]
        del self._max[name]
        del self._relsize[name]
        del self._relmin[name]
        del self._relmax[name]
        last = len(self._paneNames)
        del self._separator[last]
        del self._button[last]
        if last > 0:
            self.destroycomponent(self._sepName(last))
            self.destroycomponent(self._buttonName(last))
        self._plotHandles()
    def setnaturalsize(self):
        """Resize the hull so every pane gets its requested size."""
        self.update_idletasks()
        totalWidth = 0
        totalHeight = 0
        maxWidth = 0
        maxHeight = 0
        for name in self._paneNames:
            frame = self._frame[name]
            w = frame.winfo_reqwidth()
            h = frame.winfo_reqheight()
            totalWidth = totalWidth + w
            totalHeight = totalHeight + h
            if maxWidth < w:
                maxWidth = w
            if maxHeight < h:
                maxHeight = h
        # Note that, since the hull is a frame, the width and height
        # options specify the geometry *outside* the borderwidth and
        # highlightthickness.
        bw = string.atoi(str(self.cget('hull_borderwidth')))
        hl = string.atoi(str(self.cget('hull_highlightthickness')))
        extra = (bw + hl) * 2
        if str(self.cget('orient')) == 'horizontal':
            totalWidth = totalWidth + extra
            maxHeight = maxHeight + extra
            self.configure(hull_width = totalWidth, hull_height = maxHeight)
        else:
            totalHeight = (totalHeight + extra +
                    (len(self._paneNames) - 1) * self._separatorThickness)
            maxWidth = maxWidth + extra
            self.configure(hull_width = maxWidth, hull_height = totalHeight)
    def move(self, name, newPos, newPosOffset = 0):
        """Move pane *name* to position *newPos* (+ optional offset)."""
        # see if we can spare ourselves some work
        numPanes = len(self._paneNames)
        if numPanes < 2:
            return
        newPos = self._nameToIndex(newPos) + newPosOffset
        if newPos < 0 or newPos >=numPanes:
            return
        deletePos = self._nameToIndex(name)
        if deletePos == newPos:
            # inserting over ourself is a no-op
            return
        # delete name from old position in list
        name = self._paneNames[deletePos]
        del self._paneNames[deletePos]
        # place in new position
        self._paneNames[newPos:newPos] = [name]
        # force everything to redraw
        self._plotHandles()
        self._updateSizes()
    def _nameToIndex(self, nameOrIndex):
        # Accept either a pane name or an integer index; return the index.
        try:
            pos = self._paneNames.index(nameOrIndex)
        except ValueError:
            pos = nameOrIndex
        return pos
    def _initPaneOptions(self, name):
        # Set defaults.
        self._size[name] = 0
        self._relsize[name] = None
        self._min[name] = 0
        self._relmin[name] = None
        self._max[name] = 100000
        self._relmax[name] = None
    def _parsePaneOptions(self, name, args):
        # Parse <args> for options.  A float value is interpreted as a
        # fraction of the major dimension and also stored absolutely.
        for arg, value in args.items():
            if type(value) == types.FloatType:
                relvalue = value
                value = self._absSize(relvalue)
            else:
                relvalue = None
            if arg == 'size':
                self._size[name], self._relsize[name] = value, relvalue
            elif arg == 'min':
                self._min[name], self._relmin[name] = value, relvalue
            elif arg == 'max':
                self._max[name], self._relmax[name] = value, relvalue
            else:
                raise ValueError, 'keyword must be "size", "min", or "max"'
    def _absSize(self, relvalue):
        # Convert a fractional size into pixels of the major dimension.
        return int(round(relvalue * self._majorSize))
    def _sepName(self, n):
        # Component name for the n'th separator.
        return 'separator-%d' % n
    def _buttonName(self, n):
        # Component name for the n'th grab handle.
        return 'handle-%d' % n
    def _addSeparator(self):
        # Create the separator line and grab handle that precede the
        # most recently added pane, and bind the drag callbacks.
        n = len(self._paneNames) - 1
        # Bind n as a lambda default so each separator keeps its own
        # index (avoids the late-binding closure pitfall).
        downFunc = lambda event, s = self, num=n: s._btnDown(event, num)
        upFunc = lambda event, s = self, num=n: s._btnUp(event, num)
        moveFunc = lambda event, s = self, num=n: s._btnMove(event, num)
        # Create the line dividing the panes.
        sep = self.createcomponent(self._sepName(n),
            (), 'Separator',
            Tkinter.Frame, (self.interior(),),
            borderwidth = 1,
            relief = self['separatorrelief'])
        self._separator.append(sep)
        sep.bind('<ButtonPress-1>', downFunc)
        sep.bind('<Any-ButtonRelease-1>', upFunc)
        sep.bind('<B1-Motion>', moveFunc)
        if self['orient'] == 'vertical':
            cursor = 'sb_v_double_arrow'
            sep.configure(height = self._separatorThickness,
                    width = 10000, cursor = cursor)
        else:
            cursor = 'sb_h_double_arrow'
            sep.configure(width = self._separatorThickness,
                    height = 10000, cursor = cursor)
        self._totalSize = self._totalSize + self._separatorThickness
        # Create the handle on the dividing line.
        handle = self.createcomponent(self._buttonName(n),
            (), 'Handle',
            Tkinter.Frame, (self.interior(),),
                borderwidth = 1,
                relief = 'raised',
                width = self._handleSize,
                height = self._handleSize,
                cursor = cursor,
            )
        self._button.append(handle)
        handle.bind('<ButtonPress-1>', downFunc)
        handle.bind('<Any-ButtonRelease-1>', upFunc)
        handle.bind('<B1-Motion>', moveFunc)
        self._plotHandles()
        # Keep separators and handles above the pane frames.
        for i in range(1, len(self._paneNames)):
            self._separator[i].tkraise()
        for i in range(1, len(self._paneNames)):
            self._button[i].tkraise()
    def _btnUp(self, event, item):
        # End of a drag: apply the final sizes and un-press the handle.
        self._buttonIsDown = 0
        self._updateSizes()
        try:
            self._button[item].configure(relief='raised')
        except:
            # NOTE(review): bare except silently ignores all errors here
            # (presumably a destroyed widget) - consider narrowing to
            # Tkinter.TclError.
            pass
    def _btnDown(self, event, item):
        # Start of a drag on separator/handle *item*.
        self._button[item].configure(relief='sunken')
        self._getMotionLimit(item)
        self._buttonIsDown = 1
        self._movePending = 0
    def _handleConfigure(self, event = None):
        # <Configure> handler: recompute sizes and grow/shrink panes to
        # exactly fill the new major dimension.
        self._getNaturalSizes()
        if self._totalSize == 0:
            return
        iterRange = list(self._paneNames)
        iterRange.reverse()
        if self._majorSize > self._totalSize:
            n = self._majorSize - self._totalSize
            self._iterate(iterRange, self._grow, n)
        elif self._majorSize < self._totalSize:
            n = self._totalSize - self._majorSize
            self._iterate(iterRange, self._shrink, n)
        self._plotHandles()
        self._updateSizes()
    def _getNaturalSizes(self):
        # Recompute _majorSize/_minorSize from the window geometry and
        # clamp each pane's size between its min and max.
        # Must call this in order to get correct winfo_width, winfo_height
        self.update_idletasks()
        self._totalSize = 0
        if self['orient'] == 'vertical':
            self._majorSize = self.winfo_height()
            self._minorSize = self.winfo_width()
            majorspec = Tkinter.Frame.winfo_reqheight
        else:
            self._majorSize = self.winfo_width()
            self._minorSize = self.winfo_height()
            majorspec = Tkinter.Frame.winfo_reqwidth
        bw = string.atoi(str(self.cget('hull_borderwidth')))
        hl = string.atoi(str(self.cget('hull_highlightthickness')))
        extra = (bw + hl) * 2
        self._majorSize = self._majorSize - extra
        self._minorSize = self._minorSize - extra
        if self._majorSize < 0:
            self._majorSize = 0
        if self._minorSize < 0:
            self._minorSize = 0
        for name in self._paneNames:
            # adjust the absolute sizes first...
            if self._relsize[name] is None:
                #special case
                if self._size[name] == 0:
                    self._size[name] = apply(majorspec, (self._frame[name],))
                    self._setrel(name)
            else:
                self._size[name] = self._absSize(self._relsize[name])
            if self._relmin[name] is not None:
                self._min[name] = self._absSize(self._relmin[name])
            if self._relmax[name] is not None:
                self._max[name] = self._absSize(self._relmax[name])
            # now adjust sizes
            if self._size[name] < self._min[name]:
                self._size[name] = self._min[name]
                self._setrel(name)
            if self._size[name] > self._max[name]:
                self._size[name] = self._max[name]
                self._setrel(name)
            self._totalSize = self._totalSize + self._size[name]
        # adjust for separators
        self._totalSize = (self._totalSize +
                (len(self._paneNames) - 1) * self._separatorThickness)
    def _setrel(self, name):
        # Keep the fractional size in step with the absolute size.
        if self._relsize[name] is not None:
            if self._majorSize != 0:
                self._relsize[name] = round(self._size[name]) / self._majorSize
    def _iterate(self, names, proc, n):
        # Apply *proc* (a _grow/_shrink style callable) over *names*
        # until the remaining amount n is exhausted.
        for i in names:
            n = apply(proc, (i, n))
            if n == 0:
                break
    def _grow(self, name, n):
        # Grow pane *name* by up to n pixels (bounded by its max).
        # Returns the amount still to be distributed.
        canGrow = self._max[name] - self._size[name]
        if canGrow > n:
            self._size[name] = self._size[name] + n
            self._setrel(name)
            return 0
        elif canGrow > 0:
            self._size[name] = self._max[name]
            self._setrel(name)
            n = n - canGrow
        return n
    def _shrink(self, name, n):
        # Shrink pane *name* by up to n pixels (bounded by its min).
        # Returns the amount still to be distributed.
        canShrink = self._size[name] - self._min[name]
        if canShrink > n:
            self._size[name] = self._size[name] - n
            self._setrel(name)
            return 0
        elif canShrink > 0:
            self._size[name] = self._min[name]
            self._setrel(name)
            n = n - canShrink
        return n
    def _updateSizes(self):
        # Re-place every pane frame according to _size, then invoke the
        # user's 'command' callback with the list of sizes.
        totalSize = 0
        for name in self._paneNames:
            size = self._size[name]
            if self['orient'] == 'vertical':
                self._frame[name].place(x = 0, relwidth = 1,
                                        y = totalSize,
                                        height = size)
            else:
                self._frame[name].place(y = 0, relheight = 1,
                                        x = totalSize,
                                        width = size)
            totalSize = totalSize + size + self._separatorThickness
        # Invoke the callback command
        cmd = self['command']
        if callable(cmd):
            cmd(map(lambda x, s = self: s._size[x], self._paneNames))
    def _plotHandles(self):
        # Position the separators and grab handles between the panes,
        # shrinking or hiding a handle when its neighbours are too small.
        if len(self._paneNames) == 0:
            return
        if self['orient'] == 'vertical':
            btnp = self._minorSize - 13
        else:
            h = self._minorSize
            if h > 18:
                btnp = 9
            else:
                btnp = h - 9
        firstPane = self._paneNames[0]
        totalSize = self._size[firstPane]
        first = 1
        last = len(self._paneNames) - 1
        # loop from first to last, inclusive
        for i in range(1, last + 1):
            handlepos = totalSize - 3
            prevSize = self._size[self._paneNames[i - 1]]
            nextSize = self._size[self._paneNames[i]]
            offset1 = 0
            if i == first:
                if prevSize < 4:
                    offset1 = 4 - prevSize
            else:
                if prevSize < 8:
                    offset1 = (8 - prevSize) / 2
            offset2 = 0
            if i == last:
                if nextSize < 4:
                    offset2 = nextSize - 4
            else:
                if nextSize < 8:
                    offset2 = (nextSize - 8) / 2
            handlepos = handlepos + offset1
            if self['orient'] == 'vertical':
                height = 8 - offset1 + offset2
                if height > 1:
                    self._button[i].configure(height = height)
                    self._button[i].place(x = btnp, y = handlepos)
                else:
                    self._button[i].place_forget()
                self._separator[i].place(x = 0, y = totalSize,
                                         relwidth = 1)
            else:
                width = 8 - offset1 + offset2
                if width > 1:
                    self._button[i].configure(width = width)
                    self._button[i].place(y = btnp, x = handlepos)
                else:
                    self._button[i].place_forget()
                self._separator[i].place(y = 0, x = totalSize,
                                         relheight = 1)
            totalSize = totalSize + nextSize + self._separatorThickness
    def pane(self, name):
        """Return the Frame widget of pane *name* (or index)."""
        return self._frame[self._paneNames[self._nameToIndex(name)]]
    # Return the name of all panes
    def panes(self):
        """Return a list of all pane names, in display order."""
        return list(self._paneNames)
    def configurepane(self, name, **kw):
        """Change the 'size', 'min' or 'max' options of pane *name*."""
        name = self._paneNames[self._nameToIndex(name)]
        self._parsePaneOptions(name, kw)
        self._handleConfigure()
    def updatelayout(self):
        """Force a recomputation of the pane layout."""
        self._handleConfigure()
    def _getMotionLimit(self, item):
        # Work out how far separator *item* can be dragged in either
        # direction, respecting the min/max of all panes on both sides.
        curBefore = (item - 1) * self._separatorThickness
        minBefore, maxBefore = curBefore, curBefore
        for name in self._paneNames[:item]:
            curBefore = curBefore + self._size[name]
            minBefore = minBefore + self._min[name]
            maxBefore = maxBefore + self._max[name]
        curAfter = (len(self._paneNames) - item) * self._separatorThickness
        minAfter, maxAfter = curAfter, curAfter
        for name in self._paneNames[item:]:
            curAfter = curAfter + self._size[name]
            minAfter = minAfter + self._min[name]
            maxAfter = maxAfter + self._max[name]
        beforeToGo = min(curBefore - minBefore, maxAfter - curAfter)
        afterToGo = min(curAfter - minAfter, maxBefore - curBefore)
        self._beforeLimit = curBefore - beforeToGo
        self._afterLimit = curBefore + afterToGo
        self._curSize = curBefore
        self._plotHandles()
    # Compress the motion so that update is quick even on slow machines
    #
    # theRootp = root position (either rootx or rooty)
    def _btnMove(self, event, item):
        self._rootp = event
        if self._movePending == 0:
            self._timerId = self.after_idle(
                lambda s = self, i = item: s._btnMoveCompressed(i))
            self._movePending = 1
    def destroy(self):
        """Cancel any pending after_idle callback, then destroy."""
        if self._timerId is not None:
            self.after_cancel(self._timerId)
            self._timerId = None
        MegaWidget.destroy(self)
    def _btnMoveCompressed(self, item):
        # Deferred drag handler: resize panes to the latest pointer
        # position, clamped to the precomputed drag limits.
        if not self._buttonIsDown:
            return
        if self['orient'] == 'vertical':
            p = self._rootp.y_root - self.winfo_rooty()
        else:
            p = self._rootp.x_root - self.winfo_rootx()
        if p == self._curSize:
            self._movePending = 0
            return
        if p < self._beforeLimit:
            p = self._beforeLimit
        if p >= self._afterLimit:
            p = self._afterLimit
        self._calculateChange(item, p)
        self.update_idletasks()
        self._movePending = 0
    # Calculate the change in response to mouse motions
    def _calculateChange(self, item, p):
        if p < self._curSize:
            self._moveBefore(item, p)
        elif p > self._curSize:
            self._moveAfter(item, p)
        self._plotHandles()
    def _moveBefore(self, item, p):
        # Separator moved towards the start: shrink panes before it
        # (nearest first), grow panes after it.
        n = self._curSize - p
        # Shrink the frames before
        iterRange = list(self._paneNames[:item])
        iterRange.reverse()
        self._iterate(iterRange, self._shrink, n)
        # Adjust the frames after
        iterRange = self._paneNames[item:]
        self._iterate(iterRange, self._grow, n)
        self._curSize = p
    def _moveAfter(self, item, p):
        # Separator moved towards the end: shrink panes after it,
        # grow panes before it (nearest first).
        n = p - self._curSize
        # Shrink the frames after
        iterRange = self._paneNames[item:]
        self._iterate(iterRange, self._shrink, n)
        # Adjust the frames before
        iterRange = list(self._paneNames[:item])
        iterRange.reverse()
        self._iterate(iterRange, self._grow, n)
        self._curSize = p
######################################################################
### File: PmwPromptDialog.py
# Based on iwidgets2.2.0/promptdialog.itk code.
# A Dialog with an entryfield
class PromptDialog(Dialog):
    # Based on iwidgets2.2.0/promptdialog.itk code.
    # A Dialog containing a single EntryField, for prompting the user
    # for one value.
    def __init__(self, parent = None, **kw):
        """Create the dialog with an embedded EntryField component."""
        # Define the megawidget options.
        optiondefs = (
            ('borderx', 20, INITOPT),
            ('bordery', 20, INITOPT),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        Dialog.__init__(self, parent)
        # Create the components.
        interior = self.interior()
        aliases = (
            ('entry', 'entryfield_entry'),
            ('label', 'entryfield_label'),
        )
        self._promptDialogEntry = self.createcomponent('entryfield',
            aliases, None,
            EntryField, (interior,))
        self._promptDialogEntry.pack(fill='x', expand=1,
            padx = self['borderx'], pady = self['bordery'])
        if not kw.has_key('activatecommand'):
            # Whenever this dialog is activated, set the focus to the
            # EntryField's entry widget.
            tkentry = self.component('entry')
            self.configure(activatecommand = tkentry.focus_set)
        # Check keywords and initialise options.
        self.initialiseoptions()
    # Supply aliases to some of the entry component methods.
    def insertentry(self, index, text):
        """Insert *text* at *index* in the entry component."""
        self._promptDialogEntry.insert(index, text)
    def deleteentry(self, first, last=None):
        """Delete entry text between *first* and *last*."""
        self._promptDialogEntry.delete(first, last)
    def indexentry(self, index):
        """Return the numeric index for *index* in the entry component."""
        return self._promptDialogEntry.index(index)
# Forward unknown method calls from PromptDialog to its EntryField.
forwardmethods(PromptDialog, EntryField, '_promptDialogEntry')
######################################################################
### File: PmwRadioSelect.py
import types
import Tkinter
class RadioSelect(MegaWidget):
# A collection of several buttons. In single mode, only one
# button may be selected. In multiple mode, any number of buttons
# may be selected.
    def __init__(self, parent = None, **kw):
        """Create the (initially empty) button collection.

        'buttontype' selects the widget class (button, radiobutton or
        checkbutton) and, for the latter two, forces single/multiple
        selection mode respectively.
        """
        # Define the megawidget options.
        optiondefs = (
            ('buttontype', 'button', INITOPT),
            ('command', None, None),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('orient', 'horizontal', INITOPT),
            ('padx', 5, INITOPT),
            ('pady', 5, INITOPT),
            ('selectmode', 'single', INITOPT),
        )
        self.defineoptions(kw, optiondefs, dynamicGroups = ('Button',))
        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)
        # Create the components.
        interior = self.interior()
        if self['labelpos'] is None:
            self._radioSelectFrame = self._hull
        else:
            self._radioSelectFrame = self.createcomponent('frame',
                (), None,
                Tkinter.Frame, (interior,))
            self._radioSelectFrame.grid(column=2, row=2, sticky='nsew')
            interior.grid_columnconfigure(2, weight=1)
            interior.grid_rowconfigure(2, weight=1)
            self.createlabel(interior)
        # Initialise instance variables.
        self._buttonList = []
        if self['selectmode'] == 'single':
            self._singleSelect = 1
        elif self['selectmode'] == 'multiple':
            self._singleSelect = 0
        else:
            raise ValueError, 'bad selectmode option "' + \
                self['selectmode'] + '": should be single or multiple'
        if self['buttontype'] == 'button':
            self.buttonClass = Tkinter.Button
        elif self['buttontype'] == 'radiobutton':
            # Radiobuttons share one Tcl variable and imply single mode.
            self._singleSelect = 1
            self.var = Tkinter.StringVar()
            self.buttonClass = Tkinter.Radiobutton
        elif self['buttontype'] == 'checkbutton':
            self._singleSelect = 0
            self.buttonClass = Tkinter.Checkbutton
        else:
            raise ValueError, 'bad buttontype option "' + \
                self['buttontype'] + \
                '": should be button, radiobutton or checkbutton'
        # self.selection is a single name in single mode, a list of
        # names in multiple mode.
        if self._singleSelect:
            self.selection = None
        else:
            self.selection = []
        if self['orient'] not in ('horizontal', 'vertical'):
            raise ValueError, 'bad orient option ' + repr(self['orient']) + \
                ': must be either \'horizontal\' or \'vertical\''
        # Check keywords and initialise options.
        self.initialiseoptions()
def getcurselection(self):
if self._singleSelect:
return self.selection
else:
return tuple(self.selection)
    def getvalue(self):
        """Alias for getcurselection()."""
        return self.getcurselection()
    def setvalue(self, textOrList):
        """Programmatically set the selection without invoking 'command'.

        In single mode *textOrList* is one button name; in multiple
        mode it is the list of names that should be selected.
        """
        if self._singleSelect:
            self.__setSingleValue(textOrList)
        else:
            # Multiple selections
            oldselection = self.selection
            self.selection = textOrList
            # Toggle only the buttons whose state actually changes.
            for button in self._buttonList:
                if button in oldselection:
                    if button not in self.selection:
                        # button is currently selected but should not be
                        widget = self.component(button)
                        if self['buttontype'] == 'checkbutton':
                            widget.deselect()
                        else: # Button
                            widget.configure(relief='raised')
                else:
                    if button in self.selection:
                        # button is not currently selected but should be
                        widget = self.component(button)
                        if self['buttontype'] == 'checkbutton':
                            widget.select()
                        else: # Button
                            widget.configure(relief='sunken')
    def numbuttons(self):
        """Return the number of buttons in the collection."""
        return len(self._buttonList)
def index(self, index):
# Return the integer index of the button with the given index.
listLength = len(self._buttonList)
if type(index) == types.IntType:
if index < listLength:
return index
else:
raise ValueError, 'index "%s" is out of range' % index
elif index is END:
if listLength > 0:
return listLength - 1
else:
raise ValueError, 'RadioSelect has no buttons'
else:
for count in range(listLength):
name = self._buttonList[count]
if index == name:
return count
validValues = 'a name, a number or END'
raise ValueError, \
'bad index "%s": must be %s' % (index, validValues)
def button(self, buttonIndex):
name = self._buttonList[self.index(buttonIndex)]
return self.component(name)
def add(self, componentName, **kw):
    """Create a new button named *componentName*, grid it at the end
    of the row (horizontal orient) or column (vertical orient) and
    return the new button widget.  Extra keywords are passed to the
    button constructor.  Raises ValueError if a button with that
    name already exists."""
    if componentName in self._buttonList:
        raise ValueError, 'button "%s" already exists' % componentName

    # Each button invokes this megawidget with its own name; bind the
    # name as a lambda default so each button keeps its own value.
    kw['command'] = \
        lambda self=self, name=componentName: self.invoke(name)
    if not kw.has_key('text'):
        kw['text'] = componentName

    if self['buttontype'] == 'radiobutton':
        # Radiobuttons share self.var and select by their 'value'.
        if not kw.has_key('anchor'):
            kw['anchor'] = 'w'
        if not kw.has_key('variable'):
            kw['variable'] = self.var
        if not kw.has_key('value'):
            kw['value'] = kw['text']
    elif self['buttontype'] == 'checkbutton':
        if not kw.has_key('anchor'):
            kw['anchor'] = 'w'

    button = apply(self.createcomponent, (componentName,
            (), 'Button',
            self.buttonClass, (self._radioSelectFrame,)), kw)

    if self['orient'] == 'horizontal':
        # New button goes in the next free column of row 0.
        self._radioSelectFrame.grid_rowconfigure(0, weight=1)
        col = len(self._buttonList)
        button.grid(column=col, row=0, padx = self['padx'],
                pady = self['pady'], sticky='nsew')
        self._radioSelectFrame.grid_columnconfigure(col, weight=1)
    else:
        # New button goes in the next free row of column 0.
        self._radioSelectFrame.grid_columnconfigure(0, weight=1)
        row = len(self._buttonList)
        button.grid(column=0, row=row, padx = self['padx'],
                pady = self['pady'], sticky='ew')
        self._radioSelectFrame.grid_rowconfigure(row, weight=1)

    self._buttonList.append(componentName)
    return button
def deleteall(self):
    """Destroy every button component and reset the selection to its
    empty state."""
    for componentName in self._buttonList:
        self.destroycomponent(componentName)
    self._buttonList = []
    if self._singleSelect:
        self.selection = None
    else:
        self.selection = []
def __setSingleValue(self, value):
    # Record the new single selection and update the buttons to show it.
    self.selection = value
    if self['buttontype'] != 'radiobutton':
        # Plain buttons: indicate selection with a sunken relief.
        for name in self._buttonList:
            widget = self.component(name)
            if name == value:
                widget.configure(relief='sunken')
            else:
                widget.configure(relief='raised')
    else:
        # Radiobuttons track selection through their shared variable.
        self.component(value).select()
def invoke(self, index):
    """Programmatically activate the button at *index*: select it
    (single-select) or toggle it (multi-select), then call the
    'command' option and return its result."""
    index = self.index(index)
    name = self._buttonList[index]

    if self._singleSelect:
        self.__setSingleValue(name)
        command = self['command']
        if callable(command):
            return command(name)
        return

    # Multiple selections: toggle membership of this button.
    widget = self.component(name)
    if name in self.selection:
        if self['buttontype'] == 'checkbutton':
            widget.deselect()
        else:
            widget.configure(relief='raised')
        self.selection.remove(name)
        state = 0
    else:
        if self['buttontype'] == 'checkbutton':
            widget.select()
        else:
            widget.configure(relief='sunken')
        self.selection.append(name)
        state = 1

    command = self['command']
    if callable(command):
        # Multi-select commands also receive the new state (0 or 1).
        return command(name, state)
######################################################################
### File: PmwScrolledCanvas.py
import Tkinter
class ScrolledCanvas(MegaWidget):
    """Megawidget wrapping a Tkinter.Canvas with optional horizontal
    and vertical scrollbars.  Each scrollbar can be always shown
    ('static'), shown only when needed ('dynamic') or never shown
    ('none').  An optional sunken border frame and a label are also
    supported."""

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('borderframe', 0, INITOPT),
            ('canvasmargin', 0, INITOPT),
            ('hscrollmode', 'dynamic', self._hscrollMode),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('scrollmargin', 2, INITOPT),
            ('usehullsize', 0, INITOPT),
            ('vscrollmode', 'dynamic', self._vscrollMode),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        self.origInterior = MegaWidget.interior(self)

        if self['usehullsize']:
            # Keep the hull at its requested size rather than shrinking
            # to fit the children.
            self.origInterior.grid_propagate(0)

        if self['borderframe']:
            # Create a frame widget to act as the border of the canvas.
            self._borderframe = self.createcomponent('borderframe',
                    (), None,
                    Tkinter.Frame, (self.origInterior,),
                    relief = 'sunken',
                    borderwidth = 2,
            )
            self._borderframe.grid(row = 2, column = 2, sticky = 'news')

            # Create the canvas widget.
            self._canvas = self.createcomponent('canvas',
                    (), None,
                    Tkinter.Canvas, (self._borderframe,),
                    highlightthickness = 0,
                    borderwidth = 0,
            )
            self._canvas.pack(fill = 'both', expand = 1)
        else:
            # Create the canvas widget.
            self._canvas = self.createcomponent('canvas',
                    (), None,
                    Tkinter.Canvas, (self.origInterior,),
                    relief = 'sunken',
                    borderwidth = 2,
            )
            self._canvas.grid(row = 2, column = 2, sticky = 'news')

        # Row/column 2 holds the canvas; rows/columns 3 and 4 are
        # reserved for the scroll margin and the scrollbars.
        self.origInterior.grid_rowconfigure(2, weight = 1, minsize = 0)
        self.origInterior.grid_columnconfigure(2, weight = 1, minsize = 0)

        # Create the horizontal scrollbar
        self._horizScrollbar = self.createcomponent('horizscrollbar',
                (), 'Scrollbar',
                Tkinter.Scrollbar, (self.origInterior,),
                orient='horizontal',
                command=self._canvas.xview
        )

        # Create the vertical scrollbar
        self._vertScrollbar = self.createcomponent('vertscrollbar',
                (), 'Scrollbar',
                Tkinter.Scrollbar, (self.origInterior,),
                orient='vertical',
                command=self._canvas.yview
        )

        self.createlabel(self.origInterior, childCols = 3, childRows = 3)

        # Initialise instance variables.
        self._horizScrollbarOn = 0          # is the scrollbar currently gridded?
        self._vertScrollbarOn = 0
        self.scrollTimer = None             # idle callback id for scrollbar update
        self._scrollRecurse = 0             # guards re-entry via update_idletasks
        self._horizScrollbarNeeded = 0      # does the content require a scrollbar?
        self._vertScrollbarNeeded = 0
        self.setregionTimer = None          # idle callback id for scrollregion update

        # Check keywords and initialise options.
        self.initialiseoptions()

    def destroy(self):
        """Cancel pending idle callbacks, then destroy the megawidget."""
        if self.scrollTimer is not None:
            self.after_cancel(self.scrollTimer)
            self.scrollTimer = None
        if self.setregionTimer is not None:
            self.after_cancel(self.setregionTimer)
            self.setregionTimer = None
        MegaWidget.destroy(self)

    # ======================================================================
    # Public methods.

    def interior(self):
        # The canvas is the widget users should add items to.
        return self._canvas

    def resizescrollregion(self):
        """Schedule an idle-time recalculation of the scrollregion to
        cover all canvas items (plus the 'canvasmargin')."""
        if self.setregionTimer is None:
            self.setregionTimer = self.after_idle(self._setRegion)

    # ======================================================================
    # Configuration methods.

    def _hscrollMode(self):
        # The horizontal scroll mode has been configured.
        mode = self['hscrollmode']
        if mode == 'static':
            if not self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        elif mode == 'dynamic':
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        elif mode == 'none':
            if self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        else:
            message = 'bad hscrollmode option "%s": should be static, dynamic, or none' % mode
            raise ValueError, message
        self._configureScrollCommands()

    def _vscrollMode(self):
        # The vertical scroll mode has been configured.
        mode = self['vscrollmode']
        if mode == 'static':
            if not self._vertScrollbarOn:
                self._toggleVertScrollbar()
        elif mode == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()
        elif mode == 'none':
            if self._vertScrollbarOn:
                self._toggleVertScrollbar()
        else:
            message = 'bad vscrollmode option "%s": should be static, dynamic, or none' % mode
            raise ValueError, message
        self._configureScrollCommands()

    # ======================================================================
    # Private methods.

    def _configureScrollCommands(self):
        # If both scrollmodes are not dynamic we can save a lot of
        # time by not having to create an idle job to handle the
        # scroll commands.

        # Clean up previous scroll commands to prevent memory leak.
        tclCommandName = str(self._canvas.cget('xscrollcommand'))
        if tclCommandName != '':
            self._canvas.deletecommand(tclCommandName)
        tclCommandName = str(self._canvas.cget('yscrollcommand'))
        if tclCommandName != '':
            self._canvas.deletecommand(tclCommandName)

        if self['hscrollmode'] == self['vscrollmode'] == 'dynamic':
            self._canvas.configure(
                xscrollcommand=self._scrollBothLater,
                yscrollcommand=self._scrollBothLater
            )
        else:
            self._canvas.configure(
                xscrollcommand=self._scrollXNow,
                yscrollcommand=self._scrollYNow
            )

    def _scrollXNow(self, first, last):
        # Direct xscrollcommand callback: update the scrollbar thumb
        # and, in dynamic mode, map/unmap the scrollbar as needed.
        # NOTE(review): Tk passes strings; the ('0', '1') comparison
        # presumably matches Tk's "fully visible" report -- confirm.
        self._horizScrollbar.set(first, last)
        self._horizScrollbarNeeded = ((first, last) != ('0', '1'))
        if self['hscrollmode'] == 'dynamic':
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                self._toggleHorizScrollbar()

    def _scrollYNow(self, first, last):
        # Direct yscrollcommand callback; see _scrollXNow.
        self._vertScrollbar.set(first, last)
        self._vertScrollbarNeeded = ((first, last) != ('0', '1'))
        if self['vscrollmode'] == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()

    def _scrollBothLater(self, first, last):
        # Called by the canvas to set the horizontal or vertical
        # scrollbar when it has scrolled or changed scrollregion.
        if self.scrollTimer is None:
            self.scrollTimer = self.after_idle(self._scrollBothNow)

    def _scrollBothNow(self):
        # This performs the function of _scrollXNow and _scrollYNow.
        # If one is changed, the other should be updated to match.
        self.scrollTimer = None

        # Call update_idletasks to make sure that the containing frame
        # has been resized before we attempt to set the scrollbars.
        # Otherwise the scrollbars may be mapped/unmapped continuously.
        self._scrollRecurse = self._scrollRecurse + 1
        self.update_idletasks()
        self._scrollRecurse = self._scrollRecurse - 1
        if self._scrollRecurse != 0:
            return

        xview = self._canvas.xview()
        yview = self._canvas.yview()
        self._horizScrollbar.set(xview[0], xview[1])
        self._vertScrollbar.set(yview[0], yview[1])

        self._horizScrollbarNeeded = (xview != (0.0, 1.0))
        self._vertScrollbarNeeded = (yview != (0.0, 1.0))

        # If both horizontal and vertical scrollmodes are dynamic and
        # currently only one scrollbar is mapped and both should be
        # toggled, then unmap the mapped scrollbar.  This prevents a
        # continuous mapping and unmapping of the scrollbars.
        if (self['hscrollmode'] == self['vscrollmode'] == 'dynamic' and
                self._horizScrollbarNeeded != self._horizScrollbarOn and
                self._vertScrollbarNeeded != self._vertScrollbarOn and
                self._vertScrollbarOn != self._horizScrollbarOn):
            if self._horizScrollbarOn:
                self._toggleHorizScrollbar()
            else:
                self._toggleVertScrollbar()
            return

        if self['hscrollmode'] == 'dynamic':
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                self._toggleHorizScrollbar()

        if self['vscrollmode'] == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()

    def _toggleHorizScrollbar(self):
        # Map or unmap the horizontal scrollbar (row 4), adjusting the
        # scroll margin row (row 3) to match.
        self._horizScrollbarOn = not self._horizScrollbarOn

        interior = self.origInterior
        if self._horizScrollbarOn:
            self._horizScrollbar.grid(row = 4, column = 2, sticky = 'news')
            interior.grid_rowconfigure(3, minsize = self['scrollmargin'])
        else:
            self._horizScrollbar.grid_forget()
            interior.grid_rowconfigure(3, minsize = 0)

    def _toggleVertScrollbar(self):
        # Map or unmap the vertical scrollbar (column 4), adjusting the
        # scroll margin column (column 3) to match.
        self._vertScrollbarOn = not self._vertScrollbarOn

        interior = self.origInterior
        if self._vertScrollbarOn:
            self._vertScrollbar.grid(row = 2, column = 4, sticky = 'news')
            interior.grid_columnconfigure(3, minsize = self['scrollmargin'])
        else:
            self._vertScrollbar.grid_forget()
            interior.grid_columnconfigure(3, minsize = 0)

    def _setRegion(self):
        # Idle callback: set the scrollregion to the bounding box of
        # all canvas items, expanded by 'canvasmargin' on each side.
        self.setregionTimer = None

        region = self._canvas.bbox('all')
        if region is not None:
            canvasmargin = self['canvasmargin']
            region = (region[0] - canvasmargin, region[1] - canvasmargin,
                    region[2] + canvasmargin, region[3] + canvasmargin)
            self._canvas.configure(scrollregion = region)

    # Need to explicitly forward this to override the stupid
    # (grid_)bbox method inherited from Tkinter.Frame.Grid.
    def bbox(self, *args):
        return apply(self._canvas.bbox, args)
# Delegate all other Canvas methods to the internal canvas component.
forwardmethods(ScrolledCanvas, Tkinter.Canvas, '_canvas')
######################################################################
### File: PmwScrolledField.py
import Tkinter
class ScrolledField(MegaWidget):
    """A single-line, read-only text field built on Tkinter.Entry.
    The displayed text is controlled through the 'text' option; the
    entry can be scrolled but not edited by the user."""

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('sticky', 'ew', INITOPT),
            ('text', '', self._text),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        hull = self.interior()
        self._scrolledFieldEntry = self.createcomponent('entry',
                (), None,
                Tkinter.Entry, (hull,))
        self.__makeReadOnly()

        self._scrolledFieldEntry.grid(column=2, row=2, sticky=self['sticky'])
        hull.grid_columnconfigure(2, weight=1)
        hull.grid_rowconfigure(2, weight=1)

        self.createlabel(hull)

        # Check keywords and initialise options.
        self.initialiseoptions()

    def __makeReadOnly(self):
        # Can't always use 'disabled', since this greys out text in Tk 8.4.2
        try:
            self._scrolledFieldEntry.configure(state = 'readonly')
        except Tkinter.TclError:
            self._scrolledFieldEntry.configure(state = 'disabled')

    def _text(self):
        # Configuration callback: rewrite the entry contents whenever
        # the 'text' option changes, restoring read-only state after.
        text = self['text']
        self._scrolledFieldEntry.configure(state = 'normal')
        self._scrolledFieldEntry.delete(0, 'end')
        self._scrolledFieldEntry.insert('end', text)
        self.__makeReadOnly()
# Delegate all other Entry methods to the internal entry component.
forwardmethods(ScrolledField, Tkinter.Entry, '_scrolledFieldEntry')
######################################################################
### File: PmwScrolledFrame.py
import string
import types
import Tkinter
class ScrolledFrame(MegaWidget):
    """A scrollable frame megawidget.  Since a Tkinter.Frame cannot
    scroll natively, an inner frame is placed inside a clipping frame
    and moved with the place geometry manager; the scrollbars adjust
    self.startX/self.startY, the pixel offset of the inner frame.
    The 'horizflex'/'vertflex' options control how the inner frame is
    sized relative to the clipper ('fixed', 'expand', 'shrink' or
    'elastic')."""

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('borderframe', 1, INITOPT),
            ('horizflex', 'fixed', self._horizflex),
            ('horizfraction', 0.05, INITOPT),
            ('hscrollmode', 'dynamic', self._hscrollMode),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('scrollmargin', 2, INITOPT),
            ('usehullsize', 0, INITOPT),
            ('vertflex', 'fixed', self._vertflex),
            ('vertfraction', 0.05, INITOPT),
            ('vscrollmode', 'dynamic', self._vscrollMode),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        self.origInterior = MegaWidget.interior(self)

        if self['usehullsize']:
            # Keep the hull at its requested size rather than shrinking
            # to fit the children.
            self.origInterior.grid_propagate(0)

        if self['borderframe']:
            # Create a frame widget to act as the border of the clipper.
            self._borderframe = self.createcomponent('borderframe',
                    (), None,
                    Tkinter.Frame, (self.origInterior,),
                    relief = 'sunken',
                    borderwidth = 2,
            )
            self._borderframe.grid(row = 2, column = 2, sticky = 'news')

            # Create the clipping window.
            self._clipper = self.createcomponent('clipper',
                    (), None,
                    Tkinter.Frame, (self._borderframe,),
                    width = 400,
                    height = 300,
                    highlightthickness = 0,
                    borderwidth = 0,
            )
            self._clipper.pack(fill = 'both', expand = 1)
        else:
            # Create the clipping window.
            self._clipper = self.createcomponent('clipper',
                    (), None,
                    Tkinter.Frame, (self.origInterior,),
                    width = 400,
                    height = 300,
                    relief = 'sunken',
                    borderwidth = 2,
            )
            self._clipper.grid(row = 2, column = 2, sticky = 'news')

        self.origInterior.grid_rowconfigure(2, weight = 1, minsize = 0)
        self.origInterior.grid_columnconfigure(2, weight = 1, minsize = 0)

        # Create the horizontal scrollbar
        self._horizScrollbar = self.createcomponent('horizscrollbar',
                (), 'Scrollbar',
                Tkinter.Scrollbar, (self.origInterior,),
                orient='horizontal',
                command=self.xview
        )

        # Create the vertical scrollbar
        self._vertScrollbar = self.createcomponent('vertscrollbar',
                (), 'Scrollbar',
                Tkinter.Scrollbar, (self.origInterior,),
                orient='vertical',
                command=self.yview
        )

        self.createlabel(self.origInterior, childCols = 3, childRows = 3)

        # Initialise instance variables.
        self._horizScrollbarOn = 0          # is the scrollbar currently gridded?
        self._vertScrollbarOn = 0
        self.scrollTimer = None             # idle callback id for scrollbar update
        self._scrollRecurse = 0             # guards re-entry via update_idletasks
        self._horizScrollbarNeeded = 0      # does the content require a scrollbar?
        self._vertScrollbarNeeded = 0
        self.startX = 0                     # pixel offset of the inner frame
        self.startY = 0
        self._flexoptions = ('fixed', 'expand', 'shrink', 'elastic')

        # Create a frame in the clipper to contain the widgets to be
        # scrolled.
        self._frame = self.createcomponent('frame',
                (), None,
                Tkinter.Frame, (self._clipper,)
        )

        # Whenever the clipping window or scrolled frame change size,
        # update the scrollbars.
        self._frame.bind('<Configure>', self._reposition)
        self._clipper.bind('<Configure>', self._reposition)

        # Work around a bug in Tk where the value returned by the
        # scrollbar get() method is (0.0, 0.0, 0.0, 0.0) rather than
        # the expected 2-tuple.  This occurs if xview() is called soon
        # after the ScrolledFrame has been created.
        self._horizScrollbar.set(0.0, 1.0)
        self._vertScrollbar.set(0.0, 1.0)

        # Check keywords and initialise options.
        self.initialiseoptions()

    def destroy(self):
        """Cancel any pending idle callback, then destroy the megawidget."""
        if self.scrollTimer is not None:
            self.after_cancel(self.scrollTimer)
            self.scrollTimer = None
        MegaWidget.destroy(self)

    # ======================================================================
    # Public methods.

    def interior(self):
        # The inner frame is where users should place their widgets.
        return self._frame

    # Set timer to call real reposition method, so that it is not
    # called multiple times when many things are reconfigured at the
    # same time.
    def reposition(self):
        if self.scrollTimer is None:
            self.scrollTimer = self.after_idle(self._scrollBothNow)

    # Called when the user clicks in the horizontal scrollbar.
    # Calculates new position of frame then calls reposition() to
    # update the frame and the scrollbar.
    def xview(self, mode = None, value = None, units = None):
        # With no arguments, acts like Tk's xview query, returning the
        # current (first, last) fractions from the scrollbar.
        if type(value) == types.StringType:
            value = string.atof(value)
        if mode is None:
            return self._horizScrollbar.get()
        elif mode == 'moveto':
            frameWidth = self._frame.winfo_reqwidth()
            self.startX = value * float(frameWidth)
        else: # mode == 'scroll'
            clipperWidth = self._clipper.winfo_width()
            if units == 'units':
                jump = int(clipperWidth * self['horizfraction'])
            else:
                # units == 'pages': jump a whole clipper width.
                jump = clipperWidth
            self.startX = self.startX + value * jump

        self.reposition()

    # Called when the user clicks in the vertical scrollbar.
    # Calculates new position of frame then calls reposition() to
    # update the frame and the scrollbar.
    def yview(self, mode = None, value = None, units = None):
        if type(value) == types.StringType:
            value = string.atof(value)
        if mode is None:
            return self._vertScrollbar.get()
        elif mode == 'moveto':
            frameHeight = self._frame.winfo_reqheight()
            self.startY = value * float(frameHeight)
        else: # mode == 'scroll'
            clipperHeight = self._clipper.winfo_height()
            if units == 'units':
                jump = int(clipperHeight * self['vertfraction'])
            else:
                # units == 'pages': jump a whole clipper height.
                jump = clipperHeight
            self.startY = self.startY + value * jump

        self.reposition()

    # ======================================================================
    # Configuration methods.

    def _hscrollMode(self):
        # The horizontal scroll mode has been configured.
        mode = self['hscrollmode']
        if mode == 'static':
            if not self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        elif mode == 'dynamic':
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        elif mode == 'none':
            if self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        else:
            message = 'bad hscrollmode option "%s": should be static, dynamic, or none' % mode
            raise ValueError, message

    def _vscrollMode(self):
        # The vertical scroll mode has been configured.
        mode = self['vscrollmode']
        if mode == 'static':
            if not self._vertScrollbarOn:
                self._toggleVertScrollbar()
        elif mode == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()
        elif mode == 'none':
            if self._vertScrollbarOn:
                self._toggleVertScrollbar()
        else:
            message = 'bad vscrollmode option "%s": should be static, dynamic, or none' % mode
            raise ValueError, message

    def _horizflex(self):
        # The horizontal flex mode has been configured.
        flex = self['horizflex']

        if flex not in self._flexoptions:
            message = 'bad horizflex option "%s": should be one of %s' % \
                    (flex, str(self._flexoptions))
            raise ValueError, message

        self.reposition()

    def _vertflex(self):
        # The vertical flex mode has been configured.
        flex = self['vertflex']

        if flex not in self._flexoptions:
            message = 'bad vertflex option "%s": should be one of %s' % \
                    (flex, str(self._flexoptions))
            raise ValueError, message

        self.reposition()

    # ======================================================================
    # Private methods.

    def _reposition(self, event):
        # <Configure> event handler for the clipper and inner frame.
        self.reposition()

    def _getxview(self):
        # Clamp self.startX to the valid range, place the inner frame
        # horizontally, and return the (first, last) scrollbar fractions.

        # Horizontal dimension.
        clipperWidth = self._clipper.winfo_width()
        frameWidth = self._frame.winfo_reqwidth()
        if frameWidth <= clipperWidth:
            # The scrolled frame is smaller than the clipping window.
            self.startX = 0
            endScrollX = 1.0

            if self['horizflex'] in ('expand', 'elastic'):
                relwidth = 1
            else:
                relwidth = ''
        else:
            # The scrolled frame is larger than the clipping window.

            if self['horizflex'] in ('shrink', 'elastic'):
                self.startX = 0
                endScrollX = 1.0
                relwidth = 1
            else:
                if self.startX + clipperWidth > frameWidth:
                    self.startX = frameWidth - clipperWidth
                    endScrollX = 1.0
                else:
                    if self.startX < 0:
                        self.startX = 0
                    endScrollX = (self.startX + clipperWidth) / float(frameWidth)
                relwidth = ''

        # Position frame relative to clipper.
        self._frame.place(x = -self.startX, relwidth = relwidth)
        return (self.startX / float(frameWidth), endScrollX)

    def _getyview(self):
        # Vertical analogue of _getxview.

        # Vertical dimension.
        clipperHeight = self._clipper.winfo_height()
        frameHeight = self._frame.winfo_reqheight()
        if frameHeight <= clipperHeight:
            # The scrolled frame is smaller than the clipping window.
            self.startY = 0
            endScrollY = 1.0

            if self['vertflex'] in ('expand', 'elastic'):
                relheight = 1
            else:
                relheight = ''
        else:
            # The scrolled frame is larger than the clipping window.

            if self['vertflex'] in ('shrink', 'elastic'):
                self.startY = 0
                endScrollY = 1.0
                relheight = 1
            else:
                if self.startY + clipperHeight > frameHeight:
                    self.startY = frameHeight - clipperHeight
                    endScrollY = 1.0
                else:
                    if self.startY < 0:
                        self.startY = 0
                    endScrollY = (self.startY + clipperHeight) / float(frameHeight)
                relheight = ''

        # Position frame relative to clipper.
        self._frame.place(y = -self.startY, relheight = relheight)
        return (self.startY / float(frameHeight), endScrollY)

    # According to the relative geometries of the frame and the
    # clipper, reposition the frame within the clipper and reset the
    # scrollbars.
    def _scrollBothNow(self):
        self.scrollTimer = None

        # Call update_idletasks to make sure that the containing frame
        # has been resized before we attempt to set the scrollbars.
        # Otherwise the scrollbars may be mapped/unmapped continuously.
        self._scrollRecurse = self._scrollRecurse + 1
        self.update_idletasks()
        self._scrollRecurse = self._scrollRecurse - 1
        if self._scrollRecurse != 0:
            return

        xview = self._getxview()
        yview = self._getyview()
        self._horizScrollbar.set(xview[0], xview[1])
        self._vertScrollbar.set(yview[0], yview[1])

        self._horizScrollbarNeeded = (xview != (0.0, 1.0))
        self._vertScrollbarNeeded = (yview != (0.0, 1.0))

        # If both horizontal and vertical scrollmodes are dynamic and
        # currently only one scrollbar is mapped and both should be
        # toggled, then unmap the mapped scrollbar.  This prevents a
        # continuous mapping and unmapping of the scrollbars.
        if (self['hscrollmode'] == self['vscrollmode'] == 'dynamic' and
                self._horizScrollbarNeeded != self._horizScrollbarOn and
                self._vertScrollbarNeeded != self._vertScrollbarOn and
                self._vertScrollbarOn != self._horizScrollbarOn):
            if self._horizScrollbarOn:
                self._toggleHorizScrollbar()
            else:
                self._toggleVertScrollbar()
            return

        if self['hscrollmode'] == 'dynamic':
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                self._toggleHorizScrollbar()

        if self['vscrollmode'] == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()

    def _toggleHorizScrollbar(self):
        # Map or unmap the horizontal scrollbar (row 4), adjusting the
        # scroll margin row (row 3) to match.
        self._horizScrollbarOn = not self._horizScrollbarOn

        interior = self.origInterior
        if self._horizScrollbarOn:
            self._horizScrollbar.grid(row = 4, column = 2, sticky = 'news')
            interior.grid_rowconfigure(3, minsize = self['scrollmargin'])
        else:
            self._horizScrollbar.grid_forget()
            interior.grid_rowconfigure(3, minsize = 0)

    def _toggleVertScrollbar(self):
        # Map or unmap the vertical scrollbar (column 4), adjusting the
        # scroll margin column (column 3) to match.
        self._vertScrollbarOn = not self._vertScrollbarOn

        interior = self.origInterior
        if self._vertScrollbarOn:
            self._vertScrollbar.grid(row = 2, column = 4, sticky = 'news')
            interior.grid_columnconfigure(3, minsize = self['scrollmargin'])
        else:
            self._vertScrollbar.grid_forget()
            interior.grid_columnconfigure(3, minsize = 0)
######################################################################
### File: PmwScrolledListBox.py
# Based on iwidgets2.2.0/scrolledlistbox.itk code.
import types
import Tkinter
class ScrolledListBox(MegaWidget):
_classBindingsDefinedFor = 0
def __init__(self, parent = None, **kw):
# Define the megawidget options.
optiondefs = (
('dblclickcommand', None, None),
('hscrollmode', 'dynamic', self._hscrollMode),
('items', (), INITOPT),
('labelmargin', 0, INITOPT),
('labelpos', None, INITOPT),
('scrollmargin', 2, INITOPT),
('selectioncommand', None, None),
('usehullsize', 0, INITOPT),
('vscrollmode', 'dynamic', self._vscrollMode),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
MegaWidget.__init__(self, parent)
# Create the components.
interior = self.interior()
if self['usehullsize']:
interior.grid_propagate(0)
# Create the listbox widget.
self._listbox = self.createcomponent('listbox',
(), None,
Tkinter.Listbox, (interior,))
self._listbox.grid(row = 2, column = 2, sticky = 'news')
interior.grid_rowconfigure(2, weight = 1, minsize = 0)
interior.grid_columnconfigure(2, weight = 1, minsize = 0)
# Create the horizontal scrollbar
self._horizScrollbar = self.createcomponent('horizscrollbar',
(), 'Scrollbar',
Tkinter.Scrollbar, (interior,),
orient='horizontal',
command=self._listbox.xview
)
# Create the vertical scrollbar
self._vertScrollbar = self.createcomponent('vertscrollbar',
(), 'Scrollbar',
Tkinter.Scrollbar, (interior,),
orient='vertical',
command=self._listbox.yview
)
self.createlabel(interior, childCols = 3, childRows = 3)
# Add the items specified by the initialisation option.
items = self['items']
if type(items) != types.TupleType:
items = tuple(items)
if len(items) > 0:
apply(self._listbox.insert, ('end',) + items)
_registerScrolledList(self._listbox, self)
# Establish the special class bindings if not already done.
# Also create bindings if the Tkinter default interpreter has
# changed. Use Tkinter._default_root to create class
# bindings, so that a reference to root is created by
# bind_class rather than a reference to self, which would
# prevent object cleanup.
theTag = 'ScrolledListBoxTag'
if ScrolledListBox._classBindingsDefinedFor != Tkinter._default_root:
root = Tkinter._default_root
def doubleEvent(event):
_handleEvent(event, 'double')
def keyEvent(event):
_handleEvent(event, 'key')
def releaseEvent(event):
_handleEvent(event, 'release')
# Bind space and return keys and button 1 to the selectioncommand.
root.bind_class(theTag, '<Key-space>', keyEvent)
root.bind_class(theTag, '<Key-Return>', keyEvent)
root.bind_class(theTag, '<ButtonRelease-1>', releaseEvent)
# Bind double button 1 click to the dblclickcommand.
root.bind_class(theTag, '<Double-ButtonRelease-1>', doubleEvent)
ScrolledListBox._classBindingsDefinedFor = root
bindtags = self._listbox.bindtags()
self._listbox.bindtags(bindtags + (theTag,))
# Initialise instance variables.
self._horizScrollbarOn = 0
self._vertScrollbarOn = 0
self.scrollTimer = None
self._scrollRecurse = 0
self._horizScrollbarNeeded = 0
self._vertScrollbarNeeded = 0
# Check keywords and initialise options.
self.initialiseoptions()
def destroy(self):
if self.scrollTimer is not None:
self.after_cancel(self.scrollTimer)
self.scrollTimer = None
_deregisterScrolledList(self._listbox)
MegaWidget.destroy(self)
# ======================================================================
# Public methods.
def clear(self):
self.setlist(())
def getcurselection(self):
rtn = []
for sel in self.curselection():
rtn.append(self._listbox.get(sel))
return tuple(rtn)
def getvalue(self):
return self.getcurselection()
def setvalue(self, textOrList):
self._listbox.selection_clear(0, 'end')
listitems = list(self._listbox.get(0, 'end'))
if type(textOrList) == types.StringType:
if textOrList in listitems:
self._listbox.selection_set(listitems.index(textOrList))
else:
raise ValueError, 'no such item "%s"' % textOrList
else:
for item in textOrList:
if item in listitems:
self._listbox.selection_set(listitems.index(item))
else:
raise ValueError, 'no such item "%s"' % item
def setlist(self, items):
self._listbox.delete(0, 'end')
if len(items) > 0:
if type(items) != types.TupleType:
items = tuple(items)
apply(self._listbox.insert, (0,) + items)
# Override Tkinter.Listbox get method, so that if it is called with
# no arguments, return all list elements (consistent with other widgets).
def get(self, first=None, last=None):
if first is None:
return self._listbox.get(0, 'end')
else:
return self._listbox.get(first, last)
# ======================================================================
# Configuration methods.
def _hscrollMode(self):
# The horizontal scroll mode has been configured.
mode = self['hscrollmode']
if mode == 'static':
if not self._horizScrollbarOn:
self._toggleHorizScrollbar()
elif mode == 'dynamic':
if self._horizScrollbarNeeded != self._horizScrollbarOn:
self._toggleHorizScrollbar()
elif mode == 'none':
if self._horizScrollbarOn:
self._toggleHorizScrollbar()
else:
message = 'bad hscrollmode option "%s": should be static, dynamic, or none' % mode
raise ValueError, message
self._configureScrollCommands()
def _vscrollMode(self):
# The vertical scroll mode has been configured.
mode = self['vscrollmode']
if mode == 'static':
if not self._vertScrollbarOn:
self._toggleVertScrollbar()
elif mode == 'dynamic':
if self._vertScrollbarNeeded != self._vertScrollbarOn:
self._toggleVertScrollbar()
elif mode == 'none':
if self._vertScrollbarOn:
self._toggleVertScrollbar()
else:
message = 'bad vscrollmode option "%s": should be static, dynamic, or none' % mode
raise ValueError, message
self._configureScrollCommands()
# ======================================================================
# Private methods.
def _configureScrollCommands(self):
# If both scrollmodes are not dynamic we can save a lot of
# time by not having to create an idle job to handle the
# scroll commands.
# Clean up previous scroll commands to prevent memory leak.
tclCommandName = str(self._listbox.cget('xscrollcommand'))
if tclCommandName != '':
self._listbox.deletecommand(tclCommandName)
tclCommandName = str(self._listbox.cget('yscrollcommand'))
if tclCommandName != '':
self._listbox.deletecommand(tclCommandName)
if self['hscrollmode'] == self['vscrollmode'] == 'dynamic':
self._listbox.configure(
xscrollcommand=self._scrollBothLater,
yscrollcommand=self._scrollBothLater
)
else:
self._listbox.configure(
xscrollcommand=self._scrollXNow,
yscrollcommand=self._scrollYNow
)
def _scrollXNow(self, first, last):
self._horizScrollbar.set(first, last)
self._horizScrollbarNeeded = ((first, last) != ('0', '1'))
if self['hscrollmode'] == 'dynamic':
if self._horizScrollbarNeeded != self._horizScrollbarOn:
self._toggleHorizScrollbar()
def _scrollYNow(self, first, last):
self._vertScrollbar.set(first, last)
self._vertScrollbarNeeded = ((first, last) != ('0', '1'))
if self['vscrollmode'] == 'dynamic':
if self._vertScrollbarNeeded != self._vertScrollbarOn:
self._toggleVertScrollbar()
def _scrollBothLater(self, first, last):
# Called by the listbox to set the horizontal or vertical
# scrollbar when it has scrolled or changed size or contents.
if self.scrollTimer is None:
self.scrollTimer = self.after_idle(self._scrollBothNow)
def _scrollBothNow(self):
# This performs the function of _scrollXNow and _scrollYNow.
# If one is changed, the other should be updated to match.
self.scrollTimer = None
# Call update_idletasks to make sure that the containing frame
# has been resized before we attempt to set the scrollbars.
# Otherwise the scrollbars may be mapped/unmapped continuously.
self._scrollRecurse = self._scrollRecurse + 1
self.update_idletasks()
self._scrollRecurse = self._scrollRecurse - 1
if self._scrollRecurse != 0:
return
xview = self._listbox.xview()
yview = self._listbox.yview()
self._horizScrollbar.set(xview[0], xview[1])
self._vertScrollbar.set(yview[0], yview[1])
self._horizScrollbarNeeded = (xview != (0.0, 1.0))
self._vertScrollbarNeeded = (yview != (0.0, 1.0))
# If both horizontal and vertical scrollmodes are dynamic and
# currently only one scrollbar is mapped and both should be
# toggled, then unmap the mapped scrollbar. This prevents a
# continuous mapping and unmapping of the scrollbars.
if (self['hscrollmode'] == self['vscrollmode'] == 'dynamic' and
self._horizScrollbarNeeded != self._horizScrollbarOn and
self._vertScrollbarNeeded != self._vertScrollbarOn and
self._vertScrollbarOn != self._horizScrollbarOn):
if self._horizScrollbarOn:
self._toggleHorizScrollbar()
else:
self._toggleVertScrollbar()
return
if self['hscrollmode'] == 'dynamic':
if self._horizScrollbarNeeded != self._horizScrollbarOn:
self._toggleHorizScrollbar()
if self['vscrollmode'] == 'dynamic':
if self._vertScrollbarNeeded != self._vertScrollbarOn:
self._toggleVertScrollbar()
def _toggleHorizScrollbar(self):
self._horizScrollbarOn = not self._horizScrollbarOn
interior = self.interior()
if self._horizScrollbarOn:
self._horizScrollbar.grid(row = 4, column = 2, sticky = 'news')
interior.grid_rowconfigure(3, minsize = self['scrollmargin'])
else:
self._horizScrollbar.grid_forget()
interior.grid_rowconfigure(3, minsize = 0)
def _toggleVertScrollbar(self):
self._vertScrollbarOn = not self._vertScrollbarOn
interior = self.interior()
if self._vertScrollbarOn:
self._vertScrollbar.grid(row = 2, column = 4, sticky = 'news')
interior.grid_columnconfigure(3, minsize = self['scrollmargin'])
else:
self._vertScrollbar.grid_forget()
interior.grid_columnconfigure(3, minsize = 0)
def _handleEvent(self, event, eventType):
if eventType == 'double':
command = self['dblclickcommand']
elif eventType == 'key':
command = self['selectioncommand']
else: #eventType == 'release'
# Do not execute the command if the mouse was released
# outside the listbox.
if (event.x < 0 or self._listbox.winfo_width() <= event.x or
event.y < 0 or self._listbox.winfo_height() <= event.y):
return
command = self['selectioncommand']
if callable(command):
command()
    # Need to explicitly forward this to override the stupid
    # (grid_)size method inherited from Tkinter.Frame.Grid.
    # Returns the number of lines in the listbox component.
    def size(self):
        return self._listbox.size()
    # Need to explicitly forward this to override the stupid
    # (grid_)bbox method inherited from Tkinter.Frame.Grid.
    # Returns the bounding box of the given listbox entry.
    def bbox(self, index):
        return self._listbox.bbox(index)
forwardmethods(ScrolledListBox, Tkinter.Listbox, '_listbox')
# ======================================================================
# Module-level cache mapping each Tkinter.Listbox widget to the
# ScrolledListBox megawidget that owns it, used to forward events.
_listboxCache = {}
def _registerScrolledList(listbox, scrolledList):
    # Register an ScrolledList widget for a Listbox widget, so that
    # events on the listbox can be forwarded to the megawidget.
    _listboxCache[listbox] = scrolledList
def _deregisterScrolledList(listbox):
    # Deregister a Listbox widget, dropping its cache entry (called
    # when the owning megawidget is destroyed).
    del _listboxCache[listbox]
def _handleEvent(event, eventType):
    # Forward events for a Listbox to it's ScrolledListBox.
    # A binding earlier in the bindtags list may have destroyed the
    # megawidget, so only forward when it is still in the cache.
    scrolledList = _listboxCache.get(event.widget)
    if scrolledList is not None:
        scrolledList._handleEvent(event, eventType)
######################################################################
### File: PmwScrolledText.py
# Based on iwidgets2.2.0/scrolledtext.itk code.
import Tkinter
class ScrolledText(MegaWidget):
    """Text megawidget with optional scrollbars and header widgets.

    Wraps a Tkinter.Text in a megawidget with 'hscrollmode' and
    'vscrollmode' options ('none', 'static' or 'dynamic'; in dynamic
    mode the scrollbars are mapped and unmapped as required), an
    optional sunken border frame, and optional row/column header text
    widgets that are kept scrolled in step with the main text widget.
    """
    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('borderframe', 0, INITOPT),
            ('columnheader', 0, INITOPT),
            ('hscrollmode', 'dynamic', self._hscrollMode),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('rowcolumnheader',0, INITOPT),
            ('rowheader', 0, INITOPT),
            ('scrollmargin', 2, INITOPT),
            ('usehullsize', 0, INITOPT),
            ('vscrollmode', 'dynamic', self._vscrollMode),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)
        # Create the components.
        interior = self.interior()
        if self['usehullsize']:
            interior.grid_propagate(0)
        if self['borderframe']:
            # Create a frame widget to act as the border of the text
            # widget. Later, pack the text widget so that it fills
            # the frame. This avoids a problem in Tk, where window
            # items in a text widget may overlap the border of the
            # text widget.
            self._borderframe = self.createcomponent('borderframe',
                    (), None,
                    Tkinter.Frame, (interior,),
                    relief = 'sunken',
                    borderwidth = 2,
            )
            self._borderframe.grid(row = 4, column = 4, sticky = 'news')
            # Create the text widget.
            self._textbox = self.createcomponent('text',
                    (), None,
                    Tkinter.Text, (self._borderframe,),
                    highlightthickness = 0,
                    borderwidth = 0,
            )
            self._textbox.pack(fill = 'both', expand = 1)
            # NOTE(review): the trailing commas make bw and ht
            # 1-tuples, not plain values.  They are later passed as
            # borderwidth/highlightthickness of the header widgets
            # below -- confirm Tk accepts them before "fixing" by
            # removing the commas.
            bw = self._borderframe.cget('borderwidth'),
            ht = self._borderframe.cget('highlightthickness'),
        else:
            # Create the text widget.
            self._textbox = self.createcomponent('text',
                    (), None,
                    Tkinter.Text, (interior,),
            )
            self._textbox.grid(row = 4, column = 4, sticky = 'news')
            # NOTE(review): trailing commas -- see note above.
            bw = self._textbox.cget('borderwidth'),
            ht = self._textbox.cget('highlightthickness'),
        # Create the header text widgets
        if self['columnheader']:
            self._columnheader = self.createcomponent('columnheader',
                    (), 'Header',
                    Tkinter.Text, (interior,),
                    height=1,
                    wrap='none',
                    borderwidth = bw,
                    highlightthickness = ht,
            )
            self._columnheader.grid(row = 2, column = 4, sticky = 'ew')
            # Keep the main text widget's horizontal view in step with
            # the column header.
            self._columnheader.configure(
                    xscrollcommand = self._columnheaderscrolled)
        if self['rowheader']:
            self._rowheader = self.createcomponent('rowheader',
                    (), 'Header',
                    Tkinter.Text, (interior,),
                    wrap='none',
                    borderwidth = bw,
                    highlightthickness = ht,
            )
            self._rowheader.grid(row = 4, column = 2, sticky = 'ns')
            # Keep the main text widget's vertical view in step with
            # the row header.
            self._rowheader.configure(
                    yscrollcommand = self._rowheaderscrolled)
        if self['rowcolumnheader']:
            self._rowcolumnheader = self.createcomponent('rowcolumnheader',
                    (), 'Header',
                    Tkinter.Text, (interior,),
                    height=1,
                    wrap='none',
                    borderwidth = bw,
                    highlightthickness = ht,
            )
            self._rowcolumnheader.grid(row = 2, column = 2, sticky = 'nsew')
        interior.grid_rowconfigure(4, weight = 1, minsize = 0)
        interior.grid_columnconfigure(4, weight = 1, minsize = 0)
        # Create the horizontal scrollbar
        self._horizScrollbar = self.createcomponent('horizscrollbar',
                (), 'Scrollbar',
                Tkinter.Scrollbar, (interior,),
                orient='horizontal',
                command=self._textbox.xview
        )
        # Create the vertical scrollbar
        self._vertScrollbar = self.createcomponent('vertscrollbar',
                (), 'Scrollbar',
                Tkinter.Scrollbar, (interior,),
                orient='vertical',
                command=self._textbox.yview
        )
        self.createlabel(interior, childCols = 5, childRows = 5)
        # Initialise instance variables.
        self._horizScrollbarOn = 0
        self._vertScrollbarOn = 0
        # Pending after_idle id for _scrollBothNow, or None.
        self.scrollTimer = None
        # Guards against update_idletasks re-entering _scrollBothNow.
        self._scrollRecurse = 0
        self._horizScrollbarNeeded = 0
        self._vertScrollbarNeeded = 0
        # Last known width of the text widget; used to damp
        # mapping/unmapping of the horizontal scrollbar.
        self._textWidth = None
        # These four variables avoid an infinite loop caused by the
        # row or column header's scrollcommand causing the main text
        # widget's scrollcommand to be called and vice versa.
        self._textboxLastX = None
        self._textboxLastY = None
        self._columnheaderLastX = None
        self._rowheaderLastY = None
        # Check keywords and initialise options.
        self.initialiseoptions()
    def destroy(self):
        # Cancel any pending idle scroll update before tearing down.
        if self.scrollTimer is not None:
            self.after_cancel(self.scrollTimer)
            self.scrollTimer = None
        MegaWidget.destroy(self)
    # ======================================================================
    # Public methods.
    def clear(self):
        """Delete all text in the text widget."""
        self.settext('')
    def importfile(self, fileName, where = 'end'):
        """Insert the contents of fileName at index where."""
        file = open(fileName, 'r')
        self._textbox.insert(where, file.read())
        file.close()
    def exportfile(self, fileName):
        """Write the entire contents of the text widget to fileName."""
        file = open(fileName, 'w')
        file.write(self._textbox.get('1.0', 'end'))
        file.close()
    def settext(self, text):
        """Replace all text with text, preserving the disabled state."""
        disabled = (str(self._textbox.cget('state')) == 'disabled')
        if disabled:
            # Temporarily enable the widget so it can be modified.
            self._textbox.configure(state='normal')
        # NOTE(review): '0.0' is not the canonical start index ('1.0'
        # is); presumably Tk clamps it to the start -- confirm.
        self._textbox.delete('0.0', 'end')
        self._textbox.insert('end', text)
        if disabled:
            self._textbox.configure(state='disabled')
    # Override Tkinter.Text get method, so that if it is called with
    # no arguments, return all text (consistent with other widgets).
    def get(self, first=None, last=None):
        if first is None:
            return self._textbox.get('1.0', 'end')
        else:
            return self._textbox.get(first, last)
    def getvalue(self):
        """Return all text (alias for get() with no arguments)."""
        return self.get()
    def setvalue(self, text):
        """Replace all text (alias for settext)."""
        return self.settext(text)
    def appendtext(self, text):
        """Append text, scrolling to the end only if the end was
        already visible, so an interactive scroll position is kept."""
        oldTop, oldBottom = self._textbox.yview()
        disabled = (str(self._textbox.cget('state')) == 'disabled')
        if disabled:
            self._textbox.configure(state='normal')
        self._textbox.insert('end', text)
        if disabled:
            self._textbox.configure(state='disabled')
        if oldBottom == 1.0:
            self._textbox.yview('moveto', 1.0)
    # ======================================================================
    # Configuration methods.
    def _hscrollMode(self):
        # The horizontal scroll mode has been configured.
        mode = self['hscrollmode']
        if mode == 'static':
            if not self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        elif mode == 'dynamic':
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        elif mode == 'none':
            if self._horizScrollbarOn:
                self._toggleHorizScrollbar()
        else:
            message = 'bad hscrollmode option "%s": should be static, dynamic, or none' % mode
            raise ValueError, message
        self._configureScrollCommands()
    def _vscrollMode(self):
        # The vertical scroll mode has been configured.
        mode = self['vscrollmode']
        if mode == 'static':
            if not self._vertScrollbarOn:
                self._toggleVertScrollbar()
        elif mode == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()
        elif mode == 'none':
            if self._vertScrollbarOn:
                self._toggleVertScrollbar()
        else:
            message = 'bad vscrollmode option "%s": should be static, dynamic, or none' % mode
            raise ValueError, message
        self._configureScrollCommands()
    # ======================================================================
    # Private methods.
    def _configureScrollCommands(self):
        # If both scrollmodes are not dynamic we can save a lot of
        # time by not having to create an idle job to handle the
        # scroll commands.
        # Clean up previous scroll commands to prevent memory leak.
        tclCommandName = str(self._textbox.cget('xscrollcommand'))
        if tclCommandName != '':
            self._textbox.deletecommand(tclCommandName)
        tclCommandName = str(self._textbox.cget('yscrollcommand'))
        if tclCommandName != '':
            self._textbox.deletecommand(tclCommandName)
        if self['hscrollmode'] == self['vscrollmode'] == 'dynamic':
            self._textbox.configure(
                    xscrollcommand=self._scrollBothLater,
                    yscrollcommand=self._scrollBothLater
            )
        else:
            self._textbox.configure(
                    xscrollcommand=self._scrollXNow,
                    yscrollcommand=self._scrollYNow
            )
    def _scrollXNow(self, first, last):
        self._horizScrollbar.set(first, last)
        self._horizScrollbarNeeded = ((first, last) != ('0', '1'))
        # This code is the same as in _scrollBothNow. Keep it that way.
        if self['hscrollmode'] == 'dynamic':
            currentWidth = self._textbox.winfo_width()
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                if self._horizScrollbarNeeded or \
                        self._textWidth != currentWidth:
                    self._toggleHorizScrollbar()
            self._textWidth = currentWidth
        if self['columnheader']:
            # Keep the column header in step, guarding against the
            # header's own scrollcommand calling back into here.
            if self._columnheaderLastX != first:
                self._columnheaderLastX = first
                self._columnheader.xview('moveto', first)
    def _scrollYNow(self, first, last):
        # The text widget reports a view of ('0', '0') just after it
        # has been created (see the note in _scrollBothNow); ignore it.
        if first == '0' and last == '0':
            return
        self._vertScrollbar.set(first, last)
        self._vertScrollbarNeeded = ((first, last) != ('0', '1'))
        if self['vscrollmode'] == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()
        if self['rowheader']:
            # Keep the row header in step, guarding against the
            # header's own scrollcommand calling back into here.
            if self._rowheaderLastY != first:
                self._rowheaderLastY = first
                self._rowheader.yview('moveto', first)
    def _scrollBothLater(self, first, last):
        # Called by the text widget to set the horizontal or vertical
        # scrollbar when it has scrolled or changed size or contents.
        # Defers the update to a single idle callback.
        if self.scrollTimer is None:
            self.scrollTimer = self.after_idle(self._scrollBothNow)
    def _scrollBothNow(self):
        # This performs the function of _scrollXNow and _scrollYNow.
        # If one is changed, the other should be updated to match.
        self.scrollTimer = None
        # Call update_idletasks to make sure that the containing frame
        # has been resized before we attempt to set the scrollbars.
        # Otherwise the scrollbars may be mapped/unmapped continuously.
        # The recursion counter guards against re-entry via the scroll
        # commands triggered by update_idletasks.
        self._scrollRecurse = self._scrollRecurse + 1
        self.update_idletasks()
        self._scrollRecurse = self._scrollRecurse - 1
        if self._scrollRecurse != 0:
            return
        xview = self._textbox.xview()
        yview = self._textbox.yview()
        # The text widget returns a yview of (0.0, 0.0) just after it
        # has been created. Ignore this.
        if yview == (0.0, 0.0):
            return
        if self['columnheader']:
            if self._columnheaderLastX != xview[0]:
                self._columnheaderLastX = xview[0]
                self._columnheader.xview('moveto', xview[0])
        if self['rowheader']:
            if self._rowheaderLastY != yview[0]:
                self._rowheaderLastY = yview[0]
                self._rowheader.yview('moveto', yview[0])
        self._horizScrollbar.set(xview[0], xview[1])
        self._vertScrollbar.set(yview[0], yview[1])
        self._horizScrollbarNeeded = (xview != (0.0, 1.0))
        self._vertScrollbarNeeded = (yview != (0.0, 1.0))
        # If both horizontal and vertical scrollmodes are dynamic and
        # currently only one scrollbar is mapped and both should be
        # toggled, then unmap the mapped scrollbar. This prevents a
        # continuous mapping and unmapping of the scrollbars.
        if (self['hscrollmode'] == self['vscrollmode'] == 'dynamic' and
                self._horizScrollbarNeeded != self._horizScrollbarOn and
                self._vertScrollbarNeeded != self._vertScrollbarOn and
                self._vertScrollbarOn != self._horizScrollbarOn):
            if self._horizScrollbarOn:
                self._toggleHorizScrollbar()
            else:
                self._toggleVertScrollbar()
            return
        if self['hscrollmode'] == 'dynamic':
            # The following test is done to prevent continuous
            # mapping and unmapping of the horizontal scrollbar.
            # This may occur when some event (scrolling, resizing
            # or text changes) modifies the displayed text such
            # that the bottom line in the window is the longest
            # line displayed. If this causes the horizontal
            # scrollbar to be mapped, the scrollbar may "cover up"
            # the bottom line, which would mean that the scrollbar
            # is no longer required. If the scrollbar is then
            # unmapped, the bottom line will then become visible
            # again, which would cause the scrollbar to be mapped
            # again, and so on...
            #
            # The idea is that, if the width of the text widget
            # has not changed and the scrollbar is currently
            # mapped, then do not unmap the scrollbar even if it
            # is no longer required. This means that, during
            # normal scrolling of the text, once the horizontal
            # scrollbar has been mapped it will not be unmapped
            # (until the width of the text widget changes).
            currentWidth = self._textbox.winfo_width()
            if self._horizScrollbarNeeded != self._horizScrollbarOn:
                if self._horizScrollbarNeeded or \
                        self._textWidth != currentWidth:
                    self._toggleHorizScrollbar()
            self._textWidth = currentWidth
        if self['vscrollmode'] == 'dynamic':
            if self._vertScrollbarNeeded != self._vertScrollbarOn:
                self._toggleVertScrollbar()
    def _columnheaderscrolled(self, first, last):
        # The column header scrolled; keep the main text widget's
        # horizontal view in step (guarded against mutual recursion).
        if self._textboxLastX != first:
            self._textboxLastX = first
            self._textbox.xview('moveto', first)
    def _rowheaderscrolled(self, first, last):
        # The row header scrolled; keep the main text widget's
        # vertical view in step (guarded against mutual recursion).
        if self._textboxLastY != first:
            self._textboxLastY = first
            self._textbox.yview('moveto', first)
    def _toggleHorizScrollbar(self):
        # Map or unmap the horizontal scrollbar and adjust the
        # margin row accordingly.
        self._horizScrollbarOn = not self._horizScrollbarOn
        interior = self.interior()
        if self._horizScrollbarOn:
            self._horizScrollbar.grid(row = 6, column = 4, sticky = 'news')
            interior.grid_rowconfigure(5, minsize = self['scrollmargin'])
        else:
            self._horizScrollbar.grid_forget()
            interior.grid_rowconfigure(5, minsize = 0)
    def _toggleVertScrollbar(self):
        # Map or unmap the vertical scrollbar and adjust the
        # margin column accordingly.
        self._vertScrollbarOn = not self._vertScrollbarOn
        interior = self.interior()
        if self._vertScrollbarOn:
            self._vertScrollbar.grid(row = 4, column = 6, sticky = 'news')
            interior.grid_columnconfigure(5, minsize = self['scrollmargin'])
        else:
            self._vertScrollbar.grid_forget()
            interior.grid_columnconfigure(5, minsize = 0)
    # Need to explicitly forward this to override the stupid
    # (grid_)bbox method inherited from Tkinter.Frame.Grid.
    def bbox(self, index):
        return self._textbox.bbox(index)
forwardmethods(ScrolledText, Tkinter.Text, '_textbox')
######################################################################
### File: PmwHistoryText.py
# Indices into the three-element history entries used by HistoryText:
# the text as originally added, the text as since modified by the
# user, and a flag naming which of the two is currently displayed.
_ORIGINAL = 0
_MODIFIED = 1
_DISPLAY = 2
class HistoryText(ScrolledText):
    """ScrolledText maintaining a history of previous contents.

    Each history entry is a list [original, modified, display-flag]
    indexed by the module constants _ORIGINAL, _MODIFIED and
    _DISPLAY: the text as first added, the text as since edited by
    the user, and which of the two is currently displayed.
    """
    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('compressany', 1, None),
            ('compresstail', 1, None),
            ('historycommand', None, None),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        ScrolledText.__init__(self, parent)
        # Initialise instance variables.
        self._list = []
        self._currIndex = 0
        self._pastIndex = None
        self._lastIndex = 0 # pointer to end of history list
        # Check keywords and initialise options.
        self.initialiseoptions()
    def addhistory(self):
        """Add the currently displayed text as the newest history
        entry, subject to the compressany/compresstail options."""
        text = self.get()
        # NOTE(review): text[-1] raises IndexError if get() returns an
        # empty string; a Tk text widget's contents normally end with
        # a newline, so this may be unreachable -- confirm.
        if text[-1] == '\n':
            text = text[:-1]
        if len(self._list) == 0:
            # This is the first history entry. Add it.
            self._list.append([text, text, _MODIFIED])
            return
        currentEntry = self._list[self._currIndex]
        if text == currentEntry[_ORIGINAL]:
            # The current history entry has not been modified. Check if
            # we need to add it again.
            if self['compresstail'] and self._currIndex == self._lastIndex:
                return
            if self['compressany']:
                return
        # Undo any changes for the current history entry, since they
        # will now be available in the new entry.
        currentEntry[_MODIFIED] = currentEntry[_ORIGINAL]
        historycommand = self['historycommand']
        if self._currIndex == self._lastIndex:
            # The last history entry is currently being displayed,
            # so disable the special meaning of the 'Next' button.
            self._pastIndex = None
            nextState = 'disabled'
        else:
            # A previous history entry is currently being displayed,
            # so allow the 'Next' button to go to the entry after this one.
            self._pastIndex = self._currIndex
            nextState = 'normal'
        if callable(historycommand):
            historycommand('normal', nextState)
        # Create the new history entry.
        self._list.append([text, text, _MODIFIED])
        # Move the pointer into the history entry list to the end.
        self._lastIndex = self._lastIndex + 1
        self._currIndex = self._lastIndex
    def next(self):
        """Display the next (more recent) history entry, or ring the
        bell when already at the end with no pending 'past' entry."""
        if self._currIndex == self._lastIndex and self._pastIndex is None:
            self.bell()
        else:
            self._modifyDisplay('next')
    def prev(self):
        """Display the previous (older) history entry, or ring the
        bell when already at the beginning."""
        self._pastIndex = None
        if self._currIndex == 0:
            self.bell()
        else:
            self._modifyDisplay('prev')
    def undo(self):
        """Show the original version of the current history entry."""
        if len(self._list) != 0:
            self._modifyDisplay('undo')
    def redo(self):
        """Show the modified version of the current history entry."""
        if len(self._list) != 0:
            self._modifyDisplay('redo')
    def gethistory(self):
        """Return the raw history list (list of 3-element entries)."""
        return self._list
    def _modifyDisplay(self, command):
        # Modify the display to show either the next or previous
        # history entry (next, prev) or the original or modified
        # version of the current history entry (undo, redo).
        # Save the currently displayed text.
        currentText = self.get()
        # NOTE(review): same empty-string caveat as in addhistory.
        if currentText[-1] == '\n':
            currentText = currentText[:-1]
        currentEntry = self._list[self._currIndex]
        if currentEntry[_DISPLAY] == _MODIFIED:
            currentEntry[_MODIFIED] = currentText
        elif currentEntry[_ORIGINAL] != currentText:
            currentEntry[_MODIFIED] = currentText
            if command in ('next', 'prev'):
                currentEntry[_DISPLAY] = _MODIFIED
        if command in ('next', 'prev'):
            prevstate = 'normal'
            nextstate = 'normal'
            if command == 'next':
                if self._pastIndex is not None:
                    # Resume from where addhistory left off.
                    self._currIndex = self._pastIndex
                    self._pastIndex = None
                self._currIndex = self._currIndex + 1
                if self._currIndex == self._lastIndex:
                    nextstate = 'disabled'
            elif command == 'prev':
                self._currIndex = self._currIndex - 1
                if self._currIndex == 0:
                    prevstate = 'disabled'
            historycommand = self['historycommand']
            if callable(historycommand):
                # Report the new enabled/disabled state of the
                # prev/next buttons to the client.
                historycommand(prevstate, nextstate)
            currentEntry = self._list[self._currIndex]
        else:
            if command == 'undo':
                currentEntry[_DISPLAY] = _ORIGINAL
            elif command == 'redo':
                currentEntry[_DISPLAY] = _MODIFIED
        # Display the new text.
        self.delete('1.0', 'end')
        self.insert('end', currentEntry[currentEntry[_DISPLAY]])
######################################################################
### File: PmwSelectionDialog.py
# Not Based on iwidgets version.
class SelectionDialog(Dialog):
    # Dialog window with selection list.
    # Dialog window displaying a list and requesting the user to
    # select one.  Double-clicking an entry invokes the dialog.
    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('borderx', 10, INITOPT),
            ('bordery', 10, INITOPT),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        Dialog.__init__(self, parent)
        # Create the components.
        interior = self.interior()
        aliases = (
            ('listbox', 'scrolledlist_listbox'),
            ('label', 'scrolledlist_label'),
        )
        self._list = self.createcomponent('scrolledlist',
                aliases, None,
                ScrolledListBox, (interior,),
                dblclickcommand = self.invoke)
        self._list.pack(side='top', expand='true', fill='both',
                padx = self['borderx'], pady = self['bordery'])
        if not kw.has_key('activatecommand'):
            # Whenever this dialog is activated, set the focus to the
            # ScrolledListBox's listbox widget.
            listbox = self.component('listbox')
            self.configure(activatecommand = listbox.focus_set)
        # Check keywords and initialise options.
        self.initialiseoptions()
    # Need to explicitly forward this to override the stupid
    # (grid_)size method inherited from Tkinter.Toplevel.Grid.
    def size(self):
        return self.component('listbox').size()
    # Need to explicitly forward this to override the stupid
    # (grid_)bbox method inherited from Tkinter.Toplevel.Grid.
    def bbox(self, index):
        # Bug fix: this previously forwarded to size(index), which is
        # a TypeError (Listbox.size takes no arguments) and never
        # returned a bounding box.  Forward to bbox, as the comment
        # above describes.
        return self.component('listbox').bbox(index)
forwardmethods(SelectionDialog, ScrolledListBox, '_list')
######################################################################
### File: PmwTextDialog.py
# A Dialog with a ScrolledText widget.
class TextDialog(Dialog):
    """A Dialog containing a ScrolledText widget."""
    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('borderx', 10, INITOPT),
            ('bordery', 10, INITOPT),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        Dialog.__init__(self, parent)
        # Create the scrolled text component inside the dialog.
        aliases = (
            ('text', 'scrolledtext_text'),
            ('label', 'scrolledtext_label'),
        )
        parentWidget = self.interior()
        self._text = self.createcomponent('scrolledtext',
                aliases, None,
                ScrolledText, (parentWidget,))
        self._text.pack(side='top', expand=1, fill='both',
                padx = self['borderx'], pady = self['bordery'])
        # Check keywords and initialise options.
        self.initialiseoptions()
    # Forward bbox explicitly so that it reaches the text widget
    # rather than the (grid_)bbox method inherited from
    # Tkinter.Toplevel.Grid.
    def bbox(self, index):
        return self._text.bbox(index)
forwardmethods(TextDialog, ScrolledText, '_text')
######################################################################
### File: PmwTimeCounter.py
# Authors: Joe VanAndel and Greg McFarlane
import string
import sys
import time
import Tkinter
class TimeCounter(MegaWidget):
"""Up-down counter
A TimeCounter is a single-line entry widget with Up and Down arrows
which increment and decrement the Time value in the entry.
"""
def __init__(self, parent = None, **kw):
# Define the megawidget options.
optiondefs = (
('autorepeat', 1, None),
('buttonaspect', 1.0, INITOPT),
('command', None, None),
('initwait', 300, None),
('labelmargin', 0, INITOPT),
('labelpos', None, INITOPT),
('max', None, self._max),
('min', None, self._min),
('padx', 0, INITOPT),
('pady', 0, INITOPT),
('repeatrate', 50, None),
('value', None, INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
MegaWidget.__init__(self, parent)
self.arrowDirection = {}
self._flag = 'stopped'
self._timerId = None
self._createComponents(kw)
value = self['value']
if value is None:
now = time.time()
value = time.strftime('%H:%M:%S', time.localtime(now))
self.setvalue(value)
# Check keywords and initialise options.
self.initialiseoptions()
def _createComponents(self, kw):
# Create the components.
interior = self.interior()
# If there is no label, put the arrows and the entry directly
# into the interior, otherwise create a frame for them. In
# either case the border around the arrows and the entry will
# be raised (but not around the label).
if self['labelpos'] is None:
frame = interior
if not kw.has_key('hull_relief'):
frame.configure(relief = 'raised')
if not kw.has_key('hull_borderwidth'):
frame.configure(borderwidth = 1)
else:
frame = self.createcomponent('frame',
(), None,
Tkinter.Frame, (interior,),
relief = 'raised', borderwidth = 1)
frame.grid(column=2, row=2, sticky='nsew')
interior.grid_columnconfigure(2, weight=1)
interior.grid_rowconfigure(2, weight=1)
# Create the down arrow buttons.
# Create the hour down arrow.
self._downHourArrowBtn = self.createcomponent('downhourarrow',
(), 'Arrow',
Tkinter.Canvas, (frame,),
width = 16, height = 16, relief = 'raised', borderwidth = 2)
self.arrowDirection[self._downHourArrowBtn] = 'down'
self._downHourArrowBtn.grid(column = 0, row = 2)
# Create the minute down arrow.
self._downMinuteArrowBtn = self.createcomponent('downminutearrow',
(), 'Arrow',
Tkinter.Canvas, (frame,),
width = 16, height = 16, relief = 'raised', borderwidth = 2)
self.arrowDirection[self._downMinuteArrowBtn] = 'down'
self._downMinuteArrowBtn.grid(column = 1, row = 2)
# Create the second down arrow.
self._downSecondArrowBtn = self.createcomponent('downsecondarrow',
(), 'Arrow',
Tkinter.Canvas, (frame,),
width = 16, height = 16, relief = 'raised', borderwidth = 2)
self.arrowDirection[self._downSecondArrowBtn] = 'down'
self._downSecondArrowBtn.grid(column = 2, row = 2)
# Create the entry fields.
# Create the hour entry field.
self._hourCounterEntry = self.createcomponent('hourentryfield',
(('hourentry', 'hourentryfield_entry'),), None,
EntryField, (frame,), validate='integer', entry_width = 2)
self._hourCounterEntry.grid(column = 0, row = 1, sticky = 'news')
# Create the minute entry field.
self._minuteCounterEntry = self.createcomponent('minuteentryfield',
(('minuteentry', 'minuteentryfield_entry'),), None,
EntryField, (frame,), validate='integer', entry_width = 2)
self._minuteCounterEntry.grid(column = 1, row = 1, sticky = 'news')
# Create the second entry field.
self._secondCounterEntry = self.createcomponent('secondentryfield',
(('secondentry', 'secondentryfield_entry'),), None,
EntryField, (frame,), validate='integer', entry_width = 2)
self._secondCounterEntry.grid(column = 2, row = 1, sticky = 'news')
# Create the up arrow buttons.
# Create the hour up arrow.
self._upHourArrowBtn = self.createcomponent('uphourarrow',
(), 'Arrow',
Tkinter.Canvas, (frame,),
width = 16, height = 16, relief = 'raised', borderwidth = 2)
self.arrowDirection[self._upHourArrowBtn] = 'up'
self._upHourArrowBtn.grid(column = 0, row = 0)
# Create the minute up arrow.
self._upMinuteArrowBtn = self.createcomponent('upminutearrow',
(), 'Arrow',
Tkinter.Canvas, (frame,),
width = 16, height = 16, relief = 'raised', borderwidth = 2)
self.arrowDirection[self._upMinuteArrowBtn] = 'up'
self._upMinuteArrowBtn.grid(column = 1, row = 0)
# Create the second up arrow.
self._upSecondArrowBtn = self.createcomponent('upsecondarrow',
(), 'Arrow',
Tkinter.Canvas, (frame,),
width = 16, height = 16, relief = 'raised', borderwidth = 2)
self.arrowDirection[self._upSecondArrowBtn] = 'up'
self._upSecondArrowBtn.grid(column = 2, row = 0)
# Make it resize nicely.
padx = self['padx']
pady = self['pady']
for col in range(3):
frame.grid_columnconfigure(col, weight = 1, pad = padx)
frame.grid_rowconfigure(0, pad = pady)
frame.grid_rowconfigure(2, pad = pady)
frame.grid_rowconfigure(1, weight = 1)
# Create the label.
self.createlabel(interior)
# Set bindings.
# Up hour
self._upHourArrowBtn.bind('<Configure>',
lambda event, s=self,button=self._upHourArrowBtn:
s._drawArrow(button, 'up'))
self._upHourArrowBtn.bind('<1>',
lambda event, s=self,button=self._upHourArrowBtn:
s._countUp(button, 3600))
self._upHourArrowBtn.bind('<Any-ButtonRelease-1>',
lambda event, s=self, button=self._upHourArrowBtn:
s._stopUpDown(button))
# Up minute
self._upMinuteArrowBtn.bind('<Configure>',
lambda event, s=self,button=self._upMinuteArrowBtn:
s._drawArrow(button, 'up'))
self._upMinuteArrowBtn.bind('<1>',
lambda event, s=self,button=self._upMinuteArrowBtn:
s._countUp(button, 60))
self._upMinuteArrowBtn.bind('<Any-ButtonRelease-1>',
lambda event, s=self, button=self._upMinuteArrowBtn:
s._stopUpDown(button))
# Up second
self._upSecondArrowBtn.bind('<Configure>',
lambda event, s=self,button=self._upSecondArrowBtn:
s._drawArrow(button, 'up'))
self._upSecondArrowBtn.bind('<1>',
lambda event, s=self,button=self._upSecondArrowBtn:
s._countUp(button, 1))
self._upSecondArrowBtn.bind('<Any-ButtonRelease-1>',
lambda event, s=self, button=self._upSecondArrowBtn:
s._stopUpDown(button))
# Down hour
self._downHourArrowBtn.bind('<Configure>',
lambda event, s=self,button=self._downHourArrowBtn:
s._drawArrow(button, 'down'))
self._downHourArrowBtn.bind('<1>',
lambda event, s=self,button=self._downHourArrowBtn:
s._countDown(button, 3600))
self._downHourArrowBtn.bind('<Any-ButtonRelease-1>',
lambda event, s=self, button=self._downHourArrowBtn:
s._stopUpDown(button))
# Down minute
self._downMinuteArrowBtn.bind('<Configure>',
lambda event, s=self,button=self._downMinuteArrowBtn:
s._drawArrow(button, 'down'))
self._downMinuteArrowBtn.bind('<1>',
lambda event, s=self,button=self._downMinuteArrowBtn:
s._countDown(button, 60))
self._downMinuteArrowBtn.bind('<Any-ButtonRelease-1>',
lambda event, s=self, button=self._downMinuteArrowBtn:
s._stopUpDown(button))
# Down second
self._downSecondArrowBtn.bind('<Configure>',
lambda event, s=self,button=self._downSecondArrowBtn:
s._drawArrow(button, 'down'))
self._downSecondArrowBtn.bind('<1>',
lambda event, s=self, button=self._downSecondArrowBtn:
s._countDown(button,1))
self._downSecondArrowBtn.bind('<Any-ButtonRelease-1>',
lambda event, s=self, button=self._downSecondArrowBtn:
s._stopUpDown(button))
self._hourCounterEntry.component('entry').bind(
'<Return>', self._invoke)
self._minuteCounterEntry.component('entry').bind(
'<Return>', self._invoke)
self._secondCounterEntry.component('entry').bind(
'<Return>', self._invoke)
self._hourCounterEntry.bind('<Configure>', self._resizeArrow)
self._minuteCounterEntry.bind('<Configure>', self._resizeArrow)
self._secondCounterEntry.bind('<Configure>', self._resizeArrow)
def _drawArrow(self, arrow, direction):
drawarrow(arrow, self['hourentry_foreground'], direction, 'arrow')
def _resizeArrow(self, event = None):
for btn in (self._upHourArrowBtn, self._upMinuteArrowBtn,
self._upSecondArrowBtn,
self._downHourArrowBtn,
self._downMinuteArrowBtn, self._downSecondArrowBtn):
bw = (string.atoi(btn['borderwidth']) +
string.atoi(btn['highlightthickness']))
newHeight = self._hourCounterEntry.winfo_reqheight() - 2 * bw
newWidth = int(newHeight * self['buttonaspect'])
btn.configure(width=newWidth, height=newHeight)
self._drawArrow(btn, self.arrowDirection[btn])
def _min(self):
min = self['min']
if min is None:
self._minVal = 0
else:
self._minVal = timestringtoseconds(min)
def _max(self):
max = self['max']
if max is None:
self._maxVal = None
else:
self._maxVal = timestringtoseconds(max)
def getvalue(self):
return self.getstring()
def setvalue(self, text):
list = string.split(text, ':')
if len(list) != 3:
raise ValueError, 'invalid value: ' + text
self._hour = string.atoi(list[0])
self._minute = string.atoi(list[1])
self._second = string.atoi(list[2])
self._setHMS()
def getstring(self):
return '%02d:%02d:%02d' % (self._hour, self._minute, self._second)
def getint(self):
return self._hour * 3600 + self._minute * 60 + self._second
def _countUp(self, button, increment):
self._relief = self._upHourArrowBtn.cget('relief')
button.configure(relief='sunken')
self._count(1, 'start', increment)
def _countDown(self, button, increment):
self._relief = self._downHourArrowBtn.cget('relief')
button.configure(relief='sunken')
self._count(-1, 'start', increment)
def increment(self, seconds = 1):
self._count(1, 'force', seconds)
def decrement(self, seconds = 1):
self._count(-1, 'force', seconds)
def _count(self, factor, newFlag = None, increment = 1):
if newFlag != 'force':
if newFlag is not None:
self._flag = newFlag
if self._flag == 'stopped':
return
value = (string.atoi(self._hourCounterEntry.get()) *3600) + \
(string.atoi(self._minuteCounterEntry.get()) *60) + \
string.atoi(self._secondCounterEntry.get()) + \
factor * increment
min = self._minVal
max = self._maxVal
if value < min:
value = min
if max is not None and value > max:
value = max
self._hour = value /3600
self._minute = (value - (self._hour*3600)) / 60
self._second = value - (self._hour*3600) - (self._minute*60)
self._setHMS()
if newFlag != 'force':
if self['autorepeat']:
if self._flag == 'start':
delay = self['initwait']
self._flag = 'running'
else:
delay = self['repeatrate']
self._timerId = self.after(
delay, lambda self=self, factor=factor,increment=increment:
self._count(factor,'running', increment))
def _setHMS(self):
self._hourCounterEntry.setentry('%02d' % self._hour)
self._minuteCounterEntry.setentry('%02d' % self._minute)
self._secondCounterEntry.setentry('%02d' % self._second)
def _stopUpDown(self, button):
if self._timerId is not None:
self.after_cancel(self._timerId)
self._timerId = None
button.configure(relief=self._relief)
self._flag = 'stopped'
def _invoke(self, event):
cmd = self['command']
if callable(cmd):
cmd()
def invoke(self):
    """Run the user's 'command' callback, if any, and return its result
    (None when no callable command is configured)."""
    command = self['command']
    if not callable(command):
        return None
    return command()
def destroy(self):
    # Cancel any outstanding auto-repeat timer before destroying the
    # widget, so the callback cannot fire against a dead widget.
    if self._timerId is not None:
        self.after_cancel(self._timerId)
        self._timerId = None
    MegaWidget.destroy(self)
######################################################################
### File: PmwAboutDialog.py
class AboutDialog(MessageDialog):
    """Standard 'About' window displaying the application name plus the
    version, copyright and contact text configured application-wide via
    the aboutversion(), aboutcopyright() and aboutcontact() functions."""

    # Class members containing resettable 'default' values:
    _version = ''
    _copyright = ''
    _contact = ''

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('applicationname', '', INITOPT),
            ('iconpos', 'w', None),
            ('icon_bitmap', 'info', None),
            ('buttons', ('Close',), None),
            ('defaultbutton', 0, None),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MessageDialog.__init__(self, parent)

        applicationname = self['applicationname']
        # Only supply a title / message when the caller did not.
        if not kw.has_key('title'):
            self.configure(title = 'About ' + applicationname)

        if not kw.has_key('message_text'):
            # Build the default message from the class-level defaults.
            text = applicationname + '\n\n'
            if AboutDialog._version != '':
                text = text + 'Version ' + AboutDialog._version + '\n'
            if AboutDialog._copyright != '':
                text = text + AboutDialog._copyright + '\n\n'
            if AboutDialog._contact != '':
                text = text + AboutDialog._contact
            self.configure(message_text=text)

        # Check keywords and initialise options.
        self.initialiseoptions()
def aboutversion(value):
    # Set the version string shown by all subsequently created AboutDialogs.
    AboutDialog._version = value
def aboutcopyright(value):
    # Set the copyright string shown by all subsequently created AboutDialogs.
    AboutDialog._copyright = value
def aboutcontact(value):
    # Set the contact string shown by all subsequently created AboutDialogs.
    AboutDialog._contact = value
######################################################################
### File: PmwComboBox.py
# Based on iwidgets2.2.0/combobox.itk code.
import os
import string
import types
import Tkinter
class ComboBox(MegaWidget):
    """Entry field combined with a selection list, either as a popup
    dropdown (default) or with the list permanently displayed below the
    entry.  Based on iwidgets2.2.0/combobox.itk."""

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('autoclear', 0, INITOPT),
            ('buttonaspect', 1.0, INITOPT),
            ('dropdown', 1, INITOPT),
            ('fliparrow', 0, INITOPT),
            ('history', 1, INITOPT),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('listheight', 200, INITOPT),
            ('selectioncommand', None, None),
            ('sticky', 'ew', INITOPT),
            ('unique', 1, INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Create the components.
        interior = self.interior()

        self._entryfield = self.createcomponent('entryfield',
                (('entry', 'entryfield_entry'),), None,
                EntryField, (interior,))
        self._entryfield.grid(column=2, row=2, sticky=self['sticky'])
        interior.grid_columnconfigure(2, weight = 1)
        self._entryWidget = self._entryfield.component('entry')

        if self['dropdown']:
            self._isPosted = 0
            interior.grid_rowconfigure(2, weight = 1)

            # Create the arrow button.
            self._arrowBtn = self.createcomponent('arrowbutton',
                    (), None,
                    Tkinter.Canvas, (interior,), borderwidth = 2,
                    relief = 'raised',
                    width = 16, height = 16)
            # The arrow only follows the vertical components of 'sticky'.
            if 'n' in self['sticky']:
                sticky = 'n'
            else:
                sticky = ''
            if 's' in self['sticky']:
                sticky = sticky + 's'
            self._arrowBtn.grid(column=3, row=2, sticky = sticky)
            self._arrowRelief = self._arrowBtn.cget('relief')

            # Create the label.
            self.createlabel(interior, childCols=2)

            # Create the dropdown window.
            self._popup = self.createcomponent('popup',
                    (), None,
                    Tkinter.Toplevel, (interior,))
            self._popup.withdraw()
            self._popup.overrideredirect(1)

            # Create the scrolled listbox inside the dropdown window.
            self._list = self.createcomponent('scrolledlist',
                    (('listbox', 'scrolledlist_listbox'),), None,
                    ScrolledListBox, (self._popup,),
                    hull_borderwidth = 2,
                    hull_relief = 'raised',
                    hull_height = self['listheight'],
                    usehullsize = 1,
                    listbox_exportselection = 0)
            self._list.pack(expand=1, fill='both')
            self.__listbox = self._list.component('listbox')

            # Bind events to the arrow button.
            self._arrowBtn.bind('<1>', self._postList)
            self._arrowBtn.bind('<Configure>', self._drawArrow)
            self._arrowBtn.bind('<3>', self._next)
            self._arrowBtn.bind('<Shift-3>', self._previous)
            self._arrowBtn.bind('<Down>', self._next)
            self._arrowBtn.bind('<Up>', self._previous)
            self._arrowBtn.bind('<Control-n>', self._next)
            self._arrowBtn.bind('<Control-p>', self._previous)
            self._arrowBtn.bind('<Shift-Down>', self._postList)
            self._arrowBtn.bind('<Shift-Up>', self._postList)
            self._arrowBtn.bind('<F34>', self._postList)
            self._arrowBtn.bind('<F28>', self._postList)
            self._arrowBtn.bind('<space>', self._postList)

            # Bind events to the dropdown window.
            self._popup.bind('<Escape>', self._unpostList)
            self._popup.bind('<space>', self._selectUnpost)
            self._popup.bind('<Return>', self._selectUnpost)
            self._popup.bind('<ButtonRelease-1>', self._dropdownBtnRelease)
            self._popup.bind('<ButtonPress-1>', self._unpostOnNextRelease)

            # Bind events to the Tk listbox.
            self.__listbox.bind('<Enter>', self._unpostOnNextRelease)

            # Bind events to the Tk entry widget.
            self._entryWidget.bind('<Configure>', self._resizeArrow)
            self._entryWidget.bind('<Shift-Down>', self._postList)
            self._entryWidget.bind('<Shift-Up>', self._postList)
            self._entryWidget.bind('<F34>', self._postList)
            self._entryWidget.bind('<F28>', self._postList)

            # Need to unpost the popup if the entryfield is unmapped (eg:
            # its toplevel window is withdrawn) while the popup list is
            # displayed.
            self._entryWidget.bind('<Unmap>', self._unpostList)
        else:
            # Create the scrolled listbox below the entry field.
            self._list = self.createcomponent('scrolledlist',
                    (('listbox', 'scrolledlist_listbox'),), None,
                    ScrolledListBox, (interior,),
                    selectioncommand = self._selectCmd)
            self._list.grid(column=2, row=3, sticky='nsew')
            self.__listbox = self._list.component('listbox')

            # The scrolled listbox should expand vertically.
            interior.grid_rowconfigure(3, weight = 1)

            # Create the label.
            self.createlabel(interior, childRows=2)

        self._entryWidget.bind('<Down>', self._next)
        self._entryWidget.bind('<Up>', self._previous)
        self._entryWidget.bind('<Control-n>', self._next)
        self._entryWidget.bind('<Control-p>', self._previous)
        self.__listbox.bind('<Control-n>', self._next)
        self.__listbox.bind('<Control-p>', self._previous)

        if self['history']:
            self._entryfield.configure(command=self._addHistory)

        # Check keywords and initialise options.
        self.initialiseoptions()

    def destroy(self):
        # Release the grab if the dropdown list is still posted.
        if self['dropdown'] and self._isPosted:
            popgrab(self._popup)
        MegaWidget.destroy(self)

    #======================================================================

    # Public methods

    def get(self, first = None, last=None):
        # With no arguments, return the entry text; otherwise return
        # items from the listbox.
        if first is None:
            return self._entryWidget.get()
        else:
            return self._list.get(first, last)

    def invoke(self):
        # Post the dropdown, or (simple combobox) run the selection command.
        if self['dropdown']:
            self._postList()
        else:
            return self._selectCmd()

    def selectitem(self, index, setentry=1):
        # Select the item given by <index> (an integer or the item's
        # text) and optionally copy it into the entry field.
        if type(index) == types.StringType:
            text = index
            items = self._list.get(0, 'end')
            if text in items:
                index = list(items).index(text)
            else:
                raise IndexError, 'index "%s" not found' % text
        elif setentry:
            text = self._list.get(0, 'end')[index]

        self._list.select_clear(0, 'end')
        self._list.select_set(index, index)
        self._list.activate(index)
        self.see(index)
        if setentry:
            self._entryfield.setentry(text)

    # Need to explicitly forward this to override the stupid
    # (grid_)size method inherited from Tkinter.Frame.Grid.
    def size(self):
        return self._list.size()

    # Need to explicitly forward this to override the stupid
    # (grid_)bbox method inherited from Tkinter.Frame.Grid.
    def bbox(self, index):
        return self._list.bbox(index)

    def clear(self):
        # Empty both the entry field and the list.
        self._entryfield.clear()
        self._list.clear()

    #======================================================================

    # Private methods for both dropdown and simple comboboxes.

    def _addHistory(self):
        # Entryfield command: append the entry's text to the list
        # (unless 'unique' and already present), then select it.
        input = self._entryWidget.get()

        if input != '':
            index = None
            if self['unique']:
                # If item is already in list, select it and return.
                items = self._list.get(0, 'end')
                if input in items:
                    index = list(items).index(input)

            if index is None:
                index = self._list.index('end')
                self._list.insert('end', input)

            self.selectitem(index)
            if self['autoclear']:
                self._entryWidget.delete(0, 'end')

            # Execute the selectioncommand on the new entry.
            self._selectCmd()

    def _next(self, event):
        # Select the next item in the list, wrapping at the end.
        size = self.size()
        if size <= 1:
            return

        cursels = self.curselection()

        if len(cursels) == 0:
            index = 0
        else:
            index = string.atoi(cursels[0])
            if index == size - 1:
                index = 0
            else:
                index = index + 1

        self.selectitem(index)

    def _previous(self, event):
        # Select the previous item in the list, wrapping at the start.
        size = self.size()
        if size <= 1:
            return

        cursels = self.curselection()

        if len(cursels) == 0:
            index = size - 1
        else:
            index = string.atoi(cursels[0])
            if index == 0:
                index = size - 1
            else:
                index = index - 1

        self.selectitem(index)

    def _selectCmd(self, event=None):
        # Copy the current list selection into the entry field and run
        # the user's 'selectioncommand' on it.
        sels = self.getcurselection()
        if len(sels) == 0:
            item = None
        else:
            item = sels[0]
            self._entryfield.setentry(item)

        cmd = self['selectioncommand']
        if callable(cmd):
            if event is None:
                # Return result of selectioncommand for invoke() method.
                return cmd(item)
            else:
                cmd(item)

    #======================================================================

    # Private methods for dropdown combobox.

    def _drawArrow(self, event=None, sunken=0):
        # Redraw the arrow on the canvas button, optionally sunken, and
        # flipped when the list is posted and 'fliparrow' is set.
        arrow = self._arrowBtn
        if sunken:
            self._arrowRelief = arrow.cget('relief')
            arrow.configure(relief = 'sunken')
        else:
            arrow.configure(relief = self._arrowRelief)

        if self._isPosted and self['fliparrow']:
            direction = 'up'
        else:
            direction = 'down'
        drawarrow(arrow, self['entry_foreground'], direction, 'arrow')

    def _postList(self, event = None):
        # Pop up the dropdown list underneath (or above, if it would
        # run off the bottom of the screen) the entry field.
        self._isPosted = 1
        self._drawArrow(sunken=1)

        # Make sure that the arrow is displayed sunken.
        self.update_idletasks()

        x = self._entryfield.winfo_rootx()
        y = self._entryfield.winfo_rooty() + \
            self._entryfield.winfo_height()
        w = self._entryfield.winfo_width() + self._arrowBtn.winfo_width()
        h = self.__listbox.winfo_height()
        sh = self.winfo_screenheight()

        if y + h > sh and y > sh / 2:
            y = self._entryfield.winfo_rooty() - h

        self._list.configure(hull_width=w)

        setgeometryanddeiconify(self._popup, '+%d+%d' % (x, y))

        # Grab the popup, so that all events are delivered to it, and
        # set focus to the listbox, to make keyboard navigation
        # easier.
        pushgrab(self._popup, 1, self._unpostList)
        self.__listbox.focus_set()

        self._drawArrow()

        # Ignore the first release of the mouse button after posting the
        # dropdown list, unless the mouse enters the dropdown list.
        self._ignoreRelease = 1

    def _dropdownBtnRelease(self, event):
        # Button-release in the popup: unpost and, if the release was
        # over the listbox itself, make a selection.
        if (event.widget == self._list.component('vertscrollbar') or
                event.widget == self._list.component('horizscrollbar')):
            return

        if self._ignoreRelease:
            self._unpostOnNextRelease()
            return

        self._unpostList()

        if (event.x >= 0 and event.x < self.__listbox.winfo_width() and
                event.y >= 0 and event.y < self.__listbox.winfo_height()):
            self._selectCmd()

    def _unpostOnNextRelease(self, event = None):
        self._ignoreRelease = 0

    def _resizeArrow(self, event):
        # Keep the arrow button square with the entry field's height,
        # scaled by the 'buttonaspect' option.
        bw = (string.atoi(self._arrowBtn['borderwidth']) +
                string.atoi(self._arrowBtn['highlightthickness']))
        newHeight = self._entryfield.winfo_reqheight() - 2 * bw
        newWidth = int(newHeight * self['buttonaspect'])
        self._arrowBtn.configure(width=newWidth, height=newHeight)
        self._drawArrow()

    def _unpostList(self, event=None):
        if not self._isPosted:
            # It is possible to get events on an unposted popup.  For
            # example, by repeatedly pressing the space key to post
            # and unpost the popup.  The <space> event may be
            # delivered to the popup window even though
            # popgrab() has set the focus away from the
            # popup window.  (Bug in Tk?)
            return

        # Restore the focus before withdrawing the window, since
        # otherwise the window manager may take the focus away so we
        # can't redirect it.  Also, return the grab to the next active
        # window in the stack, if any.
        popgrab(self._popup)
        self._popup.withdraw()

        self._isPosted = 0
        self._drawArrow()

    def _selectUnpost(self, event):
        # Keyboard selection (<space>/<Return>) in the popup.
        self._unpostList()
        self._selectCmd()
# Forward the ScrolledListBox and EntryField public methods onto
# ComboBox via its '_list' and '_entryfield' components.
forwardmethods(ComboBox, ScrolledListBox, '_list')
forwardmethods(ComboBox, EntryField, '_entryfield')
######################################################################
### File: PmwComboBoxDialog.py
# Not Based on iwidgets version.
class ComboBoxDialog(Dialog):
    """Dialog window displaying a simple (non-dropdown) combobox —
    a list plus an entry field — requesting the user to make a
    selection or enter a value.  Not based on the iwidgets version."""

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('borderx', 10, INITOPT),
            ('bordery', 10, INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        Dialog.__init__(self, parent)

        # Create the components.
        interior = self.interior()

        aliases = (
            ('listbox', 'combobox_listbox'),
            ('scrolledlist', 'combobox_scrolledlist'),
            ('entry', 'combobox_entry'),
            ('label', 'combobox_label'),
        )
        self._combobox = self.createcomponent('combobox',
                aliases, None,
                ComboBox, (interior,),
                scrolledlist_dblclickcommand = self.invoke,
                dropdown = 0,
        )
        self._combobox.pack(side='top', expand='true', fill='both',
                padx = self['borderx'], pady = self['bordery'])

        if not kw.has_key('activatecommand'):
            # Whenever this dialog is activated, set the focus to the
            # ComboBox's listbox widget.
            listbox = self.component('listbox')
            self.configure(activatecommand = listbox.focus_set)

        # Check keywords and initialise options.
        self.initialiseoptions()

    # Need to explicitly forward this to override the stupid
    # (grid_)size method inherited from Tkinter.Toplevel.Grid.
    def size(self):
        return self._combobox.size()

    # Need to explicitly forward this to override the stupid
    # (grid_)bbox method inherited from Tkinter.Toplevel.Grid.
    def bbox(self, index):
        return self._combobox.bbox(index)
forwardmethods(ComboBoxDialog, ComboBox, '_combobox')
######################################################################
### File: PmwCounter.py
import string
import sys
import types
import Tkinter
class Counter(MegaWidget):
    """Entry field flanked by two arrow buttons that step its value up
    and down.  The stepping behaviour is defined by the 'datatype'
    option, which maps to one of the _change* functions below (or a
    user-supplied callable)."""

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('autorepeat', 1, None),
            ('buttonaspect', 1.0, INITOPT),
            ('datatype', 'numeric', self._datatype),
            ('increment', 1, None),
            ('initwait', 300, None),
            ('labelmargin', 0, INITOPT),
            ('labelpos', None, INITOPT),
            ('orient', 'horizontal', INITOPT),
            ('padx', 0, INITOPT),
            ('pady', 0, INITOPT),
            ('repeatrate', 50, None),
            ('sticky', 'ew', INITOPT),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise the base class (after defining the options).
        MegaWidget.__init__(self, parent)

        # Initialise instance variables.
        self._timerId = None
        # (button, relief) pair saved while an arrow button is pressed.
        self._normalRelief = None

        # Create the components.
        interior = self.interior()

        # If there is no label, put the arrows and the entry directly
        # into the interior, otherwise create a frame for them.  In
        # either case the border around the arrows and the entry will
        # be raised (but not around the label).
        if self['labelpos'] is None:
            frame = interior
            if not kw.has_key('hull_relief'):
                frame.configure(relief = 'raised')
            if not kw.has_key('hull_borderwidth'):
                frame.configure(borderwidth = 1)
        else:
            frame = self.createcomponent('frame',
                    (), None,
                    Tkinter.Frame, (interior,),
                    relief = 'raised', borderwidth = 1)
            frame.grid(column=2, row=2, sticky=self['sticky'])
            interior.grid_columnconfigure(2, weight=1)
            interior.grid_rowconfigure(2, weight=1)

        # Create the down arrow.
        self._downArrowBtn = self.createcomponent('downarrow',
                (), 'Arrow',
                Tkinter.Canvas, (frame,),
                width = 16, height = 16, relief = 'raised', borderwidth = 2)

        # Create the entry field.
        self._counterEntry = self.createcomponent('entryfield',
                (('entry', 'entryfield_entry'),), None,
                EntryField, (frame,))

        # Create the up arrow.
        self._upArrowBtn = self.createcomponent('uparrow',
                (), 'Arrow',
                Tkinter.Canvas, (frame,),
                width = 16, height = 16, relief = 'raised', borderwidth = 2)

        padx = self['padx']
        pady = self['pady']
        orient = self['orient']
        if orient == 'horizontal':
            self._downArrowBtn.grid(column = 0, row = 0)
            self._counterEntry.grid(column = 1, row = 0,
                    sticky = self['sticky'])
            self._upArrowBtn.grid(column = 2, row = 0)
            frame.grid_columnconfigure(1, weight = 1)
            frame.grid_rowconfigure(0, weight = 1)
            if Tkinter.TkVersion >= 4.2:
                frame.grid_columnconfigure(0, pad = padx)
                frame.grid_columnconfigure(2, pad = padx)
                frame.grid_rowconfigure(0, pad = pady)
        elif orient == 'vertical':
            self._upArrowBtn.grid(column = 0, row = 0, sticky = 's')
            self._counterEntry.grid(column = 0, row = 1,
                    sticky = self['sticky'])
            self._downArrowBtn.grid(column = 0, row = 2, sticky = 'n')
            frame.grid_columnconfigure(0, weight = 1)
            frame.grid_rowconfigure(0, weight = 1)
            frame.grid_rowconfigure(2, weight = 1)
            if Tkinter.TkVersion >= 4.2:
                frame.grid_rowconfigure(0, pad = pady)
                frame.grid_rowconfigure(2, pad = pady)
                frame.grid_columnconfigure(0, pad = padx)
        else:
            raise ValueError, 'bad orient option ' + repr(orient) + \
                ': must be either \'horizontal\' or \'vertical\''

        self.createlabel(interior)

        self._upArrowBtn.bind('<Configure>', self._drawUpArrow)
        self._upArrowBtn.bind('<1>', self._countUp)
        self._upArrowBtn.bind('<Any-ButtonRelease-1>', self._stopCounting)
        self._downArrowBtn.bind('<Configure>', self._drawDownArrow)
        self._downArrowBtn.bind('<1>', self._countDown)
        self._downArrowBtn.bind('<Any-ButtonRelease-1>', self._stopCounting)
        self._counterEntry.bind('<Configure>', self._resizeArrow)
        entry = self._counterEntry.component('entry')
        entry.bind('<Down>', lambda event, s = self: s._key_decrement(event))
        entry.bind('<Up>', lambda event, s = self: s._key_increment(event))

        # Need to cancel the timer if an arrow button is unmapped (eg:
        # its toplevel window is withdrawn) while the mouse button is
        # held down.  The canvas will not get the ButtonRelease event
        # if it is not mapped, since the implicit grab is cancelled.
        self._upArrowBtn.bind('<Unmap>', self._stopCounting)
        self._downArrowBtn.bind('<Unmap>', self._stopCounting)

        # Check keywords and initialise options.
        self.initialiseoptions()

    def _resizeArrow(self, event):
        # Keep both arrow buttons square with the entry field's height,
        # scaled by the 'buttonaspect' option.
        for btn in (self._upArrowBtn, self._downArrowBtn):
            bw = (string.atoi(btn['borderwidth']) +
                    string.atoi(btn['highlightthickness']))
            newHeight = self._counterEntry.winfo_reqheight() - 2 * bw
            newWidth = int(newHeight * self['buttonaspect'])
            btn.configure(width=newWidth, height=newHeight)
            self._drawArrow(btn)

    def _drawUpArrow(self, event):
        self._drawArrow(self._upArrowBtn)

    def _drawDownArrow(self, event):
        self._drawArrow(self._downArrowBtn)

    def _drawArrow(self, arrow):
        # Up/down arrows for vertical orientation, left/right otherwise.
        if self['orient'] == 'vertical':
            if arrow == self._upArrowBtn:
                direction = 'up'
            else:
                direction = 'down'
        else:
            if arrow == self._upArrowBtn:
                direction = 'right'
            else:
                direction = 'left'
        drawarrow(arrow, self['entry_foreground'], direction, 'arrow')

    def _stopCounting(self, event = None):
        # Cancel auto-repeat and restore the pressed button's relief.
        if self._timerId is not None:
            self.after_cancel(self._timerId)
            self._timerId = None
        if self._normalRelief is not None:
            button, relief = self._normalRelief
            button.configure(relief=relief)
            self._normalRelief = None

    def _countUp(self, event):
        self._normalRelief = (self._upArrowBtn, self._upArrowBtn.cget('relief'))
        self._upArrowBtn.configure(relief='sunken')
        # Force arrow down (it may come up immediately, if increment fails).
        self._upArrowBtn.update_idletasks()
        self._count(1, 1)

    def _countDown(self, event):
        self._normalRelief = (self._downArrowBtn, self._downArrowBtn.cget('relief'))
        self._downArrowBtn.configure(relief='sunken')
        # Force arrow down (it may come up immediately, if increment fails).
        self._downArrowBtn.update_idletasks()
        self._count(-1, 1)

    def increment(self):
        # Public method: single step up, no auto-repeat.
        self._forceCount(1)

    def decrement(self):
        # Public method: single step down, no auto-repeat.
        self._forceCount(-1)

    def _key_increment(self, event):
        self._forceCount(1)
        self.update_idletasks()

    def _key_decrement(self, event):
        self._forceCount(-1)
        self.update_idletasks()

    def _datatype(self):
        # Option callback: resolve the 'datatype' option into
        # self._counterCommand (the stepping function) and
        # self._counterArgs (extra keyword arguments for it).
        datatype = self['datatype']

        if type(datatype) is types.DictionaryType:
            self._counterArgs = datatype.copy()
            if self._counterArgs.has_key('counter'):
                datatype = self._counterArgs['counter']
                del self._counterArgs['counter']
            else:
                datatype = 'numeric'
        else:
            self._counterArgs = {}

        if _counterCommands.has_key(datatype):
            self._counterCommand = _counterCommands[datatype]
        elif callable(datatype):
            self._counterCommand = datatype
        else:
            validValues = _counterCommands.keys()
            validValues.sort()
            raise ValueError, ('bad datatype value "%s": must be a' +
                    ' function or one of %s') % (datatype, validValues)

    def _forceCount(self, factor):
        # Single programmatic/keyboard step; rings the bell on invalid
        # input instead of raising.
        if not self.valid():
            self.bell()
            return

        text = self._counterEntry.get()
        try:
            value = apply(self._counterCommand,
                    (text, factor, self['increment']), self._counterArgs)
        except ValueError:
            self.bell()
            return

        previousICursor = self._counterEntry.index('insert')
        if self._counterEntry.setentry(value) == OK:
            self._counterEntry.xview('end')
            self._counterEntry.icursor(previousICursor)

    def _count(self, factor, first):
        # One auto-repeat step; schedules the next step via after()
        # unless stepping fails or 'autorepeat' is off.
        if not self.valid():
            self.bell()
            return

        self._timerId = None
        origtext = self._counterEntry.get()
        try:
            value = apply(self._counterCommand,
                    (origtext, factor, self['increment']), self._counterArgs)
        except ValueError:
            # If text is invalid, stop counting.
            self._stopCounting()
            self.bell()
            return

        # If incrementing produces an invalid value, restore previous
        # text and stop counting.
        previousICursor = self._counterEntry.index('insert')
        valid = self._counterEntry.setentry(value)
        if valid != OK:
            self._stopCounting()
            self._counterEntry.setentry(origtext)
            if valid == PARTIAL:
                self.bell()
            return

        self._counterEntry.xview('end')
        self._counterEntry.icursor(previousICursor)

        if self['autorepeat']:
            # First repeat waits 'initwait' ms, later ones 'repeatrate'.
            if first:
                delay = self['initwait']
            else:
                delay = self['repeatrate']
            self._timerId = self.after(delay,
                    lambda self=self, factor=factor: self._count(factor, 0))

    def destroy(self):
        # Cancel any pending auto-repeat timer before destruction.
        self._stopCounting()
        MegaWidget.destroy(self)
forwardmethods(Counter, EntryField, '_counterEntry')
def _changeNumber(text, factor, increment):
    # Counter callback for the 'numeric'/'integer' datatypes: step the
    # integer in <text> to the next multiple of <increment> in the
    # direction given by the sign of <factor>.  Relies on Python 2
    # integer '/' being floor division.
    value = string.atol(text)
    if factor > 0:
        value = (value / increment) * increment + increment
    else:
        value = ((value - 1) / increment) * increment

    # Get rid of the 'L' at the end of longs (in python up to 1.5.2).
    rtn = str(value)
    if rtn[-1] == 'L':
        return rtn[:-1]
    else:
        return rtn
def _changeReal(text, factor, increment, separator = '.'):
    # Counter callback for the 'real' datatype: step the real number in
    # <text> to the adjacent multiple of <increment>.  <separator> is
    # the decimal point character used in the text.
    value = stringtoreal(text, separator)
    div = value / increment

    # Compare reals using str() to avoid problems caused by binary
    # numbers being only approximations to decimal numbers.
    # For example, if value is -0.3 and increment is 0.1, then
    # int(value/increment) = -2, not -3 as one would expect.
    if str(div)[-2:] == '.0':
        # value is an even multiple of increment.
        div = round(div) + factor
    else:
        # value is not an even multiple of increment.
        div = int(div) * 1.0
        if value < 0:
            div = div - 1
        if factor > 0:
            div = (div + 1)

    value = div * increment

    text = str(value)
    if separator != '.':
        # Render the result using the caller's decimal separator.
        index = string.find(text, '.')
        if index >= 0:
            text = text[:index] + separator + text[index + 1:]
    return text
def _changeDate(value, factor, increment, format = 'ymd',
        separator = '/', yyyy = 0):
    # Counter callback for the 'date' datatype: step the date in
    # <value> by <increment> days via its Julian day number.  <format>
    # gives the field order ('ymd', 'dmy', ...); <yyyy> selects
    # full-year rather than two-digit-year output.
    jdn = datestringtojdn(value, format, separator) + factor * increment

    y, m, d = jdntoymd(jdn)
    result = ''
    for index in range(3):
        if index > 0:
            result = result + separator
        f = format[index]
        if f == 'y':
            if yyyy:
                result = result + '%02d' % y
            else:
                result = result + '%02d' % (y % 100)
        elif f == 'm':
            result = result + '%02d' % m
        elif f == 'd':
            result = result + '%02d' % d

    return result
_SECSPERDAY = 24 * 60 * 60

def _changeTime(value, factor, increment, separator = ':', time24 = 0):
    # Counter callback for the 'time' datatype: step the time in
    # <value> by <increment> seconds.  With <time24>, wrap around
    # within a 24-hour day; otherwise allow negative times, rendered
    # with a leading '-'.  Relies on Python 2 integer '/' being floor
    # division.
    unixTime = timestringtoseconds(value, separator)
    if factor > 0:
        chunks = unixTime / increment + 1
    else:
        chunks = (unixTime - 1) / increment
    unixTime = chunks * increment
    if time24:
        while unixTime < 0:
            unixTime = unixTime + _SECSPERDAY
        while unixTime >= _SECSPERDAY:
            unixTime = unixTime - _SECSPERDAY
    if unixTime < 0:
        unixTime = -unixTime
        sign = '-'
    else:
        sign = ''
    secs = unixTime % 60
    unixTime = unixTime / 60
    mins = unixTime % 60
    hours = unixTime / 60
    return '%s%02d%s%02d%s%02d' % (sign, hours, separator, mins, separator, secs)
# Map of the 'datatype' option values accepted by Counter to their
# stepping functions.
# hexadecimal, alphabetic, alphanumeric not implemented
_counterCommands = {
    'numeric'   : _changeNumber,      # } integer
    'integer'   : _changeNumber,      # } these two use the same function
    'real'      : _changeReal,        # real number
    'time'      : _changeTime,
    'date'      : _changeDate,
}
######################################################################
### File: PmwCounterDialog.py
# A Dialog with a counter
class CounterDialog(Dialog):
def __init__(self, parent = None, **kw):
# Define the megawidget options.
optiondefs = (
('borderx', 20, INITOPT),
('bordery', 20, INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
Dialog.__init__(self, parent)
# Create the components.
interior = self.interior()
# Create the counter.
aliases = (
('entryfield', 'counter_entryfield'),
('entry', 'counter_entryfield_entry'),
('label', 'counter_label')
)
self._cdCounter = self.createcomponent('counter',
aliases, None,
Counter, (interior,))
self._cdCounter.pack(fill='x', expand=1,
padx = self['borderx'], pady = self['bordery'])
if not kw.has_key('activatecommand'):
# Whenever this dialog is activated, set the focus to the
# Counter's entry widget.
tkentry = self.component('entry')
self.configure(activatecommand = tkentry.focus_set)
# Check keywords and initialise options.
self.initialiseoptions()
# Supply aliases to some of the entry component methods.
def insertentry(self, index, text):
self._cdCounter.insert(index, text)
def deleteentry(self, first, last=None):
self._cdCounter.delete(first, last)
def indexentry(self, index):
return self._cdCounter.index(index)
forwardmethods(CounterDialog, Counter, '_cdCounter')
######################################################################
### File: PmwLogicalFont.py
import os
import string
def _font_initialise(root, size=None, fontScheme = None):
    # Install the Pmw default fonts into <root>'s option database.
    # <size> overrides the module-level default point size;
    # <fontScheme> is None, 'pmw1', 'pmw2' or 'default'.
    global _fontSize
    if size is not None:
        _fontSize = size

    if fontScheme in ('pmw1', 'pmw2'):
        if os.name == 'posix':
            defaultFont = logicalfont('Helvetica')
            menuFont = logicalfont('Helvetica', weight='bold', slant='italic')
            scaleFont = logicalfont('Helvetica', slant='italic')
            root.option_add('*Font', defaultFont, 'userDefault')
            root.option_add('*Menu*Font', menuFont, 'userDefault')
            root.option_add('*Menubutton*Font', menuFont, 'userDefault')
            root.option_add('*Scale.*Font', scaleFont, 'userDefault')

            if fontScheme == 'pmw1':
                balloonFont = logicalfont('Helvetica', -6, pixel = '12')
            else: # fontScheme == 'pmw2'
                balloonFont = logicalfont('Helvetica', -2)
            root.option_add('*Balloon.*Font', balloonFont, 'userDefault')
        else:
            # Non-posix platforms get only the plain default font.
            defaultFont = logicalfont('Helvetica')
            root.option_add('*Font', defaultFont, 'userDefault')
    elif fontScheme == 'default':
        defaultFont = ('Helvetica', '-%d' % (_fontSize,), 'bold')
        entryFont = ('Helvetica', '-%d' % (_fontSize,))
        textFont = ('Courier', '-%d' % (_fontSize,))
        root.option_add('*Font', defaultFont, 'userDefault')
        root.option_add('*Entry*Font', entryFont, 'userDefault')
        root.option_add('*Text*Font', textFont, 'userDefault')
def logicalfont(name='Helvetica', sizeIncr = 0, **kw):
    # Build an X logical font description (XLFD) string for the named
    # logical font.  Field values come, in order of preference, from
    # <kw>, the font's _fontInfo defaults, or '*'; each value is then
    # mapped through the per-font and global _propertyAliases tables.
    if not _fontInfo.has_key(name):
        raise ValueError, 'font %s does not exist' % name

    rtn = []
    for field in _fontFields:
        if kw.has_key(field):
            logicalValue = kw[field]
        elif _fontInfo[name].has_key(field):
            logicalValue = _fontInfo[name][field]
        else:
            logicalValue = '*'

        if _propertyAliases[name].has_key((field, logicalValue)):
            realValue = _propertyAliases[name][(field, logicalValue)]
        elif _propertyAliases[name].has_key((field, None)):
            realValue = _propertyAliases[name][(field, None)]
        elif _propertyAliases[None].has_key((field, logicalValue)):
            realValue = _propertyAliases[None][(field, logicalValue)]
        elif _propertyAliases[None].has_key((field, None)):
            realValue = _propertyAliases[None][(field, None)]
        else:
            realValue = logicalValue

        if field == 'size':
            if realValue == '*':
                realValue = _fontSize
            # XLFD point size is in tenths of a point.
            realValue = str((realValue + sizeIncr) * 10)

        rtn.append(realValue)

    return string.join(rtn, '-')
def logicalfontnames():
    """Return the names of all known logical fonts."""
    return list(_fontInfo)
# Default point size; NT renders the same nominal size smaller.
if os.name == 'nt':
    _fontSize = 16
else:
    _fontSize = 14

# The fourteen XLFD fields, in the order they appear in a font string.
_fontFields = (
    'registry', 'foundry', 'family', 'weight', 'slant', 'width', 'style',
    'pixel', 'size', 'xres', 'yres', 'spacing', 'avgwidth', 'charset', 'encoding')

# <_propertyAliases> defines other names for which property values may
# be known by.  This is required because italics in adobe-helvetica
# are specified by 'o', while other fonts use 'i'.
_propertyAliases = {}

# Global (font-independent) aliases, keyed by (field, logicalValue);
# a None logicalValue matches any value of that field.
_propertyAliases[None] = {
    ('slant', 'italic') : 'i',
    ('slant', 'normal') : 'r',
    ('weight', 'light') : 'normal',
    ('width', 'wide') : 'normal',
    ('width', 'condensed') : 'normal',
}

# <_fontInfo> describes a 'logical' font, giving the default values of
# some of its properties.
_fontInfo = {}

_fontInfo['Helvetica'] = {
    'foundry' : 'adobe',
    'family' : 'helvetica',
    'registry' : '',
    'charset' : 'iso8859',
    'encoding' : '1',
    'spacing' : 'p',
    'slant' : 'normal',
    'width' : 'normal',
    'weight' : 'normal',
}

_propertyAliases['Helvetica'] = {
    ('slant', 'italic') : 'o',
    ('weight', 'normal') : 'medium',
    ('weight', 'light') : 'medium',
}

_fontInfo['Times'] = {
    'foundry' : 'adobe',
    'family' : 'times',
    'registry' : '',
    'charset' : 'iso8859',
    'encoding' : '1',
    'spacing' : 'p',
    'slant' : 'normal',
    'width' : 'normal',
    'weight' : 'normal',
}

_propertyAliases['Times'] = {
    ('weight', 'normal') : 'medium',
    ('weight', 'light') : 'medium',
}

_fontInfo['Fixed'] = {
    'foundry' : 'misc',
    'family' : 'fixed',
    'registry' : '',
    'charset' : 'iso8859',
    'encoding' : '1',
    'spacing' : 'c',
    'slant' : 'normal',
    'width' : 'normal',
    'weight' : 'normal',
}

_propertyAliases['Fixed'] = {
    ('weight', 'normal') : 'medium',
    ('weight', 'light') : 'medium',
    ('style', None) : '',
    ('width', 'condensed') : 'semicondensed',
}

_fontInfo['Courier'] = {
    'foundry' : 'adobe',
    'family' : 'courier',
    'registry' : '',
    'charset' : 'iso8859',
    'encoding' : '1',
    'spacing' : 'm',
    'slant' : 'normal',
    'width' : 'normal',
    'weight' : 'normal',
}

_propertyAliases['Courier'] = {
    ('weight', 'normal') : 'medium',
    ('weight', 'light') : 'medium',
    ('style', None) : '',
}

_fontInfo['Typewriter'] = {
    'foundry' : 'b&h',
    'family' : 'lucidatypewriter',
    'registry' : '',
    'charset' : 'iso8859',
    'encoding' : '1',
    'spacing' : 'm',
    'slant' : 'normal',
    'width' : 'normal',
    'weight' : 'normal',
}

_propertyAliases['Typewriter'] = {
    ('weight', 'normal') : 'medium',
    ('weight', 'light') : 'medium',
}

if os.name == 'nt':
    # For some reason 'fixed' fonts on NT aren't.
    _fontInfo['Fixed'] = _fontInfo['Courier']
    _propertyAliases['Fixed'] = _propertyAliases['Courier']
| apache-2.0 |
nomaro/SickBeard_Backup | lib/requests/packages/chardet2/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Module-wide debug switch for the charset detector.
_debug = 0

# Overall detector states.
eDetecting = 0
eFoundIt = 1
eNotMe = 2

# Generic coding state machine states.
eStart = 0
eError = 1
eItsMe = 2

# Confidence level above which detection can stop early.
SHORTCUT_THRESHOLD = 0.95
| gpl-3.0 |
lakshayg/tensorflow | tensorflow/contrib/keras/api/keras/activations/__init__.py | 74 | 1856 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Activation functions.
from tensorflow.python.keras._impl.keras.activations import elu
from tensorflow.python.keras._impl.keras.activations import hard_sigmoid
from tensorflow.python.keras._impl.keras.activations import linear
from tensorflow.python.keras._impl.keras.activations import relu
from tensorflow.python.keras._impl.keras.activations import selu
from tensorflow.python.keras._impl.keras.activations import sigmoid
from tensorflow.python.keras._impl.keras.activations import softmax
from tensorflow.python.keras._impl.keras.activations import softplus
from tensorflow.python.keras._impl.keras.activations import softsign
from tensorflow.python.keras._impl.keras.activations import tanh
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras._impl.keras.activations import deserialize
from tensorflow.python.keras._impl.keras.activations import serialize
from tensorflow.python.keras._impl.keras.activations import get
# Remove the __future__ names from the module namespace so they are not
# re-exported as part of the public keras.activations API.
del absolute_import
del division
del print_function
| apache-2.0 |
xiangpeng/shadowsocks | shadowsocks/crypto/ctypes_openssl.py | 26 | 6805 | #!/usr/bin/env python
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
# Public API of this module: the cipher registry table.
__all__ = ['ciphers']
# Lazy-initialization flag: True once load_openssl() has bound libcrypto.
loaded = False
# Size of the shared output buffer used by CtypesCrypto.update();
# grown on demand when larger payloads arrive.
buf_size = 2048
def load_openssl():
    """Locate libcrypto with ctypes, declare the EVP prototypes used by
    this module, and allocate the shared output buffer.

    Sets the module globals ``libcrypto``, ``buf`` and ``loaded``.
    Raises Exception when no OpenSSL shared library can be found.
    """
    global loaded, libcrypto, CDLL, c_char_p, c_int, c_long, byref,\
        create_string_buffer, c_void_p, buf
    from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
        create_string_buffer, c_void_p
    from ctypes.util import find_library
    # Try the common library names: 'crypto' on Unix, 'eay32'/'libeay32'
    # on Windows builds of OpenSSL.
    for p in ('crypto', 'eay32', 'libeay32'):
        libcrypto_path = find_library(p)
        if libcrypto_path:
            break
    else:
        raise Exception('libcrypto(OpenSSL) not found')
    logging.info('loading libcrypto from %s', libcrypto_path)
    libcrypto = CDLL(libcrypto_path)
    libcrypto.EVP_get_cipherbyname.restype = c_void_p
    libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
    # NOTE(review): canonical EVP_CIPHER_CTX_new() takes no arguments;
    # these argtypes mirror how CtypesCrypto.__init__ calls it below --
    # confirm against the targeted OpenSSL version.
    libcrypto.EVP_CIPHER_CTX_new.argtypes = (c_void_p, c_void_p, c_char_p,
                                             c_char_p)
    libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
                                            c_char_p, c_char_p, c_int)
    libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
                                           c_char_p, c_int)
    libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
    libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
    # Older OpenSSL needs the cipher table populated before lookups work.
    if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
        libcrypto.OpenSSL_add_all_ciphers()
    buf = create_string_buffer(buf_size)
    loaded = True
def load_cipher(cipher_name):
    """Resolve a cipher via its dedicated EVP_* constructor.

    Fallback used when EVP_get_cipherbyname() does not know the name:
    e.g. 'aes-128-cfb' is looked up as libcrypto.EVP_aes_128_cfb.
    Returns the EVP_CIPHER pointer, or None when libcrypto exports no
    such symbol.
    """
    symbol = 'EVP_' + cipher_name.replace('-', '_')
    constructor = getattr(libcrypto, symbol, None)
    if constructor is None:
        return None
    constructor.restype = c_void_p
    return constructor()
class CtypesCrypto(object):
    """Stream cipher wrapper over OpenSSL's EVP interface via ctypes.

    :param cipher_name: OpenSSL cipher name, e.g. 'aes-256-cfb'
    :param key: raw key bytes
    :param iv: raw initialization vector bytes
    :param op: 1 to encrypt, 0 to decrypt (EVP_CipherInit_ex convention)
    """
    def __init__(self, cipher_name, key, iv, op):
        if not loaded:
            load_openssl()
        # Initialize early so clean()/__del__ are safe even when setup
        # fails part-way through.
        self._ctx = None
        cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
        if not cipher:
            # Fall back to the cipher-specific EVP_* constructor.
            cipher = load_cipher(cipher_name)
        if not cipher:
            raise Exception('cipher %s not found in libcrypto' % cipher_name)
        key_ptr = c_char_p(key)
        iv_ptr = c_char_p(iv)
        self._ctx = libcrypto.EVP_CIPHER_CTX_new(cipher, None,
                                                 key_ptr, iv_ptr)
        if not self._ctx:
            raise Exception('can not create cipher context')
        r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
                                        key_ptr, iv_ptr, c_int(op))
        if not r:
            self.clean()
            raise Exception('can not initialize cipher context')

    def update(self, data):
        """Encrypt/decrypt ``data`` and return the transformed bytes."""
        global buf_size, buf
        cipher_out_len = c_long(0)
        l = len(data)
        # Grow the shared output buffer when the input outpaces it.
        if buf_size < l:
            buf_size = l * 2
            buf = create_string_buffer(buf_size)
        libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
                                   byref(cipher_out_len), c_char_p(data), l)
        # buf is copied to a str object when we access buf.raw
        return buf.raw[:cipher_out_len.value]

    def __del__(self):
        self.clean()

    def clean(self):
        """Release the EVP context. Safe to call more than once."""
        if self._ctx:
            libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
            libcrypto.EVP_CIPHER_CTX_free(self._ctx)
            # Bug fix: forget the freed pointer. Previously __del__ (which
            # always calls clean()) would cleanup/free the context a second
            # time after an __init__ failure had already cleaned it.
            self._ctx = None
# Cipher registry exported via __all__.
# Maps OpenSSL cipher name -> (key length in bytes, IV length in bytes,
# implementation class).
ciphers = {
    'aes-128-cfb': (16, 16, CtypesCrypto),
    'aes-192-cfb': (24, 16, CtypesCrypto),
    'aes-256-cfb': (32, 16, CtypesCrypto),
    'aes-128-ofb': (16, 16, CtypesCrypto),
    'aes-192-ofb': (24, 16, CtypesCrypto),
    'aes-256-ofb': (32, 16, CtypesCrypto),
    'aes-128-ctr': (16, 16, CtypesCrypto),
    'aes-192-ctr': (24, 16, CtypesCrypto),
    'aes-256-ctr': (32, 16, CtypesCrypto),
    'aes-128-cfb8': (16, 16, CtypesCrypto),
    'aes-192-cfb8': (24, 16, CtypesCrypto),
    'aes-256-cfb8': (32, 16, CtypesCrypto),
    'aes-128-cfb1': (16, 16, CtypesCrypto),
    'aes-192-cfb1': (24, 16, CtypesCrypto),
    'aes-256-cfb1': (32, 16, CtypesCrypto),
    'bf-cfb': (16, 8, CtypesCrypto),
    'camellia-128-cfb': (16, 16, CtypesCrypto),
    'camellia-192-cfb': (24, 16, CtypesCrypto),
    'camellia-256-cfb': (32, 16, CtypesCrypto),
    'cast5-cfb': (16, 8, CtypesCrypto),
    'des-cfb': (8, 8, CtypesCrypto),
    'idea-cfb': (16, 8, CtypesCrypto),
    'rc2-cfb': (16, 8, CtypesCrypto),
    'rc4': (16, 0, CtypesCrypto),
    'seed-cfb': (16, 16, CtypesCrypto),
}
def test():
from os import urandom
import random
import time
BLOCK_SIZE = 16384
rounds = 1 * 1024
plain = urandom(BLOCK_SIZE * rounds)
import M2Crypto.EVP
# cipher = M2Crypto.EVP.Cipher('aes_128_cfb', 'k' * 32, 'i' * 16, 1,
# key_as_bytes=0, d='md5', salt=None, i=1,
# padding=1)
# decipher = M2Crypto.EVP.Cipher('aes_128_cfb', 'k' * 32, 'i' * 16, 0,
# key_as_bytes=0, d='md5', salt=None, i=1,
# padding=1)
cipher = CtypesCrypto('aes-128-cfb', 'k' * 32, 'i' * 16, 1)
decipher = CtypesCrypto('aes-128-cfb', 'k' * 32, 'i' * 16, 0)
# cipher = Salsa20Cipher('salsa20-ctr', 'k' * 32, 'i' * 8, 1)
# decipher = Salsa20Cipher('salsa20-ctr', 'k' * 32, 'i' * 8, 1)
results = []
pos = 0
print 'salsa20 test start'
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
c = cipher.update(plain[pos:pos + l])
results.append(c)
pos += l
pos = 0
c = ''.join(results)
results = []
while pos < len(plain):
l = random.randint(100, 32768)
results.append(decipher.update(c[pos:pos + l]))
pos += l
end = time.time()
print 'speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start))
assert ''.join(results) == plain
if __name__ == '__main__':
test()
| mit |
codelucas/flask_reddit | flask_reddit/__init__.py | 1 | 1536 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Written by:
Lucas Ou -- http://lucasou.com
"""
from flask import Flask, render_template, url_for
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug.routing import BaseConverter
# Serve static assets from /static and load settings from the top-level
# ``config`` module.
app = Flask(__name__, static_url_path='/static')
app.config.from_object('config')
# Single shared SQLAlchemy handle used by all models.
db = SQLAlchemy(app)
class RegexConverter(BaseConverter):
    """URL route converter that matches an arbitrary regular expression.

    Registered below as ``regex``; the first converter argument in the
    route definition becomes the pattern for that URL segment.
    """
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # BaseConverter matches on ``self.regex``; replace it with the
        # pattern supplied in the route definition.
        self.regex = items[0]
# Expose the converter in routes as <regex("<pattern>"):name>.
app.url_map.converters['regex'] = RegexConverter
@app.errorhandler(404)
def not_found(error):
    # Render the custom 404 page for unknown URLs.
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """Render the custom 500 page for unhandled server errors.

    Renamed from ``not_found``: the module previously defined two handler
    functions with the same name, so this definition shadowed the 404
    handler's module-level name. Registration happens via the decorator,
    so the rename is safe for callers.
    """
    return render_template('500.html'), 500
# Register each feature area as a Flask blueprint. The imports are done
# here, after ``app`` and ``db`` exist, to avoid circular imports with
# the view modules.
from flask_reddit.users.views import mod as users_module
app.register_blueprint(users_module)
from flask_reddit.threads.views import mod as threads_module
app.register_blueprint(threads_module)
from flask_reddit.frontends.views import mod as frontends_module
app.register_blueprint(frontends_module)
from flask_reddit.apis.views import mod as apis_module
app.register_blueprint(apis_module)
from flask_reddit.subreddits.views import mod as subreddits_module
app.register_blueprint(subreddits_module)
def custom_render(template, *args, **kwargs):
    """
    Project-wide wrapper around Flask's render_template.

    NOTE(review): the original docstring said this includes "some
    flask_reddit vars", but no extra variables are injected yet -- it is
    currently a straight pass-through / extension point.
    """
    return render_template(template, *args, **kwargs)
# Mirror the configured DEBUG flag onto the app object.
app.debug = app.config['DEBUG']
# Dev entry point: run the built-in server when executed directly.
# (Python 2 module -- note the print statement.)
if __name__ == '__main__':
    print 'We are running flask via main()'
    app.run()
glomium/elmnt.de | gallery/migrations/0001_initial.py | 1 | 1037 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import filer.fields.image
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the gallery app: creates the ``Photo`` model."""

    dependencies = [
        # Requires django-filer's schema for the FilerImageField below.
        ('filer', '0002_auto_20150606_2003'),
    ]
    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField(verbose_name='date', blank=True)),
                ('slug', models.SlugField(unique=True, verbose_name='slug', blank=True)),
                # SET_NULL keeps the photo row when the underlying filer
                # image is deleted.
                ('image', filer.fields.image.FilerImageField(on_delete=django.db.models.deletion.SET_NULL, default=None, verbose_name='Image', to='filer.Image', null=True)),
            ],
            options={
                'ordering': ['-date'],
                'verbose_name': 'photo',
                'verbose_name_plural': 'photos',
            },
        ),
    ]
| mit |
sadleader/odoo | addons/procurement_jit/__openerp__.py | 312 | 2085 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: the loader evaluates this file as a single
# dict literal describing the module.
{
    'name': 'Just In Time Scheduling',
    'version': '1.0',
    'category': 'Base',
    'description': """
This module allows Just In Time computation of procurement orders.
==================================================================
If you install this module, you will not have to run the regular procurement
scheduler anymore (but you still need to run the minimum order point rule
scheduler, or for example let it run daily).
All procurement orders will be processed immediately, which could in some
cases entail a small performance impact.
It may also increase your stock size because products are reserved as soon
as possible and the scheduler time range is not taken into account anymore.
In that case, you can not use priorities any more on the different picking.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/manufacturing',
    'depends': ['procurement'],
    'data': [],
    'demo': [],
    'test': ['test/procurement_jit.yml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sheeshmohsin/venturesity | flexy/flexy/wsgi.py | 1 | 1416 | """
WSGI config for flexy project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "flexy.settings"
# Point Django at the project settings unless the environment already
# selected a settings module (see the mod_wsgi note above).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "flexy.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit |
jaja14/lab4 | lib/werkzeug/contrib/lint.py | 318 | 12282 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
    """Warning category for WSGI (PEP 333) protocol violations."""
class HTTPWarning(Warning):
    """Warning category for HTTP-level problems (e.g. bad 304 responses)."""
def check_string(context, obj, stacklevel=3):
    """Emit a WSGIWarning unless *obj* is exactly a native ``str``
    (a bytestring on Python 2), naming *context* in the message."""
    if type(obj) is str:
        return
    warn(WSGIWarning('%s requires bytestrings, got %s'
                     % (context, obj.__class__.__name__)))
class InputStream(object):
    """Lint wrapper around ``wsgi.input``.

    Forwards all reads to the wrapped stream while warning about read
    patterns that PEP 333 does not guarantee to be safe, and about the
    application closing a stream it does not own.
    """
    def __init__(self, stream):
        self._stream = stream
    def read(self, *args):
        # An unbounded read() may never return on a conforming server:
        # WSGI guarantees no EOF marker on the input stream.
        if len(args) == 0:
            warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
                             'input stream, thus making calls to '
                             'wsgi.input.read() unsafe. Conforming servers '
                             'may never return from this call.'),
                 stacklevel=2)
        elif len(args) != 1:
            warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
                 stacklevel=2)
        return self._stream.read(*args)
    def readline(self, *args):
        if len(args) == 0:
            warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
                             ' are unsafe. Use wsgi.input.read() instead.'),
                 stacklevel=2)
        elif len(args) == 1:
            # A size hint is a server extension, not part of WSGI itself.
            warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
                             'WSGI does not support this, although it\'s available '
                             'on all major servers.'),
                 stacklevel=2)
        else:
            raise TypeError('too many arguments passed to wsgi.input.readline()')
        return self._stream.readline(*args)
    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            # Non-iterable input streams violate WSGI; degrade to empty.
            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
            return iter(())
    def close(self):
        # Per PEP 333 the server owns wsgi.input; apps must not close it.
        warn(WSGIWarning('application closed the input stream!'),
             stacklevel=2)
        self._stream.close()
class ErrorStream(object):
    """Lint wrapper around ``wsgi.errors``.

    Forwards writes to the real error stream while checking that only
    bytestrings are written, and warns when the application closes the
    stream (the server owns it, per PEP 333).
    """
    def __init__(self, stream):
        self._stream = stream
    def write(self, s):
        check_string('wsgi.error.write()', s)
        self._stream.write(s)
    def flush(self):
        self._stream.flush()
    def writelines(self, seq):
        # Bug fix: previously wrote ``seq`` (the whole sequence) once per
        # item instead of writing each line.
        for line in seq:
            self.write(line)
    def close(self):
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()
class GuardedWrite(object):
    """Wraps the ``write`` callable returned by ``start_response``.

    Checks that only bytestrings are written and records each chunk's
    length in *chunks* so GuardedIterator.close() can compare the bytes
    actually sent against the Content-Length header.
    """
    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks
    def __call__(self, s):
        check_string('write()', s)
        # Bug fix: ``write`` is the WSGI write *callable* (PEP 333), not a
        # file-like object -- calling ``.write`` on it raised AttributeError.
        self._write(s)
        self._chunks.append(len(s))
class GuardedIterator(object):
    """Wraps the application iterator to lint iteration and close().

    Records chunk sizes, warns when the app yields before the response
    was started or after close(), and on close() cross-checks the bytes
    sent against the status code and Content-Length header.
    """
    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        # Python 2 iterator protocol: bind the ``.next`` method once.
        self._next = iter(iterator).next
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks
    def __iter__(self):
        return self
    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'),
                 stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv
    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()
        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)
            if status_code == 304:
                # 304 responses must carry neither entity headers nor a body.
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    # Bug fix: '%' was previously applied to the HTTPWarning
                    # *instance* instead of the format string, raising
                    # TypeError whenever this lint fired.
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))
    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                # warn() can fail during interpreter shutdown; ignore.
                pass
class LintMiddleware(object):
    """This middleware wraps an application and warns on common errors.
    Among other thing it currently checks for the following problems:
    - invalid status codes
    - non-bytestrings sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators
    Detected errors are emitted using the standard Python :mod:`warnings`
    system and usually end up on :data:`stderr`.
    ::
        from werkzeug.contrib.lint import LintMiddleware
        app = LintMiddleware(app)
    :param app: the application to wrap
    """
    def __init__(self, app):
        self.app = app
    def check_environ(self, environ):
        """Warn about missing or malformed keys in the WSGI environ."""
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard python dict.'),
                 stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                                 % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)
        script_name = environ.get('SCRIPT_NAME', '')
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                             % script_name), stacklevel=3)
        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                             % path_info), stacklevel=3)
    def check_start_response(self, status, headers, exc_info):
        """Validate start_response() arguments.

        Returns ``(status_code, Headers)`` for later use by the guarded
        iterator.
        """
        check_string('status', status)
        status_code = status.split(None, 1)[0]
        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
        if len(status) < 4 or status[3] != ' ':
            # Bug fix: the %r placeholder was never interpolated; include
            # the offending status string in the message.
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation' % status),
                 stacklevel=3)
        status_code = int(status_code)
        if status_code < 100:
            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
        if type(headers) is not list:
            warn(WSGIWarning('header list is not a list'), stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('Headers must tuple 2-item tuples'),
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning('header items must be strings'),
                     stacklevel=3)
            if name.lower() == 'status':
                warn(WSGIWarning('The status header is not supported due to '
                                 'conflicts with the CGI spec.'),
                     stacklevel=3)
        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
        headers = Headers(headers)
        self.check_headers(headers)
        return status_code, headers
    def check_headers(self, headers):
        """Warn about unquoted ETags and non-absolute Location headers."""
        etag = headers.get('etag')
        if etag is not None:
            if etag.startswith('w/'):
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
        location = headers.get('location')
        if location is not None:
            if not urlparse(location).netloc:
                warn(HTTPWarning('absolute URLs required for location header'),
                     stacklevel=4)
    def check_iterator(self, app_iter):
        """Warn when the application returns a bare string response."""
        if isinstance(app_iter, string_types):
            warn(WSGIWarning('application returned string. Response will '
                             'send character for character to the client '
                             'which will kill the performance. Return a '
                             'list or iterable instead.'), stacklevel=3)
    def __call__(self, *args, **kwargs):
        if len(args) != 2:
            warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
        if kwargs:
            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
                 stacklevel=2)
        environ, start_response = args
        self.check_environ(environ)
        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
        # hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length
        environ['wsgi.file_wrapper'] = FileWrapper
        headers_set = []
        chunks = []
        def checking_start_response(*args, **kwargs):
            """Validating stand-in for the server's start_response."""
            if len(args) not in (2, 3):
                # Bug fix: stacklevel was previously passed to the
                # WSGIWarning constructor instead of warn().
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
            if kwargs:
                warn(WSGIWarning('no keyword arguments allowed.'))
            status, headers = args[:2]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)
        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
| apache-2.0 |
rue89-tech/edx-platform | lms/djangoapps/ccx/views.py | 10 | 18760 | """
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from copy import deepcopy
from cStringIO import StringIO
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django_future.csrf import ensure_csrf_cookie # pylint: disable=import-error
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from courseware.courses import get_course_by_id # pylint: disable=import-error
from courseware.field_overrides import disable_overrides # pylint: disable=import-error
from courseware.grades import iterate_grades_for # pylint: disable=import-error
from courseware.model_data import FieldDataCache # pylint: disable=import-error
from courseware.module_render import get_module_for_descriptor # pylint: disable=import-error
from edxmako.shortcuts import render_to_response # pylint: disable=import-error
from opaque_keys.edx.keys import CourseKey
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from instructor.offline_gradecalc import student_grades # pylint: disable=import-error
from instructor.views.api import _split_input_list # pylint: disable=import-error
from instructor.views.tools import get_student_from_identifier # pylint: disable=import-error
from .models import CustomCourseForEdX, CcxMembership
from .overrides import (
clear_override_for_ccx,
get_override_for_ccx,
override_field_for_ccx,
ccx_context,
)
from .utils import (
enroll_email,
unenroll_email,
)
from ccx import ACTIVE_CCX_KEY # pylint: disable=import-error
# Module-level logger for the CCX coach views.
log = logging.getLogger(__name__)
# Indirection over ``datetime.datetime.today`` so tests can patch "now".
TODAY = datetime.datetime.today  # for patching in tests
def coach_dashboard(view):
    """
    View decorator which enforces that the user have the CCX coach role on the
    given course and goes ahead and translates the course_id from the Django
    route into a course object.
    """
    @functools.wraps(view)
    def wrapper(request, course_id):
        """
        Wraps the view function, performing access check, loading the course,
        and modifying the view's call signature.
        """
        course_key = CourseKey.from_string(course_id)
        role = CourseCcxCoachRole(course_key)
        if not role.has_user(request.user):
            return HttpResponseForbidden(
                _('You must be a CCX Coach to access this view.'))
        # depth=None loads the full course tree; several wrapped views
        # walk chapters/sequentials/verticals.
        course = get_course_by_id(course_key, depth=None)
        # The wrapped view receives the loaded course, not the raw id.
        return view(request, course)
    return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course):
    """
    Display the CCX Coach Dashboard.

    With an existing CCX: schedule, membership, gradebook/CSV links and
    the (possibly overridden) grading policy. Without one: the creation
    form URL.
    """
    ccx = get_ccx_for_coach(course, request.user)
    context = {
        'course': course,
        'ccx': ccx,
    }
    if ccx:
        schedule = get_ccx_schedule(course, ccx)
        grading_policy = get_override_for_ccx(
            ccx, course, 'grading_policy', course.grading_policy)
        context['schedule'] = json.dumps(schedule, indent=4)
        context['save_url'] = reverse(
            'save_ccx', kwargs={'course_id': course.id})
        context['ccx_members'] = CcxMembership.objects.filter(ccx=ccx)
        context['gradebook_url'] = reverse(
            'ccx_gradebook', kwargs={'course_id': course.id})
        context['grades_csv_url'] = reverse(
            'ccx_grades_csv', kwargs={'course_id': course.id})
        context['grading_policy'] = json.dumps(grading_policy, indent=4)
        context['grading_policy_url'] = reverse(
            'ccx_set_grading_policy', kwargs={'course_id': course.id})
    else:
        # No CCX yet for this coach: offer the creation form instead.
        context['create_ccx_url'] = reverse(
            'create_ccx', kwargs={'course_id': course.id})
    return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course):
    """
    Create a new CCX

    Creates the CCX record for the requesting coach, starts it today,
    clears the course-wide due date and hides every schedulable unit so
    the coach can reveal them from the schedule editor.
    """
    name = request.POST.get('name')
    ccx = CustomCourseForEdX(
        course_id=course.id,
        coach=request.user,
        display_name=name)
    ccx.save()
    # Make sure start/due are overridden for entire course
    start = TODAY().replace(tzinfo=pytz.UTC)
    override_field_for_ccx(ccx, course, 'start', start)
    override_field_for_ccx(ccx, course, 'due', None)
    # Hide anything that can show up in the schedule
    # (three levels: chapter -> sequential -> vertical).
    hidden = 'visible_to_staff_only'
    for chapter in course.get_children():
        override_field_for_ccx(ccx, chapter, hidden, True)
        for sequential in chapter.get_children():
            override_field_for_ccx(ccx, sequential, hidden, True)
            for vertical in sequential.get_children():
                override_field_for_ccx(ccx, vertical, hidden, True)
    url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
    return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course):
    """
    Save changes to CCX.

    Expects the request body to be the JSON schedule produced by the coach
    dashboard. Applies visibility/start/due overrides per unit, moves the
    course start to the earliest unit start, reconciles the grading
    policy's min_count with the number of visible graded units, and
    returns the saved schedule plus policy as JSON.
    """
    ccx = get_ccx_for_coach(course, request.user)

    def override_fields(parent, data, graded, earliest=None):
        """
        Recursively apply CCX schedule data to CCX by overriding the
        `visible_to_staff_only`, `start` and `due` fields for units in the
        course. Returns the earliest start date seen in this subtree.
        """
        blocks = {
            str(child.location): child
            for child in parent.get_children()}
        for unit in data:
            block = blocks[unit['location']]
            override_field_for_ccx(
                ccx, block, 'visible_to_staff_only', unit['hidden'])
            start = parse_date(unit['start'])
            if start:
                if not earliest or start < earliest:
                    earliest = start
                override_field_for_ccx(ccx, block, 'start', start)
            else:
                clear_override_for_ccx(ccx, block, 'start')
            due = parse_date(unit['due'])
            if due:
                override_field_for_ccx(ccx, block, 'due', due)
            else:
                clear_override_for_ccx(ccx, block, 'due')
            if not unit['hidden'] and block.graded:
                # Count visible graded units per assignment format so the
                # grading policy can be reconciled below.
                graded[block.format] = graded.get(block.format, 0) + 1
            children = unit.get('children', None)
            if children:
                # Bug fix: the recursive result was previously discarded,
                # so earlier start dates found on child units never
                # propagated up to the course start override.
                earliest = override_fields(block, children, graded, earliest)
        return earliest

    graded = {}
    earliest = override_fields(course, json.loads(request.body), graded)
    if earliest:
        override_field_for_ccx(ccx, course, 'start', earliest)
    # Attempt to automatically adjust grading policy
    changed = False
    policy = get_override_for_ccx(
        ccx, course, 'grading_policy', course.grading_policy
    )
    policy = deepcopy(policy)
    grader = policy['GRADER']
    for section in grader:
        count = graded.get(section.get('type'), 0)
        if count < section['min_count']:
            # Never require more graded units than the CCX actually shows.
            changed = True
            section['min_count'] = count
    if changed:
        override_field_for_ccx(ccx, course, 'grading_policy', policy)
    return HttpResponse(
        json.dumps({
            'schedule': get_ccx_schedule(course, ccx),
            'grading_policy': json.dumps(policy, indent=4)}),
        content_type='application/json',
    )
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course):
    """
    Set grading policy for the CCX.

    The posted 'policy' field is a JSON document; it is stored as a CCX
    field override on the course. Then redirects to the dashboard.
    """
    ccx = get_ccx_for_coach(course, request.user)
    override_field_for_ccx(
        ccx, course, 'grading_policy', json.loads(request.POST['policy']))
    url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
    return redirect(url)
def validate_date(year, month, day, hour, minute):
    """Return True when the values form a real calendar date and time.

    Guards the db against corrupt values coming from the schedule form.
    Delegates the range checks to ``datetime`` itself, which also rejects
    impossible combinations (e.g. February 30, year 0) that the previous
    per-field range checks accepted -- those values then crashed
    ``parse_date`` inside the datetime constructor.
    """
    try:
        datetime.datetime(year, month, day, hour, minute)
    except ValueError:
        return False
    return True
def parse_date(datestring):
    """
    Generate a UTC datetime.datetime object from a string of the form
    'YYYY-MM-DD HH:MM'. If string is empty or `None`, returns `None`.
    Values rejected by validate_date() also yield `None`.
    """
    if not datestring:
        return None
    date_part, time_part = datestring.split(' ')
    year, month, day = [int(piece) for piece in date_part.split('-')]
    hour, minute = [int(piece) for piece in time_part.split(':')]
    if not validate_date(year, month, day, hour, minute):
        return None
    return datetime.datetime(year, month, day, hour, minute,
                             tzinfo=pytz.UTC)
def get_ccx_for_coach(course, coach):
    """
    Looks to see if user is coach of a CCX for this course. Returns the CCX or
    None.

    NOTE(review): assumes at most one CCX per (course, coach) pair;
    ``MultipleObjectsReturned`` would propagate if that invariant breaks.
    """
    try:
        return CustomCourseForEdX.objects.get(
            course_id=course.id,
            coach=coach)
    except CustomCourseForEdX.DoesNotExist:
        # No CCX has been created yet for this coach/course pair.
        return None
def get_ccx_schedule(course, ccx):
    """
    Generate a JSON serializable CCX schedule.

    Produces a tuple-of-dicts tree, at most three levels deep
    (chapter -> sequential -> vertical), with per-unit start/due dates
    and hidden flags reflecting any CCX field overrides.
    """
    def visit(node, depth=1):
        """
        Recursive generator function which yields CCX schedule nodes.
        We convert dates to string to get them ready for use by the js date
        widgets, which use text inputs.
        """
        for child in node.get_children():
            start = get_override_for_ccx(ccx, child, 'start', None)
            if start:
                # Strip the trailing 9 characters of str(datetime)
                # (seconds and UTC offset) -- the widgets expect
                # 'YYYY-MM-DD HH:MM'.
                start = str(start)[:-9]
            due = get_override_for_ccx(ccx, child, 'due', None)
            if due:
                due = str(due)[:-9]
            hidden = get_override_for_ccx(
                ccx, child, 'visible_to_staff_only',
                child.visible_to_staff_only)
            visited = {
                'location': str(child.location),
                'display_name': child.display_name,
                'category': child.category,
                'start': start,
                'due': due,
                'hidden': hidden,
            }
            if depth < 3:
                children = tuple(visit(child, depth + 1))
                if children:
                    visited['children'] = children
                    yield visited
                else:
                    yield visited
    # Overrides are read explicitly above; disable the automatic override
    # machinery while walking the tree.
    with disable_overrides():
        return tuple(visit(course))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course):
    """
    get json representation of ccx schedule
    """
    ccx = get_ccx_for_coach(course, request.user)
    schedule = get_ccx_schedule(course, ccx)
    json_schedule = json.dumps(schedule, indent=4)
    # ``content_type`` replaces the deprecated ``mimetype`` keyword
    # (removed in Django 1.7) and matches save_ccx() above.
    return HttpResponse(json_schedule, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course):
    """
    Invite users to new ccx

    Handles the bulk enroll/unenroll form: splits the submitted
    identifier list, resolves each identifier to an email address, and
    performs the requested action. Invalid identifiers are logged and
    skipped rather than aborting the batch.
    """
    ccx = get_ccx_for_coach(course, request.user)
    action = request.POST.get('enrollment-button')
    identifiers_raw = request.POST.get('student-ids')
    identifiers = _split_input_list(identifiers_raw)
    # Idiom fix: checkbox presence is already a boolean; the former
    # ``True if ... else False`` was redundant.
    auto_enroll = 'auto-enroll' in request.POST
    email_students = 'email-students' in request.POST
    for identifier in identifiers:
        user = None
        email = None
        try:
            user = get_student_from_identifier(identifier)
        except User.DoesNotExist:
            # Not a known user identifier: treat the raw string as an
            # email address and validate it below.
            email = identifier
        else:
            email = user.email
        try:
            validate_email(email)
            if action == 'Enroll':
                enroll_email(
                    ccx,
                    email,
                    auto_enroll=auto_enroll,
                    email_students=email_students
                )
            if action == "Unenroll":
                unenroll_email(ccx, email, email_students=email_students)
        except ValidationError:
            log.info('Invalid user name or email when trying to invite students: %s', email)
    url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
    return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course):
    """
    Manage the enrollment of an individual student in a CCX.

    Adds or revokes the posted student according to the submitted action and
    redirects back to the coach dashboard.
    """
    ccx = get_ccx_for_coach(course, request.user)
    action = request.POST.get('student-action', None)
    student_id = request.POST.get('student-id', '')
    try:
        student = get_student_from_identifier(student_id)
    except User.DoesNotExist:
        # No matching account: treat the submitted identifier as an email.
        email = student_id
    else:
        email = student.email
    try:
        validate_email(email)
        if action == 'add':
            # by decree, no emails sent to students added this way
            # by decree, any students added this way are auto_enrolled
            enroll_email(ccx, email, auto_enroll=True, email_students=False)
        elif action == 'revoke':
            unenroll_email(ccx, email, email_students=False)
    except ValidationError:
        log.info('Invalid user name or email when trying to enroll student: %s', email)
    return redirect(reverse('ccx_coach_dashboard', kwargs={'course_id': course.id}))
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course):
    """
    Show the gradebook for this CCX.

    Renders courseware/gradebook.html with a grade summary for every active
    member of the coach's CCX.
    """
    # Need course module for overrides to function properly
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, course, depth=2)
    course = get_module_for_descriptor(
        request.user, request, course, field_data_cache, course.id)
    ccx = get_ccx_for_coach(course, request.user)
    # Everything below (grading included) must run inside ccx_context so the
    # CCX field overrides are in effect.
    with ccx_context(ccx):
        # The grading policy for the MOOC is probably already cached. We need
        # to make sure we have the CCX grading policy loaded.
        course._field_data_cache = {} # pylint: disable=protected-access
        course.set_grading_policy(course.grading_policy)
        # Only active CCX memberships are graded/listed.
        enrolled_students = User.objects.filter(
            ccxmembership__ccx=ccx,
            ccxmembership__active=1
        ).order_by('username').select_related("profile")
        student_info = [
            {
                'username': student.username,
                'id': student.id,
                'email': student.email,
                'grade_summary': student_grades(student, request, course),
                'realname': student.profile.name,
            }
            for student in enrolled_students
        ]
        return render_to_response('courseware/gradebook.html', {
            'students': student_info,
            'course': course,
            'course_id': course.id,
            'staff_access': request.user.is_staff,
            # Grade cutoffs sorted highest cutoff value first.
            'ordered_grades': sorted(
                course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
        })
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course):
    """
    Download grades as CSV.

    Builds one CSV row per successfully graded active CCX member, with
    id/email/username/overall-grade columns followed by one column per
    section of the grading breakdown.
    """
    # Need course module for overrides to function properly
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, course, depth=2)
    course = get_module_for_descriptor(
        request.user, request, course, field_data_cache, course.id)
    ccx = get_ccx_for_coach(course, request.user)
    # Grading must happen inside ccx_context so the CCX overrides apply.
    with ccx_context(ccx):
        # The grading policy for the MOOC is probably already cached. We need
        # to make sure we have the CCX grading policy loaded.
        course._field_data_cache = {} # pylint: disable=protected-access
        course.set_grading_policy(course.grading_policy)
        enrolled_students = User.objects.filter(
            ccxmembership__ccx=ccx,
            ccxmembership__active=1
        ).order_by('username').select_related("profile")
        grades = iterate_grades_for(course, enrolled_students)
        header = None
        rows = []
        for student, gradeset, __ in grades:
            if gradeset:
                # We were able to successfully grade this student for this
                # course.
                if not header:
                    # Encode the header row in utf-8 encoding in case there are
                    # unicode characters
                    #
                    # The column labels come from the first successfully
                    # graded student; later students' percents are matched to
                    # these labels (missing labels fall back to 0.0 below).
                    header = [section['label'].encode('utf-8')
                              for section in gradeset[u'section_breakdown']]
                    rows.append(["id", "email", "username", "grade"] + header)
                percents = {
                    section['label']: section.get('percent', 0.0)
                    for section in gradeset[u'section_breakdown']
                    if 'label' in section
                }
                # NOTE(review): `header` holds utf-8 byte strings while the
                # `percents` keys are the raw (unicode) labels; non-ASCII
                # labels would not match and silently become 0.0 -- confirm.
                row_percents = [percents.get(label, 0.0) for label in header]
                rows.append([student.id, student.email, student.username,
                             gradeset['percent']] + row_percents)
        buf = StringIO()
        writer = csv.writer(buf)
        for row in rows:
            writer.writerow(row)
        return HttpResponse(buf.getvalue(), content_type='text/plain')
@login_required
def switch_active_ccx(request, course_id, ccx_id=None):
    """Set the active CCX for the logged-in user.

    Validates that the requested CCX exists, belongs to the given course and
    that the user is an active member of it; otherwise the active CCX is
    cleared (set to None). Always redirects to the course root.
    """
    course_key = CourseKey.from_string(course_id)
    # will raise Http404 if course_id is bad
    course = get_course_by_id(course_key)
    course_url = reverse(
        'course_root', args=[course.id.to_deprecated_string()]
    )
    if ccx_id is not None:
        try:
            requested_ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
            # Explicit check instead of `assert`: assertions are stripped when
            # Python runs with -O, which would silently disable validation.
            if unicode(requested_ccx.course_id) != course_id:
                log.warning(
                    'CCX %s does not belong to course %s; clearing active CCX',
                    ccx_id, course_id)
                ccx_id = None
            elif not CcxMembership.objects.filter(
                ccx=requested_ccx, student=request.user, active=True
            ).exists():
                # Not an active member of the requested CCX.
                ccx_id = None
        except CustomCourseForEdX.DoesNotExist:
            log.warning('Requested CCX %s does not exist; clearing active CCX',
                        ccx_id)
            ccx_id = None
    request.session[ACTIVE_CCX_KEY] = ccx_id
    return HttpResponseRedirect(course_url)
| agpl-3.0 |
nordri/check_domains | lib/python2.7/site-packages/django/contrib/messages/storage/fallback.py | 704 | 2172 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
    """
    Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend.
    """
    # Backends are tried in declaration order: cookie first, then session.
    storage_classes = (CookieStorage, SessionStorage)
    def __init__(self, *args, **kwargs):
        super(FallbackStorage, self).__init__(*args, **kwargs)
        # Instantiate every backend up front, sharing this request's args.
        self.storages = [storage_class(*args, **kwargs)
                         for storage_class in self.storage_classes]
        # Backends that yielded messages on retrieval; they must be flushed
        # on store even when there is nothing left to save.
        self._used_storages = set()
    def _get(self, *args, **kwargs):
        """
        Gets a single list of messages from all storage backends.
        """
        all_messages = []
        for storage in self.storages:
            messages, all_retrieved = storage._get()
            # If the backend hasn't been used, no more retrieval is necessary.
            if messages is None:
                break
            if messages:
                self._used_storages.add(storage)
            all_messages.extend(messages)
            # If this storage class contained all the messages, no further
            # retrieval is necessary
            if all_retrieved:
                break
        # NOTE(review): `all_retrieved` would be unbound if `self.storages`
        # were empty; `storage_classes` guarantees two backends -- confirm
        # before allowing subclasses with an empty tuple.
        return all_messages, all_retrieved
    def _store(self, messages, response, *args, **kwargs):
        """
        Stores the messages, returning any unstored messages after trying all
        backends.
        For each storage backend, any messages not stored are passed on to the
        next backend.
        """
        for storage in self.storages:
            if messages:
                # presumably remove_oldest=False makes a too-small backend
                # hand back the full list instead of truncating it -- confirm
                # against the backend implementations.
                messages = storage._store(messages, response,
                                          remove_oldest=False)
            # Even if there are no more messages, continue iterating to ensure
            # storages which contained messages are flushed.
            elif storage in self._used_storages:
                storage._store([], response)
                self._used_storages.remove(storage)
        return messages
| gpl-3.0 |
ephes/scikit-learn | sklearn/neighbors/kde.py | 303 | 7983 | """
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..utils import check_array, check_random_state
from ..utils.extmath import row_norms
from .ball_tree import BallTree, DTYPE
from .kd_tree import KDTree
# Kernel names accepted by KernelDensity (validated in __init__).
VALID_KERNELS = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear',
                 'cosine']
# Maps the `algorithm` constructor argument to the tree class implementing it.
TREE_DICT = {'ball_tree': BallTree, 'kd_tree': KDTree}
# TODO: implement a brute force version for testing purposes
# TODO: bandwidth estimation
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
    """Kernel Density Estimation

    Read more in the :ref:`User Guide <kernel_density>`.

    Parameters
    ----------
    bandwidth : float
        The bandwidth of the kernel.
    algorithm : string
        The tree algorithm to use.  Valid options are
        ['kd_tree'|'ball_tree'|'auto'].  Default is 'auto'.
    kernel : string
        The kernel to use.  Valid kernels are
        ['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']
        Default is 'gaussian'.
    metric : string
        The distance metric to use.  Note that not all metrics are
        valid with all algorithms.  Refer to the documentation of
        :class:`BallTree` and :class:`KDTree` for a description of
        available algorithms.  Note that the normalization of the density
        output is correct only for the Euclidean distance metric. Default
        is 'euclidean'.
    atol : float
        The desired absolute tolerance of the result.  A larger tolerance will
        generally lead to faster execution. Default is 0.
    rtol : float
        The desired relative tolerance of the result.  A larger tolerance will
        generally lead to faster execution. Default is 0.
    breadth_first : boolean
        If true (default), use a breadth-first approach to the problem.
        Otherwise use a depth-first approach.
    leaf_size : int
        Specify the leaf size of the underlying tree.  See :class:`BallTree`
        or :class:`KDTree` for details.  Default is 40.
    metric_params : dict
        Additional parameters to be passed to the tree for use with the
        metric.  For more information, see the documentation of
        :class:`BallTree` or :class:`KDTree`.
    """
    def __init__(self, bandwidth=1.0, algorithm='auto',
                 kernel='gaussian', metric="euclidean", atol=0, rtol=0,
                 breadth_first=True, leaf_size=40, metric_params=None):
        self.algorithm = algorithm
        self.bandwidth = bandwidth
        self.kernel = kernel
        self.metric = metric
        self.atol = atol
        self.rtol = rtol
        self.breadth_first = breadth_first
        self.leaf_size = leaf_size
        self.metric_params = metric_params
        # run the choose algorithm code so that exceptions will happen here
        # we're using clone() in the GenerativeBayes classifier,
        # so we can't do this kind of logic in __init__
        self._choose_algorithm(self.algorithm, self.metric)
        if bandwidth <= 0:
            raise ValueError("bandwidth must be positive")
        if kernel not in VALID_KERNELS:
            raise ValueError("invalid kernel: '{0}'".format(kernel))
    def _choose_algorithm(self, algorithm, metric):
        # given the algorithm string + metric string, choose the optimal
        # algorithm to compute the result.
        if algorithm == 'auto':
            # use KD Tree if possible
            if metric in KDTree.valid_metrics:
                return 'kd_tree'
            elif metric in BallTree.valid_metrics:
                return 'ball_tree'
            else:
                raise ValueError("invalid metric: '{0}'".format(metric))
        elif algorithm in TREE_DICT:
            if metric not in TREE_DICT[algorithm].valid_metrics:
                raise ValueError("invalid metric for {0}: "
                                 "'{1}'".format(TREE_DICT[algorithm],
                                                metric))
            return algorithm
        else:
            raise ValueError("invalid algorithm: '{0}'".format(algorithm))
    def fit(self, X, y=None):
        """Fit the Kernel Density model on the data.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        self : object
            The fitted estimator.
        """
        algorithm = self._choose_algorithm(self.algorithm, self.metric)
        X = check_array(X, order='C', dtype=DTYPE)
        kwargs = self.metric_params
        if kwargs is None:
            kwargs = {}
        # The tree is the whole fitted state; queries are answered from it.
        self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
                                          leaf_size=self.leaf_size,
                                          **kwargs)
        return self
    def score_samples(self, X):
        """Evaluate the density model on the data.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            An array of points to query.  Last dimension should match dimension
            of training data (n_features).

        Returns
        -------
        density : ndarray, shape (n_samples,)
            The array of log(density) evaluations.
        """
        # The returned density is normalized to the number of points.
        # For it to be a probability, we must scale it.  For this reason
        # we'll also scale atol.
        X = check_array(X, order='C', dtype=DTYPE)
        N = self.tree_.data.shape[0]
        atol_N = self.atol * N
        log_density = self.tree_.kernel_density(
            X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
            rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
        # Normalize by the number of training points (in log space).
        log_density -= np.log(N)
        return log_density
    def score(self, X, y=None):
        """Compute the total log probability under the model.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : float
            Total log-likelihood of the data in X.
        """
        return np.sum(self.score_samples(X))
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Currently, this is implemented only for gaussian and tophat kernels.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        random_state : RandomState or an int seed (0 by default)
            A random number generator instance.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples.
        """
        # TODO: implement sampling for other valid kernel shapes
        if self.kernel not in ['gaussian', 'tophat']:
            raise NotImplementedError()
        data = np.asarray(self.tree_.data)
        rng = check_random_state(random_state)
        # Pick a training point uniformly at random for each sample, then
        # perturb it according to the kernel shape.
        i = rng.randint(data.shape[0], size=n_samples)
        if self.kernel == 'gaussian':
            return np.atleast_2d(rng.normal(data[i], self.bandwidth))
        elif self.kernel == 'tophat':
            # we first draw points from a d-dimensional normal distribution,
            # then use an incomplete gamma function to map them to a uniform
            # d-dimensional tophat distribution.
            dim = data.shape[1]
            X = rng.normal(size=(n_samples, dim))
            s_sq = row_norms(X, squared=True)
            correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1. / dim)
                          * self.bandwidth / np.sqrt(s_sq))
            return data[i] + X * correction[:, np.newaxis]
| bsd-3-clause |
mavit/ansible | lib/ansible/plugins/callback/debug.py | 30 | 1772 | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Callback-plugin metadata consumed by ansible-doc and the config loader
# (YAML embedded in a Python string; content must stay parseable as YAML).
DOCUMENTATION = '''
callback: debug
type: stdout
short_description: formatted stdout/stderr display
description:
- Use this callback to sort though extensive debug output
version_added: "2.4"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):  # pylint: disable=too-few-public-methods,no-init
    '''
    Override for the default callback module.

    Pulls the std out/err fields out of a result and renders them on their
    own, after the indented JSON dump of everything else.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'debug'

    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
        '''Return the text to output for a result.'''

        # Enable JSON indentation
        result['_ansible_verbose_always'] = True

        # Pull the output-related fields aside so they are not part of the
        # JSON dump; they are restored afterwards.
        saved = {}
        for field in ('stdout', 'stdout_lines', 'stderr', 'stderr_lines',
                      'msg', 'module_stdout', 'module_stderr'):
            if field in result:
                saved[field] = result.pop(field)

        output = CallbackModule_default._dump_results(self, result)

        # Render each non-empty saved field in its own labelled section.
        for field in ('stdout', 'stderr', 'msg', 'module_stdout', 'module_stderr'):
            if saved.get(field):
                output += '\n\n%s:\n\n%s\n' % (field.upper(), saved[field])

        # Put the popped fields back so the result dict is left untouched.
        result.update(saved)

        return output
| gpl-3.0 |
klmitch/keystone | keystone/common/json_home.py | 4 | 3238 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone import exception
from keystone.i18n import _
def build_v3_resource_relation(resource_name):
    """Return the JSON Home relationship URL for a core v3 resource."""
    base = 'http://docs.openstack.org/api/openstack-identity/3/rel/'
    return base + resource_name
def build_v3_extension_resource_relation(extension_name, extension_version,
                                         resource_name):
    """Return the JSON Home relationship URL for an extension resource."""
    template = ('http://docs.openstack.org/api/openstack-identity/3/ext/'
                '%(ext)s/%(ver)s/rel/%(res)s')
    return template % {'ext': extension_name,
                       'ver': extension_version,
                       'res': resource_name}
def build_v3_parameter_relation(parameter_name):
    """Return the JSON Home relation URL for a core v3 parameter."""
    base = 'http://docs.openstack.org/api/openstack-identity/3/param/'
    return base + parameter_name
def build_v3_extension_parameter_relation(extension_name, extension_version,
                                          parameter_name):
    """Return the JSON Home relation URL for an extension parameter."""
    template = ('http://docs.openstack.org/api/openstack-identity/3/ext/'
                '%(ext)s/%(ver)s/param/%(param)s')
    return template % {'ext': extension_name,
                       'ver': extension_version,
                       'param': parameter_name}
class Parameters(object):
    """Relationships for Common parameters."""
    # Canonical JSON Home relation URLs for identifier parameters shared by
    # many v3 resources; built once at import time.
    DOMAIN_ID = build_v3_parameter_relation('domain_id')
    ENDPOINT_ID = build_v3_parameter_relation('endpoint_id')
    GROUP_ID = build_v3_parameter_relation('group_id')
    POLICY_ID = build_v3_parameter_relation('policy_id')
    PROJECT_ID = build_v3_parameter_relation('project_id')
    REGION_ID = build_v3_parameter_relation('region_id')
    ROLE_ID = build_v3_parameter_relation('role_id')
    SERVICE_ID = build_v3_parameter_relation('service_id')
    USER_ID = build_v3_parameter_relation('user_id')
class Status(object):
    """Status values supported."""
    DEPRECATED = 'deprecated'
    EXPERIMENTAL = 'experimental'
    STABLE = 'stable'
    @classmethod
    def update_resource_data(cls, resource_data, status):
        """Annotate `resource_data` with a status hint.

        :param resource_data: mutable dict describing a JSON Home resource;
            updated in place.
        :param status: one of the class constants above.
        :raises keystone.exception.Error: if `status` is not a known value.
        """
        # Compare with `==`, not `is`: `is` tests object identity and only
        # works for strings by the accident of interning, so a status value
        # read from config or built at runtime could fail the check.
        if status == cls.STABLE:
            # We currently do not add a status if the resource is stable, the
            # absence of the status property can be taken as meaning that the
            # resource is stable.
            return
        if status == cls.DEPRECATED or status == cls.EXPERIMENTAL:
            resource_data['hints'] = {'status': status}
            return
        raise exception.Error(message=_(
            'Unexpected status requested for JSON Home response, %s') % status)
def translate_urls(json_home, new_prefix):
    """Given a JSON Home document, sticks new_prefix on each of the urls."""
    for resource in json_home['resources'].values():
        if 'href' in resource:
            resource['href'] = new_prefix + resource['href']
        elif 'href-template' in resource:
            resource['href-template'] = new_prefix + resource['href-template']
| apache-2.0 |
vmindru/ansible | test/units/template/test_templar.py | 29 | 17373 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.runtime import Context
from units.compat import unittest
from units.compat.mock import patch
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.template import Templar, AnsibleContext, AnsibleEnvironment
from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var
from units.mock.loader import DictDataLoader
class BaseTemplar(object):
    """
    Shared fixture for the Templar tests: builds a Templar over a canned
    variable dict and a fake loader, and provides an `is_unsafe` helper.
    """
    def setUp(self):
        self.test_vars = dict(
            foo="bar",
            bam="{{foo}}",
            num=1,
            var_true=True,
            var_false=False,
            var_dict=dict(a="b"),
            bad_dict="{a='b'",
            var_list=[1],
            recursive="{{recursive}}",
            some_var="blip",
            some_static_var="static_blip",
            some_keyword="{{ foo }}",
            some_unsafe_var=wrap_var("unsafe_blip"),
            some_static_unsafe_var=wrap_var("static_unsafe_blip"),
            some_unsafe_keyword=wrap_var("{{ foo }}"),
            str_with_error="{{ 'str' | from_json }}",
        )
        self.fake_loader = DictDataLoader({
            "/path/to/my_file.txt": "foo\n",
        })
        self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)
    def is_unsafe(self, obj):
        """
        Return True if `obj` -- or, for dicts and lists, any key or item
        inside it -- is marked unsafe.
        """
        if obj is None:
            return False
        if hasattr(obj, '__UNSAFE__'):
            return True
        if isinstance(obj, AnsibleUnsafe):
            return True
        if isinstance(obj, dict):
            for key in obj.keys():
                if self.is_unsafe(key) or self.is_unsafe(obj[key]):
                    return True
        if isinstance(obj, list):
            for item in obj:
                if self.is_unsafe(item):
                    return True
        # The original ended with an extra
        #     if isinstance(obj, string_types) and hasattr(obj, '__UNSAFE__'):
        # branch, which was unreachable: any object with an __UNSAFE__
        # attribute already returned True above, so it is dropped here.
        return False
# class used for testing arbitrary objects passed to template
class SomeClass(object):
    # class-level attribute, to exercise attribute access on plain objects
    foo = 'bar'
    def __init__(self):
        # instance attribute counterpart of `foo`
        self.blip = 'blip'
class SomeUnsafeClass(AnsibleUnsafe):
    """Arbitrary object already marked unsafe, for template() passthrough tests."""
    def __init__(self):
        super(SomeUnsafeClass, self).__init__()
        self.blip = 'unsafe blip'
class TestTemplarTemplate(BaseTemplar, unittest.TestCase):
    """Tests for Templar.template() and Templar.templatable()."""
    def test_lookup_jinja_dict_key_in_static_vars(self):
        res = self.templar.template("{'some_static_var': '{{ some_var }}'}",
                                    static_vars=['some_static_var'])
        # self.assertEqual(res['{{ a_keyword }}'], "blip")
        print(res)
    def test_templatable(self):
        res = self.templar.templatable('foo')
        self.assertTrue(res)
    def test_templatable_none(self):
        res = self.templar.templatable(None)
        self.assertTrue(res)
    @patch('ansible.template.Templar.template', side_effect=AnsibleError)
    def test_templatable_exception(self, mock_template):
        # templatable() must swallow template errors and report False
        res = self.templar.templatable('foo')
        self.assertFalse(res)
    def test_template_convert_bare_string(self):
        res = self.templar.template('foo', convert_bare=True)
        self.assertEqual(res, 'bar')
    def test_template_convert_bare_nested(self):
        res = self.templar.template('bam', convert_bare=True)
        self.assertEqual(res, 'bar')
    def test_template_convert_bare_unsafe(self):
        res = self.templar.template('some_unsafe_var', convert_bare=True)
        self.assertEqual(res, 'unsafe_blip')
        # self.assertIsInstance(res, AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
    def test_template_convert_bare_filter(self):
        res = self.templar.template('bam|capitalize', convert_bare=True)
        self.assertEqual(res, 'Bar')
    def test_template_convert_bare_filter_unsafe(self):
        # the unsafe marking must survive filter application
        res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True)
        self.assertEqual(res, 'Unsafe_blip')
        # self.assertIsInstance(res, AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
    def test_template_convert_data(self):
        res = self.templar.template('{{foo}}', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bar')
    @patch('ansible.template.safe_eval', side_effect=AnsibleError)
    def test_template_convert_data_template_in_data(self, mock_safe_eval):
        res = self.templar.template('{{bam}}', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bar')
    def test_template_convert_data_bare(self):
        res = self.templar.template('bam', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bam')
    def test_template_convert_data_to_json(self):
        res = self.templar.template('{{bam|to_json}}', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, '"bar"')
    def test_template_convert_data_convert_bare_data_bare(self):
        res = self.templar.template('bam', convert_data=True, convert_bare=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bar')
    def test_template_unsafe_non_string(self):
        unsafe_obj = AnsibleUnsafe()
        res = self.templar.template(unsafe_obj)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
    def test_template_unsafe_non_string_subclass(self):
        unsafe_obj = SomeUnsafeClass()
        res = self.templar.template(unsafe_obj)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
    def test_weird(self):
        # malformed jinja delimiters must raise, not crash or pass through
        data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7'''
        self.assertRaisesRegexp(AnsibleError,
                                'template error while templating string',
                                self.templar.template,
                                data)
    def test_template_with_error(self):
        """Check that AnsibleError is raised, fail if an unhandled exception is raised"""
        self.assertRaises(AnsibleError, self.templar.template, "{{ str_with_error }}")
class TestTemplarMisc(BaseTemplar, unittest.TestCase):
    """Basic templating, backslash escaping and jinja2 extension config."""
    def test_templar_simple(self):
        templar = self.templar
        # test some basic templating
        self.assertEqual(templar.template("{{foo}}"), "bar")
        self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
        self.assertEqual(templar.template("{{bam}}"), "bar")
        self.assertEqual(templar.template("{{num}}"), 1)
        self.assertEqual(templar.template("{{var_true}}"), True)
        self.assertEqual(templar.template("{{var_false}}"), False)
        self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
        self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
        self.assertEqual(templar.template("{{var_list}}"), [1])
        self.assertEqual(templar.template(1, convert_bare=True), 1)
        # force errors
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
        self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
        self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
        # test with fail_on_undefined=False
        self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
        # test set_available_variables()
        templar.set_available_variables(variables=dict(foo="bam"))
        self.assertEqual(templar.template("{{foo}}"), "bam")
        # variables must be a dict() for set_available_variables()
        self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
    def test_templar_escape_backslashes(self):
        # Rule of thumb: If escape backslashes is True you should end up with
        # the same number of backslashes as when you started.
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
    def test_template_jinja2_extensions(self):
        fake_loader = DictDataLoader({})
        templar = Templar(loader=fake_loader)
        # swap the global config in and restore it even if the test fails
        old_exts = C.DEFAULT_JINJA2_EXTENSIONS
        try:
            C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
            self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
        finally:
            C.DEFAULT_JINJA2_EXTENSIONS = old_exts
class TestTemplarLookup(BaseTemplar, unittest.TestCase):
    """Tests for Templar._lookup(): plugin resolution, templated args and
    unsafe propagation of lookup results."""
    def test_lookup_missing_plugin(self):
        self.assertRaisesRegexp(AnsibleError,
                                r'lookup plugin \(not_a_real_lookup_plugin\) not found',
                                self.templar._lookup,
                                'not_a_real_lookup_plugin',
                                'an_arg', a_keyword_arg='a_keyword_arg_value')
    def test_lookup_list(self):
        res = self.templar._lookup('list', 'an_arg', 'another_arg')
        self.assertEqual(res, 'an_arg,another_arg')
    def test_lookup_jinja_undefined(self):
        self.assertRaisesRegexp(AnsibleUndefinedVariable,
                                "'an_undefined_jinja_var' is undefined",
                                self.templar._lookup,
                                'list', '{{ an_undefined_jinja_var }}')
    def test_lookup_jinja_defined(self):
        res = self.templar._lookup('list', '{{ some_var }}')
        self.assertTrue(self.is_unsafe(res))
        # self.assertIsInstance(res, AnsibleUnsafe)
    def test_lookup_jinja_dict_string_passed(self):
        self.assertRaisesRegexp(AnsibleError,
                                "with_dict expects a dict",
                                self.templar._lookup,
                                'dict',
                                '{{ some_var }}')
    def test_lookup_jinja_dict_list_passed(self):
        self.assertRaisesRegexp(AnsibleError,
                                "with_dict expects a dict",
                                self.templar._lookup,
                                'dict',
                                ['foo', 'bar'])
    def test_lookup_jinja_kwargs(self):
        res = self.templar._lookup('list', 'blip', random_keyword='12345')
        self.assertTrue(self.is_unsafe(res))
        # self.assertIsInstance(res, AnsibleUnsafe)
    def test_lookup_jinja_list_wantlist(self):
        res = self.templar._lookup('list', '{{ some_var }}', wantlist=True)
        self.assertEqual(res, ["blip"])
    def test_lookup_jinja_list_wantlist_undefined(self):
        self.assertRaisesRegexp(AnsibleUndefinedVariable,
                                "'some_undefined_var' is undefined",
                                self.templar._lookup,
                                'list',
                                '{{ some_undefined_var }}',
                                wantlist=True)
    def test_lookup_jinja_list_wantlist_unsafe(self):
        # every element of a wantlist result must carry the unsafe marking
        res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True)
        for lookup_result in res:
            self.assertTrue(self.is_unsafe(lookup_result))
            # self.assertIsInstance(lookup_result, AnsibleUnsafe)
        # Should this be an AnsibleUnsafe
        # self.assertIsInstance(res, AnsibleUnsafe)
    def test_lookup_jinja_dict(self):
        res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'})
        self.assertEqual(res['{{ a_keyword }}'], "blip")
        # TODO: Should this be an AnsibleUnsafe
        # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
        # self.assertIsInstance(res, AnsibleUnsafe)
    def test_lookup_jinja_dict_unsafe(self):
        res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'})
        self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}']))
        # self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe)
        # TODO: Should this be an AnsibleUnsafe
        # self.assertIsInstance(res, AnsibleUnsafe)
    def test_lookup_jinja_dict_unsafe_value(self):
        res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'})
        self.assertTrue(self.is_unsafe(res['{{ a_keyword }}']))
        # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
        # TODO: Should this be an AnsibleUnsafe
        # self.assertIsInstance(res, AnsibleUnsafe)
    def test_lookup_jinja_none(self):
        res = self.templar._lookup('list', None)
        self.assertIsNone(res)
class TestAnsibleContext(BaseTemplar, unittest.TestCase):
    """Tests that AnsibleContext.resolve() preserves unsafe markings."""
    def _context(self, variables=None):
        # Build a minimal AnsibleContext whose vars are seeded directly,
        # bypassing template parsing.
        variables = variables or {}
        env = AnsibleEnvironment()
        context = AnsibleContext(env, parent={}, name='some_context',
                                 blocks={})
        for key, value in variables.items():
            context.vars[key] = value
        return context
    def test(self):
        context = self._context()
        self.assertIsInstance(context, AnsibleContext)
        self.assertIsInstance(context, Context)
    def test_resolve_unsafe(self):
        context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')})
        res = context.resolve('some_unsafe_key')
        # self.assertIsInstance(res, AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res),
                        'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
    def test_resolve_unsafe_list(self):
        context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
        res = context.resolve('some_unsafe_key')
        # self.assertIsInstance(res[0], AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res),
                        'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
    def test_resolve_unsafe_dict(self):
        context = self._context(variables={'some_unsafe_key':
                                           {'an_unsafe_dict': wrap_var('some unsafe string 1')}
                                           })
        res = context.resolve('some_unsafe_key')
        self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
                        'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])
    def test_resolve(self):
        # plain values must NOT come back marked unsafe
        context = self._context(variables={'some_key': 'some_string'})
        res = context.resolve('some_key')
        self.assertEqual(res, 'some_string')
        # self.assertNotIsInstance(res, AnsibleUnsafe)
        self.assertFalse(self.is_unsafe(res),
                         'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
    def test_resolve_none(self):
        context = self._context(variables={'some_key': None})
        res = context.resolve('some_key')
        self.assertEqual(res, None)
        # self.assertNotIsInstance(res, AnsibleUnsafe)
        self.assertFalse(self.is_unsafe(res),
                         'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
| gpl-3.0 |
kinshuk4/MoocX | misc/deep_learning_notes/Proj_Centroid_Loss_LeNet/convnet_10_hidden/tests/softmax_loss_function_test.py | 6 | 1427 | import numpy as np, tensorflow as tf
from termcolor import colored as c, cprint
# Candidate logits fed to the loss. NOTE(review): written against a pre-1.0
# TensorFlow API (``tf.mul``, ``reduction_indices``) — will not run on TF >= 1.0
# without renaming to ``tf.multiply``/``axis``.
outputs = [
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],  # correct
    [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],  # wrong
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # null
    [-268252.5625, 48779.19921875, 80110.6796875, 354422.34375,
     158246.78125, 192678.75, 251321.09375, 353138.5, 362559.59375,
     - 80943.828125]  # from experiment
]
# One-hot labels; every row targets class 1.
labels = [
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
]
"""
per the discussion here:
http://stackoverflow.com/questions/33712178/tensorflow-nan-bug
"""
with tf.Graph().as_default(), tf.device('/cpu:0'):
    logits = tf.constant(outputs, dtype=tf.float64)
    batch_labels = tf.constant(labels, dtype=tf.float64)
    # Per-row cross-entropy term, divided by the per-row mean of the raw
    # logits. NOTE(review): dividing by mean(logits) is unusual for a
    # cross-entropy and can flip the sign or divide by ~0 — presumably an
    # experiment (see the SO link above); confirm before reusing.
    cross_entropy = - tf.div(
        tf.reduce_mean(
            tf.mul(batch_labels, tf.nn.log_softmax(logits)),
            reduction_indices=[1]
        ),
        tf.reduce_mean(
            logits,
            reduction_indices=[1]
        )
    )
    with tf.Session() as sess:
        print("here is the calculated loss before being summed up.")
        results = sess.run([logits, cross_entropy])
        print("======")
        cprint(c('logits', 'green') + '\n' + str(results[0]))
        print("------")
        cprint(c('cross_entropy', 'green') + '\n' + str(results[1]))
        print("======")
| mit |
Dekken/tick | tick/prox/tests/prox_zero_test.py | 2 | 1078 | # License: BSD 3 clause
import unittest
from numpy.testing import assert_almost_equal
from tick.prox import ProxZero
from tick.prox.tests.prox import TestProx
class ProxZeroTest(object):
    """Shared test body; mixed into the dtype-specific TestCases below."""
    def test_ProxZero(self):
        """...Test of ProxZero
        """
        coeffs = self.coeffs.copy().astype(self.dtype)
        out = coeffs.copy()
        # ProxZero is expected to contribute zero penalty and leave the
        # coefficients unchanged.
        prox = ProxZero().astype(self.dtype)
        self.assertAlmostEqual(prox.value(coeffs), 0., delta=1e-14)
        assert_almost_equal(prox.call(coeffs), out, decimal=10)
        # Same expectations when restricted to an index range (3, 8).
        prox = ProxZero((3, 8)).astype(self.dtype)
        self.assertAlmostEqual(prox.value(coeffs), 0., delta=1e-14)
        assert_almost_equal(prox.call(coeffs), out, decimal=10)
class ProxZeroTestFloat32(TestProx, ProxZeroTest):
    # Runs the shared ProxZero checks with float32 coefficients.
    def __init__(self, *args, **kwargs):
        TestProx.__init__(self, *args, dtype="float32", **kwargs)
class ProxZeroTestFloat64(TestProx, ProxZeroTest):
    # Runs the shared ProxZero checks with float64 coefficients.
    def __init__(self, *args, **kwargs):
        TestProx.__init__(self, *args, dtype="float64", **kwargs)
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
CentOS-PaaS-SIG/aos-ci | utils/package_checker.py | 4 | 1923 | #!/bin/env python
import json
import sys
import httplib
# Exit 2 if upstream package list doesn't exist
# Exit 1 if file exists, but package isn't in the list
# Exit 0 if file exists, and package is in the list
# NOTE(review): Python 2 script (uses ``httplib``); would need ``http.client``
# on Python 3.
jsonpath = 'fedora-atomic/fedora-atomic-host-base.json'
try:
    with open(jsonpath, 'r') as f:
        atomicjson = json.load(f)
    mypackage = sys.argv[1]
    # Check if package exists in the json file
    # Check both all packages and x86_64 specific packages
    if mypackage in atomicjson["packages"] or mypackage in atomicjson["packages-x86_64"]:
        print ("Package of interest!")
        sys.exit(0)
    # Check if a package that comes from this distgit repo is in atomic
    else:
        # Open up a connection to mdapi
        mdapi_server = httplib.HTTPSConnection('apps.fedoraproject.org', timeout=10)
        # Get package name and branch from args
        mypackage = sys.argv[1]
        mybranch = sys.argv[2]
        resultPage = "/mdapi/" + mybranch + "/srcpkg/" + mypackage
        mdapi_server.request("GET",resultPage)
        res = mdapi_server.getresponse()
        if res.status != 200:
            print("mdapi lookup failed for %s" % resultPage)
            sys.exit(2)
        mdapi_message = res.read()
        # Convert to json
        mdapi_parsed = json.loads(mdapi_message)
        # NOTE(review): if "co-packages" is absent the script falls through
        # and implicitly exits 0 ("package of interest") without printing —
        # looks unintended given the exit-code contract above; confirm.
        if "co-packages" in mdapi_parsed:
            # Perform the check
            if set(atomicjson["packages"]).isdisjoint(mdapi_parsed["co-packages"]) and set(atomicjson["packages-x86_64"]).isdisjoint(mdapi_parsed["co-packages"]):
                # Sets are disjoint so package is not in atomic host
                sys.exit(1)
            else:
                # Sets are not disjoint so package is in atomic host
                print ("Package of interest!")
                sys.exit(0)
except IOError as e:
    print("Could not find upstream package json file")
    sys.exit(2)
| gpl-3.0 |
lukaslueg/wirepy | wirepy/tests/test_column.py | 1 | 2137 | import unittest
from wirepy.lib import column, epan
epan.epan_init()
class TestType(unittest.TestCase):
    """Tests for column.Type construction, parsing and iteration."""
    def test_init(self):
        """A Type built from a format constant exposes string descriptions."""
        col = column.Type(column.Type.ABS_TIME)
        self.assertTrue(isinstance(col, column.Type))
        self.assertTrue(isinstance(col.format_desc, str))
        self.assertTrue(isinstance(col.format_string, str))
    def test_from_string(self):
        """A Type can be parsed from its format string (e.g. '%At')."""
        col = column.Type.from_string('%At')
        self.assertTrue(isinstance(col, column.Type))
    def test_iter(self):
        """Every known column format yields a Type with a working repr."""
        for col in column.Type.iter_column_formats():
            self.assertTrue(isinstance(col, column.Type))
            repr(col)
    def test_invalid_col(self):
        """Unknown format ids/strings raise InvalidColumnType."""
        self.assertRaises(column.InvalidColumnType, column.Type, -1)
        self.assertRaises(column.InvalidColumnType, column.Type.from_string,
                          '_B0RK_')
class TestFormat(unittest.TestCase):
    """Tests for column.Format keyword handling."""
    def test_init(self):
        """All constructor keywords are stored verbatim on the instance."""
        f = column.Format(title='The time', type_=column.Type.ABS_TIME,
                          custom_field='eth.src', custom_occurrence=1,
                          visible=True, resolved=True)
        self.assertEqual(f.title, 'The time')
        self.assertEqual(f.type_, column.Type.ABS_TIME)
        self.assertEqual(f.custom_field, 'eth.src')
        self.assertEqual(f.custom_occurrence, 1)
        self.assertEqual(f.visible, True)
        self.assertEqual(f.resolved, True)
class TestColumn(unittest.TestCase):
    """Tests for ColumnInfo aggregation of Format objects."""
    def test_init(self):
        """ColumnInfo copies formats/titles/custom fields from its inputs."""
        fmts = [column.Format(column.Type.ABS_TIME, title='The time'),
                column.Format(column.Type.UNRES_DST, title='Destination'),
                column.Format(column.Type.CUSTOM, title='Foobar',
                              custom_field='eth.src')]
        info = column.ColumnInfo(fmts)
        # Deleting the source list must not invalidate the ColumnInfo.
        del fmts
        self.assertEqual(info.fmts[0], column.Type.ABS_TIME)
        self.assertEqual(info.fmts[1], column.Type.UNRES_DST)
        self.assertEqual(info.titles[0], 'The time')
        self.assertEqual(info.titles[1], 'Destination')
        self.assertEqual(info.custom_fields[2], 'eth.src')
        self.assertTrue(info.have_custom_cols)
| gpl-3.0 |
dntt1/youtube-dl | youtube_dl/extractor/ustream.py | 14 | 5475 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
)
class UstreamIE(InfoExtractor):
    """Extractor for single ustream.tv recorded/embedded videos."""
    _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)'
    IE_NAME = 'ustream'
    _TESTS = [{
        'url': 'http://www.ustream.tv/recorded/20274954',
        'md5': '088f151799e8f572f84eb62f17d73e5c',
        'info_dict': {
            'id': '20274954',
            'ext': 'flv',
            'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
            'description': 'Young Americans for Liberty February 7, 2012 2:28 AM',
            'timestamp': 1328577035,
            'upload_date': '20120207',
            'uploader': 'yaliberty',
            'uploader_id': '6780869',
        },
    }, {
        # From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444
        # Title and uploader available only from params JSON
        'url': 'http://www.ustream.tv/embed/recorded/59307601?ub=ff0000&lc=ff0000&oc=ffffff&uc=ffffff&v=3&wmode=direct',
        'md5': '5a2abf40babeac9812ed20ae12d34e10',
        'info_dict': {
            'id': '59307601',
            'ext': 'flv',
            'title': '-CG11- Canada Games Figure Skating',
            'uploader': 'sportscanadatv',
        },
        'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.',
    }, {
        'url': 'http://www.ustream.tv/embed/10299409',
        'info_dict': {
            'id': '10299409',
        },
        'playlist_count': 3,
    }]
    def _real_extract(self, url):
        """Dispatch on URL type, then build the info dict from the videos API."""
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        # some sites use this embed format (see: https://github.com/rg3/youtube-dl/issues/2990)
        if m.group('type') == 'embed/recorded':
            video_id = m.group('id')
            desktop_url = 'http://www.ustream.tv/recorded/' + video_id
            return self.url_result(desktop_url, 'Ustream')
        if m.group('type') == 'embed':
            # Embed pages list their off-air content ids in inline JS; turn
            # each one into a /recorded/ URL and return a playlist.
            video_id = m.group('id')
            webpage = self._download_webpage(url, video_id)
            content_video_ids = self._parse_json(self._search_regex(
                r'ustream\.vars\.offAirContentVideoIds=([^;]+);', webpage,
                'content video IDs'), video_id)
            return self.playlist_result(
                map(lambda u: self.url_result('http://www.ustream.tv/recorded/' + u, 'Ustream'), content_video_ids),
                video_id)
        params = self._download_json(
            'https://api.ustream.tv/videos/%s.json' % video_id, video_id)
        error = params.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error), expected=True)
        video = params['video']
        title = video['title']
        filesize = float_or_none(video.get('file_size'))
        # One format per entry of media_urls; the dict key doubles as the
        # container extension.
        formats = [{
            'id': video_id,
            'url': video_url,
            'ext': format_id,
            'filesize': filesize,
        } for format_id, video_url in video['media_urls'].items()]
        self._sort_formats(formats)
        description = video.get('description')
        timestamp = int_or_none(video.get('created_at'))
        duration = float_or_none(video.get('length'))
        view_count = int_or_none(video.get('views'))
        uploader = video.get('owner', {}).get('username')
        uploader_id = video.get('owner', {}).get('id')
        thumbnails = [{
            'id': thumbnail_id,
            'url': thumbnail_url,
        } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()]
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'duration': duration,
            'view_count': view_count,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'formats': formats,
        }
class UstreamChannelIE(InfoExtractor):
    """Extractor that turns a ustream.tv channel page into a playlist."""
    _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
    IE_NAME = 'ustream:channel'
    _TEST = {
        'url': 'http://www.ustream.tv/channel/channeljapan',
        'info_dict': {
            'id': '10874166',
        },
        'playlist_mincount': 17,
    }
    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        display_id = m.group('slug')
        webpage = self._download_webpage(url, display_id)
        # Numeric channel id is exposed via a meta tag on the channel page.
        channel_id = self._html_search_meta('ustream:channel_id', webpage)
        BASE = 'http://www.ustream.tv'
        next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
        video_ids = []
        # Follow the paginated AJAX feed until it stops returning a nextUrl.
        while next_url:
            reply = self._download_json(
                compat_urlparse.urljoin(BASE, next_url), display_id,
                note='Downloading video information (next: %d)' % (len(video_ids) + 1))
            video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
            next_url = reply['nextUrl']
        entries = [
            self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
            for vid in video_ids]
        return {
            '_type': 'playlist',
            'id': channel_id,
            'display_id': display_id,
            'entries': entries,
        }
| unlicense |
istresearch/readthedocs.org | readthedocs/builds/models.py | 2 | 14932 | import logging
import re
import os.path
from shutil import rmtree
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
from guardian.shortcuts import assign
from taggit.managers import TaggableManager
from readthedocs.privacy.loader import (VersionManager, RelatedBuildManager,
BuildManager)
from readthedocs.projects.models import Project
from readthedocs.projects.constants import (PRIVACY_CHOICES, GITHUB_URL,
GITHUB_REGEXS, BITBUCKET_URL,
BITBUCKET_REGEXS, PRIVATE)
from readthedocs.core.resolver import resolve
from .constants import (BUILD_STATE, BUILD_TYPES, VERSION_TYPES,
LATEST, NON_REPOSITORY_VERSIONS, STABLE,
BUILD_STATE_FINISHED, BRANCH, TAG)
from .version_slug import VersionSlugField
DEFAULT_VERSION_PRIVACY_LEVEL = getattr(settings, 'DEFAULT_VERSION_PRIVACY_LEVEL', 'public')
log = logging.getLogger(__name__)
class Version(models.Model):
    """
    Attributes
    ----------
    ``identifier``
        The identifier is the ID for the revision this is version is for. This
        might be the revision number (e.g. in SVN), or the commit hash (e.g. in
        Git). If the this version is pointing to a branch, then ``identifier``
        will contain the branch name.
    ``verbose_name``
        This is the actual name that we got for the commit stored in
        ``identifier``. This might be the tag or branch name like ``"v1.0.4"``.
        However this might also hold special version names like ``"latest"``
        and ``"stable"``.
    ``slug``
        The slug is the slugified version of ``verbose_name`` that can be used
        in the URL to identify this version in a project. It's also used in the
        filesystem to determine how the paths for this version are called. It
        must not be used for any other identifying purposes.
    """
    project = models.ForeignKey(Project, verbose_name=_('Project'),
                                related_name='versions')
    type = models.CharField(
        _('Type'), max_length=20,
        choices=VERSION_TYPES, default='unknown',
    )
    # used by the vcs backend
    identifier = models.CharField(_('Identifier'), max_length=255)
    verbose_name = models.CharField(_('Verbose Name'), max_length=255)
    slug = VersionSlugField(_('Slug'), max_length=255,
                            populate_from='verbose_name')
    supported = models.BooleanField(_('Supported'), default=True)
    active = models.BooleanField(_('Active'), default=False)
    built = models.BooleanField(_('Built'), default=False)
    uploaded = models.BooleanField(_('Uploaded'), default=False)
    privacy_level = models.CharField(
        _('Privacy Level'), max_length=20, choices=PRIVACY_CHOICES,
        default=DEFAULT_VERSION_PRIVACY_LEVEL, help_text=_("Level of privacy for this Version.")
    )
    tags = TaggableManager(blank=True)
    machine = models.BooleanField(_('Machine Created'), default=False)
    objects = VersionManager()
    class Meta:
        unique_together = [('project', 'slug')]
        ordering = ['-verbose_name']
        permissions = (
            # Translators: Permission around whether a user can view the
            # version
            ('view_version', _('View Version')),
        )
    def __unicode__(self):
        return ugettext(u"Version %(version)s of %(project)s (%(pk)s)" % {
            'version': self.verbose_name,
            'project': self.project,
            'pk': self.pk
        })
    @property
    def commit_name(self):
        """
        Return the branch name, the tag name or the revision identifier.
        The result could be used as ref in a git repo, e.g. for linking to
        GitHub or Bitbucket.
        """
        # LATEST is special as it is usually a branch but does not contain the
        # name in verbose_name.
        if self.slug == LATEST:
            if self.project.default_branch:
                return self.project.default_branch
            else:
                return self.project.vcs_repo().fallback_branch
        if self.slug == STABLE:
            if self.type == BRANCH:
                # Special case, as we do not store the original branch name
                # that the stable version works on. We can only interpolate the
                # name from the commit identifier, but it's hacky.
                # TODO: Refactor ``Version`` to store more actual info about
                # the underlying commits.
                if self.identifier.startswith('origin/'):
                    return self.identifier[len('origin/'):]
            return self.identifier
        # By now we must have handled all special versions.
        assert self.slug not in NON_REPOSITORY_VERSIONS
        if self.type in (BRANCH, TAG):
            # If this version is a branch or a tag, the verbose_name will
            # contain the actual name. We cannot use identifier as this might
            # include the "origin/..." part in the case of a branch. A tag
            # would contain the hash in identifier, which is not as pretty as
            # the actual tag name.
            return self.verbose_name
        # If we came that far it's not a special version nor a branch or tag.
        # Therefore just return the identifier to make a safe guess.
        return self.identifier
    def get_absolute_url(self):
        """URL of the docs, or the version detail page if nothing is built yet."""
        if not self.built and not self.uploaded:
            return reverse('project_version_detail', kwargs={
                'project_slug': self.project.slug,
                'version_slug': self.slug,
            })
        private = self.privacy_level == PRIVATE
        return self.project.get_docs_url(version_slug=self.slug, private=private)
    def save(self, *args, **kwargs):
        """
        Add permissions to the Version for all owners on save.
        """
        obj = super(Version, self).save(*args, **kwargs)
        for owner in self.project.users.all():
            assign('view_version', owner, self)
        self.project.sync_supported_versions()
        return obj
    def delete(self, *args, **kwargs):
        """Queue removal of built artifacts before deleting the DB row."""
        from readthedocs.projects.tasks import clear_artifacts
        log.info('Removing files for version %s' % self.slug)
        clear_artifacts.delay(version_pk=self.pk)
        super(Version, self).delete(*args, **kwargs)
    @property
    def identifier_friendly(self):
        '''Return display friendly identifier'''
        # Full 40-char git SHAs are abbreviated to 8 characters for display.
        re_sha = re.compile(r'^[0-9a-f]{40}$', re.I)
        if re_sha.match(str(self.identifier)):
            return self.identifier[:8]
        return self.identifier
    def get_subdomain_url(self):
        """Resolve the canonical (subdomain) docs URL for this version."""
        private = self.privacy_level == PRIVATE
        return resolve(project=self.project, version_slug=self.slug, private=private)
    def get_downloads(self, pretty=False):
        """Map available download formats to their media URLs.

        With ``pretty=True`` the keys are display labels ('PDF', 'HTML',
        'Epub'); otherwise they are machine keys ('pdf', 'htmlzip', 'epub').
        """
        project = self.project
        data = {}
        if pretty:
            if project.has_pdf(self.slug):
                data['PDF'] = project.get_production_media_url('pdf', self.slug)
            if project.has_htmlzip(self.slug):
                data['HTML'] = project.get_production_media_url('htmlzip', self.slug)
            if project.has_epub(self.slug):
                data['Epub'] = project.get_production_media_url('epub', self.slug)
        else:
            if project.has_pdf(self.slug):
                data['pdf'] = project.get_production_media_url('pdf', self.slug)
            if project.has_htmlzip(self.slug):
                data['htmlzip'] = project.get_production_media_url('htmlzip', self.slug)
            if project.has_epub(self.slug):
                data['epub'] = project.get_production_media_url('epub', self.slug)
        return data
    def get_conf_py_path(self):
        """Path of conf.py relative to the checkout root of this version."""
        conf_py_path = self.project.conf_dir(self.slug)
        checkout_prefix = self.project.checkout_path(self.slug)
        conf_py_path = os.path.relpath(conf_py_path, checkout_prefix)
        return conf_py_path
    def get_build_path(self):
        '''Return version build path if path exists, otherwise `None`'''
        path = self.project.checkout_path(version=self.slug)
        if os.path.exists(path):
            return path
        return None
    def clean_build_path(self):
        '''Clean build path for project version
        Ensure build path is clean for project version. Used to ensure stale
        build checkouts for each project version are removed.
        '''
        try:
            path = self.get_build_path()
            if path is not None:
                log.debug('Removing build path {0} for {1}'.format(
                    path, self))
                rmtree(path)
        except OSError:
            log.error('Build path cleanup failed', exc_info=True)
    def get_github_url(self, docroot, filename, source_suffix='.rst', action='view'):
        """Build a GitHub blob/edit URL for *filename*, or '' if not on GitHub."""
        repo_url = self.project.repo
        if 'github' not in repo_url:
            return ''
        if not docroot:
            return ''
        else:
            # Normalize docroot to have exactly one leading and trailing '/'.
            if docroot[0] != '/':
                docroot = "/%s" % docroot
            if docroot[-1] != '/':
                docroot = "%s/" % docroot
        # NOTE(review): if action is neither 'view' nor 'edit',
        # ``action_string`` is never assigned and the .format() below raises
        # NameError — consider an explicit fallback; confirm intended values.
        if action == 'view':
            action_string = 'blob'
        elif action == 'edit':
            action_string = 'edit'
        for regex in GITHUB_REGEXS:
            match = regex.search(repo_url)
            if match:
                user, repo = match.groups()
                break
        else:
            return ''
        repo = repo.rstrip('/')
        return GITHUB_URL.format(
            user=user,
            repo=repo,
            version=self.commit_name,
            docroot=docroot,
            path=filename,
            source_suffix=source_suffix,
            action=action_string,
        )
    def get_bitbucket_url(self, docroot, filename, source_suffix='.rst'):
        """Build a Bitbucket source URL for *filename*, or '' if not on Bitbucket."""
        repo_url = self.project.repo
        if 'bitbucket' not in repo_url:
            return ''
        if not docroot:
            return ''
        for regex in BITBUCKET_REGEXS:
            match = regex.search(repo_url)
            if match:
                user, repo = match.groups()
                break
        else:
            return ''
        repo = repo.rstrip('/')
        return BITBUCKET_URL.format(
            user=user,
            repo=repo,
            version=self.commit_name,
            docroot=docroot,
            path=filename,
            source_suffix=source_suffix,
        )
class VersionAlias(models.Model):
    """A per-project mapping from one version slug to another."""
    project = models.ForeignKey(Project, verbose_name=_('Project'),
                                related_name='aliases')
    from_slug = models.CharField(_('From slug'), max_length=255, default='')
    to_slug = models.CharField(_('To slug'), max_length=255, default='',
                               blank=True)
    largest = models.BooleanField(_('Largest'), default=False)
    def __unicode__(self):
        return ugettext(u"Alias for %(project)s: %(from)s -> %(to)s" % {
            'project': self.project,
            'from': self.from_slug,
            'to': self.to_slug,
        })
class Build(models.Model):
    """A single documentation build run for a project version."""
    project = models.ForeignKey(Project, verbose_name=_('Project'),
                                related_name='builds')
    version = models.ForeignKey(Version, verbose_name=_('Version'), null=True,
                                related_name='builds')
    type = models.CharField(_('Type'), max_length=55, choices=BUILD_TYPES,
                            default='html')
    state = models.CharField(_('State'), max_length=55, choices=BUILD_STATE,
                             default='finished')
    date = models.DateTimeField(_('Date'), auto_now_add=True)
    success = models.BooleanField(_('Success'), default=True)
    setup = models.TextField(_('Setup'), null=True, blank=True)
    setup_error = models.TextField(_('Setup error'), null=True, blank=True)
    output = models.TextField(_('Output'), default='', blank=True)
    error = models.TextField(_('Error'), default='', blank=True)
    exit_code = models.IntegerField(_('Exit code'), null=True, blank=True)
    commit = models.CharField(_('Commit'), max_length=255, null=True, blank=True)
    length = models.IntegerField(_('Build Length'), null=True, blank=True)
    builder = models.CharField(_('Builder'), max_length=255, null=True, blank=True)
    # Manager
    objects = BuildManager()
    class Meta:
        ordering = ['-date']
        get_latest_by = 'date'
        index_together = [
            ['version', 'state', 'type']
        ]
    def __unicode__(self):
        return ugettext(u"Build %(project)s for %(usernames)s (%(pk)s)" % {
            'project': self.project,
            'usernames': ' '.join(self.project.users.all()
                                  .values_list('username', flat=True)),
            'pk': self.pk,
        })
    @models.permalink
    def get_absolute_url(self):
        return ('builds_detail', [self.project.slug, self.pk])
    @property
    def finished(self):
        '''Return if build has a finished state'''
        return self.state == BUILD_STATE_FINISHED
class BuildCommandResultMixin(object):
    """Success/failure helpers shared by build command results.

    Mixed into both the database model :py:cls:`BuildCommandResult` and the
    non-model representations of build command results from the API.
    """

    @property
    def successful(self):
        """Whether the command exited with a zero (success) exit code."""
        exited_cleanly = (self.exit_code == 0)
        return exited_cleanly

    @property
    def failed(self):
        """Whether the command exited with a failing exit code.

        Inverse of :py:meth:`successful`.
        """
        return not self.successful
class BuildCommandResult(BuildCommandResultMixin, models.Model):
    """Result of one shell command executed during a :py:cls:`Build`."""
    build = models.ForeignKey(Build, verbose_name=_('Build'),
                              related_name='commands')
    command = models.TextField(_('Command'))
    description = models.TextField(_('Description'), blank=True)
    output = models.TextField(_('Command output'), blank=True)
    exit_code = models.IntegerField(_('Command exit code'))
    start_time = models.DateTimeField(_('Start time'))
    end_time = models.DateTimeField(_('End time'))
    class Meta:
        ordering = ['start_time']
        get_latest_by = 'start_time'
    objects = RelatedBuildManager()
    def __unicode__(self):
        return (ugettext(u'Build command {pk} for build {build}')
                .format(pk=self.pk, build=self.build))
    @property
    def run_time(self):
        """Total command runtime in seconds, or None if a timestamp is missing."""
        if self.start_time is not None and self.end_time is not None:
            diff = self.end_time - self.start_time
            # Fix: ``timedelta.seconds`` drops whole days, under-reporting
            # any command that ran for more than 24h; ``total_seconds()``
            # covers the full span (int cast keeps the previous return type).
            return int(diff.total_seconds())
| mit |
maciekcc/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
  """Multi-output tests."""

  def testMultiRegression(self):
    """LinearRegressor with label_dimension=2 fits sin/cos targets with low MSE."""
    random.seed(42)
    rng = np.random.RandomState(1)
    x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
    # Two targets per example: pi*sin(x) and pi*cos(x).
    y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
    regressor = learn.LinearRegressor(
        feature_columns=learn.infer_real_valued_columns_from_input(x),
        label_dimension=2)
    regressor.fit(x, y, steps=100)
    score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
    # Loose bound: only checks the model learned something at all.
    self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
SebastianMerz/calalert | Server/venv/lib/python2.7/site-packages/jinja2/sandbox.py | 637 | 13445 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, function_type, method_type, \
traceback_type, code_type, frame_type, generator_type, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
#: unsafe generator attirbutes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
if not PY2:
UNSAFE_FUNCTION_ATTRIBUTES = set()
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
    """Sandboxed replacement for :func:`range`.

    Accepts the same arguments as the builtin, but refuses to produce a
    range of more than MAX_RANGE items so templates cannot exhaust memory.
    """
    result = range(*args)
    if len(result) > MAX_RANGE:
        raise OverflowError('range too big, maximum size for range is %d' %
                            MAX_RANGE)
    return result
def unsafe(f):
    """Decorator that flags a function or method as unsafe.
    ::
        @unsafe
        def delete(self):
            pass
    """
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # Check the most specific unsafe-attribute set for each callable kind;
    # the UNSAFE_* sets are defined at module level (empty on Python 3 for
    # plain functions).
    if isinstance(obj, function_type):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, method_type):
        # Bound methods expose both function internals and im_* attributes.
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
           attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        # ``mro`` would expose the full class hierarchy.
        if attr == 'mro':
            return True
    elif isinstance(obj, (code_type, traceback_type, frame_type)):
        # Code/traceback/frame objects are entirely off-limits.
        return True
    elif isinstance(obj, generator_type):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    # Dunder attributes are always considered internal.
    return attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """Return True when calling *attr* on *obj* would mutate it in place.

    Covers the builtin mutable containers (list, dict, set, deque) plus the
    registered "user" classes and abstract base classes from
    ``_mutable_spec``.  Objects of any other type (such as strings) always
    yield False.
    """
    mutators = next(
        (unsafe for typespec, unsafe in _mutable_spec
         if isinstance(obj, typespec)),
        None,
    )
    return mutators is not None and attr in mutators
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
raised. However also other exceptions may occour during the rendering so
the caller has to ensure that all exceptions are catched.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table = {
'+': operator.pos,
'-': operator.neg
}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
#: The default operation form the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
#: The default operation form the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops = frozenset()
    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        # Hook for subclasses; the base sandbox never intercepts unary ops.
        return False
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals['range'] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith('_') or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (getattr(obj, 'unsafe_callable', False) or
getattr(obj, 'alters_data', False))
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context, operator, arg):
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code.

        Tries ``obj[argument]`` first; if that fails with a TypeError or
        LookupError and the argument is a string, falls back to attribute
        access, subject to :meth:`is_safe_attribute`.
        """
        try:
            return obj[argument]
        except (TypeError, LookupError):
            if isinstance(argument, string_types):
                try:
                    # attribute names must be native strings
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        # note: the original (possibly non-str) argument is
                        # passed to the safety check, not the coerced name
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        # neither item nor attribute lookup succeeded
        return self.undefined(obj=obj, name=argument)
    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.

        Mirrors :meth:`getitem` with the lookup order reversed: attribute
        access first, item access as the fallback.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # no such attribute -- fall back to item lookup
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined('access to attribute %r of %r '
'object is unsafe.' % (
attribute,
obj.__class__.__name__
), name=attribute, obj=obj, exc=SecurityError)
def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        # Must pass the base sandbox check *and* must not be one of the
        # known mutating methods of list/set/dict.
        base_ok = SandboxedEnvironment.is_safe_attribute(
            self, obj, attr, value)
        return base_ok and not modifies_known_mutable(obj, attr)
| gpl-2.0 |
MTK6580/walkie-talkie | ALPS.L1.MP6.V2_HEXING6580_WE_L/alps/cts/suite/audio_quality/test_description/processing/calc_delay.py | 5 | 2521 | #!/usr/bin/python
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import numpy.linalg
import scipy as sp
import scipy.fftpack
import scipy.signal
import math
import sys
from multiprocessing import Pool
def convolution(data0, data1reversed, n):
    """Dot product of ``data0[n:n+len(data1reversed)]`` with
    ``data1reversed`` -- one lag of the correlation."""
    window = len(data1reversed)
    return np.dot(data0[n:n + window], data1reversed)
def convolutionstar(args):
    """Unpack the ``(data0, data1reversed, n)`` tuple for ``Pool.map``,
    which can only pass a single argument to the worker function."""
    return convolution(*args)
def calc_delay(data0, data1):
"""Calcuate delay between two data. data0 is assumed to be recorded first,
and will have longer length than data1
returns delay between data0 and data1 in number of samples in data0's point of view"""
len0 = len(data0)
len1 = len(data1)
if len1 > len0:
print "data1 longer than data0"
return -1
searchLen = len0 - len1
data1reverse = data1[::-1]
# This is faster than signal.correlate as there is no need to process
# full data, but still it is slow. about 18 secs for data0 of 4 secs with data1 of 1 secs
print "***Caluclating delay, may take some time***"
gData0 = data0
gData1 = data1reverse
pool = Pool(processes = 4)
TASK = [(data0, data1reverse, i) for i in range(searchLen)]
result = pool.map(convolutionstar, TASK)
return np.argmax(result)
# test code
if __name__=="__main__":
samplingRate = 44100
durationInSec = 0.001
if len(sys.argv) > 1:
durationInSec = float(sys.argv[1])
signalFrequency = 1000
samples = float(samplingRate) * float(durationInSec)
index = np.linspace(0.0, samples, num=samples, endpoint=False)
time = index / samplingRate
multiplier = 2.0 * np.pi * signalFrequency / float(samplingRate)
data0 = np.sin(index * multiplier)
DELAY = durationInSec / 2.0 * samplingRate
data1 = data0[DELAY:]
delay = calc_delay(data0, data1)
print "calc_delay returned", delay, " while expecting ", DELAY
| gpl-3.0 |
kevin-coder/tensorflow-fork | tensorflow/contrib/slim/python/slim/evaluation.py | 24 | 11795 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics.
The evaluation.py module contains helper functions for evaluating TensorFlow
modules using a variety of metrics and summarizing the results.
**********************
* Evaluating Metrics *
**********************
In the simplest use case, we use a model to create the predictions, then specify
the metrics, choose one model checkpoint, and finally call the `evaluate_once`
method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
"accuracy": slim.metrics.accuracy(predictions, labels),
"mse": slim.metrics.mean_squared_error(predictions, labels),
})
checkpoint_path = '/tmp/my_model_dir/my_checkpoint'
log_dir = '/tmp/my_model_eval/'
initial_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer())
metric_values = slim.evaluate_once(
master='',
checkpoint_path=checkpoint_path,
log_dir=log_dir,
num_evals=1,
initial_op=initial_op,
eval_op=names_to_updates.values(),
    final_op=names_to_values.values())
for metric, value in zip(names_to_values.keys(), metric_values):
logging.info('Metric %s has value: %f', metric, value)
************************************************
* Evaluating a Checkpointed Model with Metrics *
************************************************
Often, one wants to evaluate a model checkpoint saved on disk. This can be
performed once or repeatedly on a set schedule.
To evaluate a particular model, users define zero or more metrics and zero or
more summaries and call the evaluation_loop method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
"accuracy": slim.metrics.accuracy(predictions, labels),
"mse": slim.metrics.mean_squared_error(predictions, labels),
})
# Define the summaries to write:
for metric_name, metric_value in metrics_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
# Evaluate every 10 minutes:
slim.evaluation_loop(
'',
checkpoint_dir,
logdir,
num_evals=num_evals,
eval_op=names_to_updates.values(),
summary_op=tf.contrib.deprecated.merge_summary(summary_ops),
eval_interval_secs=600)
**************************************************
* Evaluating a Checkpointed Model with Summaries *
**************************************************
At times, an evaluation can be performed without metrics at all but rather
with only summaries. The user need only leave out the 'eval_op' argument:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the summaries to write:
tf.summary.scalar(...)
tf.summary.histogram(...)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# Evaluate once every 10 minutes.
slim.evaluation_loop(
master='',
checkpoint_dir,
logdir,
num_evals=1,
summary_op=tf.contrib.deprecated.merge_summary(summary_ops),
eval_interval_secs=600)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.python.summary import summary
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
__all__ = [
    'evaluate_once',
    'evaluation_loop',
    'wait_for_new_checkpoint',
    'checkpoints_iterator',
]

# Re-exported from tf.contrib.training.evaluation for backward compatibility.
wait_for_new_checkpoint = evaluation.wait_for_new_checkpoint
checkpoints_iterator = evaluation.checkpoints_iterator

# Sentinel distinguishing "caller omitted summary_op" from an explicit None
# (None means "no summaries", omitted means "use tf.summary.merge_all()").
_USE_DEFAULT = 0
def evaluate_once(master,
                  checkpoint_path,
                  logdir,
                  num_evals=1,
                  initial_op=None,
                  initial_op_feed_dict=None,
                  eval_op=None,
                  eval_op_feed_dict=None,
                  final_op=None,
                  final_op_feed_dict=None,
                  summary_op=_USE_DEFAULT,
                  summary_op_feed_dict=None,
                  variables_to_restore=None,
                  session_config=None,
                  hooks=None):
  """Evaluates the model at the given checkpoint path.

  Args:
    master: The BNS address of the TensorFlow master.
    checkpoint_path: The path to a checkpoint to use for evaluation.
    logdir: The directory where the TensorFlow summaries are written to.
    num_evals: The number of times to run `eval_op`.
    initial_op: An operation run at the beginning of evaluation.
    initial_op_feed_dict: A feed dictionary to use when executing `initial_op`.
    eval_op: A operation run `num_evals` times.
    eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
    final_op: An operation to execute after all of the `eval_op` executions.
      The value of `final_op` is returned.
    final_op_feed_dict: A feed dictionary to use when executing `final_op`.
    summary_op: The summary_op to evaluate after running TF-Slims metric ops.
      By default the summary_op is set to tf.summary.merge_all().
    summary_op_feed_dict: An optional feed dictionary to use when running the
      `summary_op`.
    variables_to_restore: A list of TensorFlow variables to restore during
      evaluation.  If left as `None`, slim.variables.GetVariablesToRestore()
      is used.
    session_config: An instance of `tf.ConfigProto` used to configure the
      `Session`.  If left as `None`, the default will be used.
    hooks: A list of additional `SessionRunHook` objects to pass during the
      evaluation.

  Returns:
    The value of `final_op` or `None` if `final_op` is `None`.
  """
  if summary_op == _USE_DEFAULT:
    summary_op = summary.merge_all()

  # Always stop after num_evals batches; optionally write summaries at end.
  all_hooks = [evaluation.StopAfterNEvalsHook(num_evals)]
  if summary_op is not None:
    all_hooks.append(
        evaluation.SummaryAtEndHook(
            log_dir=logdir,
            summary_op=summary_op,
            feed_dict=summary_op_feed_dict))
  if hooks is not None:
    all_hooks.extend(hooks)

  saver = (tf_saver.Saver(variables_to_restore)
           if variables_to_restore is not None else None)

  scaffold = monitored_session.Scaffold(
      init_op=initial_op, init_feed_dict=initial_op_feed_dict, saver=saver)
  return evaluation.evaluate_once(
      checkpoint_path,
      master=master,
      scaffold=scaffold,
      eval_ops=eval_op,
      feed_dict=eval_op_feed_dict,
      final_ops=final_op,
      final_ops_feed_dict=final_op_feed_dict,
      hooks=all_hooks,
      config=session_config)
def evaluation_loop(master,
                    checkpoint_dir,
                    logdir,
                    num_evals=1,
                    initial_op=None,
                    initial_op_feed_dict=None,
                    init_fn=None,
                    eval_op=None,
                    eval_op_feed_dict=None,
                    final_op=None,
                    final_op_feed_dict=None,
                    summary_op=_USE_DEFAULT,
                    summary_op_feed_dict=None,
                    variables_to_restore=None,
                    eval_interval_secs=60,
                    max_number_of_evaluations=None,
                    session_config=None,
                    timeout=None,
                    timeout_fn=None,
                    hooks=None):
  """Runs TF-Slim's Evaluation Loop.

  Args:
    master: The BNS address of the TensorFlow master.
    checkpoint_dir: The directory where checkpoints are stored.
    logdir: The directory where the TensorFlow summaries are written to.
    num_evals: The number of times to run `eval_op`.
    initial_op: An operation run at the beginning of evaluation.
    initial_op_feed_dict: A feed dictionary to use when executing `initial_op`.
    init_fn: An optional callable executed after `init_op` is called.  The
      callable must accept one argument, the session being initialized.
    eval_op: A operation run `num_evals` times.
    eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
    final_op: An operation to execute after all of the `eval_op` executions.
      The value of `final_op` is returned.
    final_op_feed_dict: A feed dictionary to use when executing `final_op`.
    summary_op: The summary_op to evaluate after running TF-Slims metric ops.
      By default the summary_op is set to tf.summary.merge_all().
    summary_op_feed_dict: An optional feed dictionary to use when running the
      `summary_op`.
    variables_to_restore: A list of TensorFlow variables to restore during
      evaluation.  If left as `None`, slim.variables.GetVariablesToRestore()
      is used.
    eval_interval_secs: The minimum number of seconds between evaluations.
    max_number_of_evaluations: the max number of iterations of the evaluation.
      If left as 'None', the evaluation continues indefinitely.
    session_config: An instance of `tf.ConfigProto` used to configure the
      `Session`.  If left as `None`, the default will be used.
    timeout: The maximum amount of time to wait between checkpoints.  If left
      as `None`, the process will wait indefinitely.
    timeout_fn: Optional function to call after a timeout.  If the function
      returns True, no new checkpoints will be generated and the iterator
      exits.  The function is called with no arguments.
    hooks: A list of additional `SessionRunHook` objects to pass during
      repeated evaluations.

  Returns:
    The value of `final_op` or `None` if `final_op` is `None`.
  """
  if summary_op == _USE_DEFAULT:
    summary_op = summary.merge_all()

  # Always stop after num_evals batches; optionally write summaries at end.
  all_hooks = [evaluation.StopAfterNEvalsHook(num_evals)]
  if summary_op is not None:
    all_hooks.append(
        evaluation.SummaryAtEndHook(
            log_dir=logdir,
            summary_op=summary_op,
            feed_dict=summary_op_feed_dict))
  if hooks is not None:
    # Add custom hooks if provided.
    all_hooks.extend(hooks)

  saver = (tf_saver.Saver(variables_to_restore)
           if variables_to_restore is not None else None)

  scaffold = monitored_session.Scaffold(
      init_op=initial_op, init_feed_dict=initial_op_feed_dict,
      init_fn=init_fn, saver=saver)
  return evaluation.evaluate_repeatedly(
      checkpoint_dir,
      master=master,
      scaffold=scaffold,
      eval_ops=eval_op,
      feed_dict=eval_op_feed_dict,
      final_ops=final_op,
      final_ops_feed_dict=final_op_feed_dict,
      eval_interval_secs=eval_interval_secs,
      hooks=all_hooks,
      config=session_config,
      max_number_of_evaluations=max_number_of_evaluations,
      timeout=timeout,
      timeout_fn=timeout_fn)
| apache-2.0 |
gileno/tapioca-github | tapioca_github/tapioca_github.py | 1 | 1274 | # -*- coding: utf-8 -*-
from tapioca import (
TapiocaAdapter, generate_wrapper_from_adapter, JSONAdapterMixin)
from requests_oauthlib import OAuth2
from .resource_mapping import RESOURCE_MAPPING
class GithubClientAdapter(JSONAdapterMixin, TapiocaAdapter):
    """Tapioca adapter for the GitHub REST API using OAuth2 bearer tokens."""

    api_root = 'https://api.github.com/'
    resource_mapping = RESOURCE_MAPPING

    def get_request_kwargs(self, api_params, *args, **kwargs):
        # Let the base adapters build the request kwargs, then attach the
        # OAuth2 credentials taken from the api_params.
        request_kwargs = super(GithubClientAdapter, self).get_request_kwargs(
            api_params, *args, **kwargs)
        token = {'access_token': api_params.get('access_token')}
        request_kwargs['auth'] = OAuth2(api_params.get('client_id'),
                                        token=token)
        return request_kwargs

    def get_iterator_list(self, response_data):
        # GitHub list endpoints return the items directly.
        return response_data

    def get_iterator_next_request_kwargs(
            self, iterator_request_kwargs, response_data, response):
        # GitHub paginates via the ``Link`` header, e.g.
        #   <https://api.github.com/...?page=2>; rel="next", <...>; rel="last"
        if "Link" not in response.headers:
            return
        for link in response.headers["Link"].split(", "):
            url, rel = link.split("; ")
            # url[1:-1] strips the surrounding <>; rel[5:-1] strips rel="..."
            if rel[5:-1] == 'next':
                return {'url': url[1:-1]}


Github = generate_wrapper_from_adapter(GithubClientAdapter)
| mit |
whardier/indicted | indicted/__init__.py | 1 | 2707 |
#!/usr/bin/env python
#
# Copyright (c) 2012 Shane R. Spencer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""indicted - Indexed Dictionary Class"""
# NOTE(review): reassigning __name__ at module level shadows the real module
# name and breaks ``if __name__ == "__main__"`` checks and relative imports
# -- confirm this is intentional.
__name__ = "indicted"
__author__ = 'Shane R. Spencer'
__email__ = "shane@bogomip.com"
__license__ = 'MIT'
__copyright__ = '2012 Shane R. Spencer'
__version__ = '0.0.1'
__status__ = "Prototype"
__description__ = "Indexed Dictionary Class"
from collections import OrderedDict
class InList(list):
    """A list that maintains an id -> position index over its dict elements.

    Elements that are dicts and whose ``indexkey`` value is an instance of
    ``indexclass`` are indexed so they can be retrieved by id via
    :meth:`find` / :meth:`ids`.
    """

    def __init__(self, iterable=(), indexclass=None, indexkey=None):
        # Fixes two defects of the original: a mutable default argument
        # (``iterable=[]``) and ``issubclass(type(_id), None)`` raising
        # TypeError whenever ``indexclass`` was left at its default.
        self.__ids = {}
        list.__init__(self, iterable)
        if indexclass is None:
            return
        for position, element in enumerate(self):
            if isinstance(element, dict):
                _id = element.get(indexkey)
                if isinstance(_id, indexclass):
                    self.__ids[_id] = position

    def find(self, id):
        """Return the indexed element with the given id, or None."""
        position = self.__ids.get(id)
        if position is None:
            return None
        return self[position]

    def ids(self):
        """Return the indexed ids ordered by their position in the list."""
        return sorted(self.__ids.keys(), key=lambda k: self.__ids[k])
class _InDict(object):
    """Mixin for dict-like classes that wraps list values in :class:`InList`
    on assignment, using INDEXCLASS/INDEXKEY to build the id index."""

    INDEXCLASS = int
    INDEXKEY = "_id"

    def __init__(self, *args, **kwargs):
        if isinstance(self, dict):
            self.__dict_class = dict
        if isinstance(self, OrderedDict):
            # OrderedDict subclasses dict, so this overrides the line above.
            self.__dict_class = OrderedDict
        self.__dict_class.__init__(self, *args, **kwargs)

    def __setitem__(self, key, val):
        if isinstance(val, list):
            val = InList(val, self.INDEXCLASS, self.INDEXKEY)
        self.__dict_class.__setitem__(self, key, val)
class InDict(_InDict, dict):
    """Plain dict whose list values are wrapped in :class:`InList`."""
    pass
class OrderedInDict(_InDict, OrderedDict):
    """OrderedDict whose list values are wrapped in :class:`InList`."""
    pass
| mit |
SequencingDOTcom/oAuth2-demo | python-django/oauth2demo/appconfig.py | 4 | 1718 | class AppConfig():
"""
URI of Sequencing oAuth2 where you can request user to authorize your app.
"""
oauth2_authorization_uri = 'https://sequencing.com/oauth2/authorize'
"""
Sequencing API endpoint.
"""
api_uri = 'https://api.sequencing.com'
"""
Redirect URI of your oauth2 app, where it expects Sequencing oAuth2 to
redirect browser.
"""
redirect_uri = 'https://python-oauth-demo.sequencing.com/Default/Authcallback'
"""
Id of your oauth2 app (oauth2 client).
You will be able to get this value from Sequencing website.
"""
client_id = 'oAuth2 Demo Python'
"""
Secret of your oauth2 app (oauth2 client).
You will be able to get this value from Sequencing website.
Keep this value private.
"""
client_secret = 'cyqZOLZfVET_EsKv3f1xekpqe8FZDlG2rNwK5JZyMFkRisKpNC1s-IlM3hj6KlE4e2SsYRDM903Mj2T699fBCw'
"""
Supply here 'code', which means you want to take
the route of authorization code response
"""
response_type = 'code'
"""
oAuth2 state.
It should be some random generated string. State you sent to authorize URI
must match the state you get, when browser is redirected to the redirect URI
you provided.
"""
state = '900150983cd24fb0d6963f7d28e17f72'
"""
Array of scopes, access to which you request.
"""
scopes = ['demo']
"""
URI of Sequencing oAuth2 where you can obtain access token.
"""
oauth2_token_uri = 'https://sequencing.com/oauth2/token'
"""
Supply here 'authorization_code', which means you request to
exchange the authorization code for the aouth2 tokens
"""
grant_type= 'authorization_code'
| mit |
mgahsan/QuantEcon.py | examples/preim1.py | 7 | 1294 | """
QE by Tom Sargent and John Stachurski.
Illustrates preimages of functions
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """The function whose range and preimages are illustrated:
    ``f(x) = 0.6*cos(4x) + 1.4`` (works element-wise on arrays)."""
    return 1.4 + 0.6 * np.cos(4.0 * x)
# Sample f on [-1, 1] and record the extremes of its range.
xmin, xmax = -1, 1
x = np.linspace(xmin, xmax, 160)
y = f(x)
ya, yb = np.min(y), np.max(y)

# Two stacked panels: both show f and its (shaded) range on the y-axis.
fig, axes = plt.subplots(2, 1, figsize=(8, 8))

for ax in axes:
    # Set the axes through the origin
    for spine in ['left', 'bottom']:
        ax.spines[spine].set_position('zero')
    for spine in ['right', 'top']:
        ax.spines[spine].set_color('none')
    ax.set_ylim(-0.6, 3.2)
    ax.set_xlim(xmin, xmax)
    ax.set_yticks(())
    ax.set_xticks(())
    ax.plot(x, y, 'k-', lw=2, label=r'$f$')
    ax.fill_between(x, ya, yb, facecolor='blue', alpha=0.05)
    # Mark the range of f as a vertical bar on the y-axis.
    ax.vlines([0], ya, yb, lw=3, color='blue', label=r'range of $f$')
    ax.text(0.04, -0.3, '$0$', fontsize=16)

# Top panel: y = 1.5 has two preimages x_0, x_1 (drawn as dashed verticals).
ax = axes[0]
ax.legend(loc='upper right', frameon=False)
ybar = 1.5
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.05, 0.8 * ybar, r'$y$', fontsize=16)
for i, z in enumerate((-0.35, 0.35)):
    ax.vlines(z, 0, f(z), linestyle='--', alpha=0.5)
    ax.text(z, -0.2, r'$x_{}$'.format(i), fontsize=16)

# Bottom panel: y = 2.6 lies outside the range, so it has no preimage.
ax = axes[1]
ybar = 2.6
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.04, 0.91 * ybar, r'$y$', fontsize=16)

plt.show()
| bsd-3-clause |
patrick-nicholson/spark | python/pyspark/streaming/kinesis.py | 76 | 6224 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from py4j.protocol import Py4JJavaError
from pyspark.serializers import PairDeserializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.streaming import DStream
__all__ = ['KinesisUtils', 'InitialPositionInStream', 'utf8_decoder']
def utf8_decoder(s):
    """Decode bytes as UTF-8, passing None through unchanged."""
    return None if s is None else s.decode('utf-8')
class KinesisUtils(object):
    """Helpers for creating Kinesis-backed input DStreams via the JVM gateway."""

    @staticmethod
    def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName,
                     initialPositionInStream, checkpointInterval,
                     storageLevel=StorageLevel.MEMORY_AND_DISK_2,
                     awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder,
                     stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None):
        """
        Create an input stream that pulls messages from a Kinesis stream. This uses the
        Kinesis Client Library (KCL) to pull messages from Kinesis.

        .. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
            is enabled. Make sure that your checkpoint directory is secure.

        :param ssc: StreamingContext object
        :param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to
                               update DynamoDB
        :param streamName: Kinesis stream name
        :param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
        :param regionName: Name of region used by the Kinesis Client Library (KCL) to update
                           DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
        :param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the
                                        worker's initial starting position in the stream. The
                                        values are either the beginning of the stream per Kinesis'
                                        limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
                                        the tip of the stream (InitialPositionInStream.LATEST).
        :param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis
                                   Spark Streaming documentation for more details on the different
                                   types of checkpoints.
        :param storageLevel: Storage level to use for storing the received objects (default is
                             StorageLevel.MEMORY_AND_DISK_2)
        :param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use
                               DefaultAWSCredentialsProviderChain)
        :param awsSecretKey: AWS SecretKey (default is None. If None, will use
                             DefaultAWSCredentialsProviderChain)
        :param decoder: A function used to decode value (default is utf8_decoder)
        :param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
                                 the Kinesis stream (default is None).
        :param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
                               stream, if STS is being used (default is None).
        :param stsExternalId: External ID that can be used to validate against the assumed IAM
                              role's trust policy, if STS is being used (default is None).
        :return: A DStream object
        """
        # Convert the Python-side storage level and interval into their JVM
        # counterparts before crossing the py4j bridge.
        jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
        jduration = ssc._jduration(checkpointInterval)

        try:
            # Use KinesisUtilsPythonHelper to access Scala's KinesisUtils
            helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
        except TypeError as e:
            # py4j raises this exact TypeError when the Kinesis assembly jar
            # is missing from the classpath; print setup hints, then re-raise.
            if str(e) == "'JavaPackage' object is not callable":
                KinesisUtils._printErrorMsg(ssc.sparkContext)
            raise
        jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl,
                                      regionName, initialPositionInStream, jduration, jlevel,
                                      awsAccessKeyId, awsSecretKey, stsAssumeRoleArn,
                                      stsSessionName, stsExternalId)
        # Wrap the JVM DStream and decode each raw record on the Python side.
        stream = DStream(jstream, ssc, NoOpSerializer())
        return stream.map(lambda v: decoder(v))

    @staticmethod
    def _printErrorMsg(sc):
        # Printed when the Kinesis ASL assembly is absent from the classpath.
        print("""
________________________________________________________________________________________________

  Spark Streaming's Kinesis libraries not found in class path. Try one of the following.

  1. Include the Kinesis library and its dependencies with in the
     spark-submit command as

     $ bin/spark-submit --packages org.apache.spark:spark-streaming-kinesis-asl:%s ...

  2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
     Group Id = org.apache.spark, Artifact Id = spark-streaming-kinesis-asl-assembly, Version = %s.
     Then, include the jar in the spark-submit command as

     $ bin/spark-submit --jars <spark-streaming-kinesis-asl-assembly.jar> ...

________________________________________________________________________________________________

""" % (sc.version, sc.version))
class InitialPositionInStream(object):
    """Mirrors the KCL InitialPositionInStream choices: LATEST=0, TRIM_HORIZON=1."""
    LATEST, TRIM_HORIZON = (0, 1)
| apache-2.0 |
ingadhoc/odoo | addons/sale/res_config.py | 175 | 8813 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class sale_configuration(osv.TransientModel):
    """Settings wizard for the Sales application (inherits the transient
    ``sale.config.settings`` model).  Each boolean either toggles a
    res.groups membership (``implied_group``) or installs/uninstalls a
    module (``module_*`` naming convention of the settings framework)."""
    _inherit = 'sale.config.settings'

    _columns = {
        'group_invoice_so_lines': fields.boolean('Generate invoices based on the sales order lines',
            implied_group='sale.group_invoice_so_lines',
            help="To allow your salesman to make invoices for sales order lines using the menu 'Lines to Invoice'."),
        'timesheet': fields.boolean('Prepare invoices based on timesheets',
            help='For modifying account analytic view to show important data to project manager of services companies.'
                 'You can also view the report of account analytic summary user-wise as well as month wise.\n'
                 '-This installs the module account_analytic_analysis.'),
        'module_account_analytic_analysis': fields.boolean('Use contracts management',
            help='Allows to define your customer contracts conditions: invoicing '
                 'method (fixed price, on timesheet, advance invoice), the exact pricing '
                 '(650€/day for a developer), the duration (one year support contract).\n'
                 'You will be able to follow the progress of the contract and invoice automatically.\n'
                 '-It installs the account_analytic_analysis module.'),
        'time_unit': fields.many2one('product.uom', 'The default working time unit for services is'),
        'group_sale_pricelist': fields.boolean("Use pricelists to adapt your price per customers",
            implied_group='product.group_sale_pricelist',
            help="""Allows to manage different prices based on rules per category of customers.
Example: 10% for retailers, promotion of 5 EUR on this product, etc."""),
        'group_uom': fields.boolean("Allow using different units of measure",
            implied_group='product.group_uom',
            help="""Allows you to select and maintain different units of measure for products."""),
        'group_discount_per_so_line': fields.boolean("Allow setting a discount on the sales order lines",
            implied_group='sale.group_discount_per_so_line',
            help="Allows you to apply some discount per sales order line."),
        'module_warning': fields.boolean("Allow configuring alerts by customer or products",
            help='Allow to configure notification on products and trigger them when a user wants to sell a given product or a given customer.\n'
                 'Example: Product: this product is deprecated, do not purchase more than 5.\n'
                 'Supplier: don\'t forget to ask for an express delivery.'),
        'module_sale_margin': fields.boolean("Display margins on sales orders",
            help='This adds the \'Margin\' on sales order.\n'
                 'This gives the profitability by calculating the difference between the Unit Price and Cost Price.\n'
                 '-This installs the module sale_margin.'),
        'module_website_quote': fields.boolean("Allow online quotations and templates",
            help='This adds the online quotation'),
        'module_sale_journal': fields.boolean("Allow batch invoicing of delivery orders through journals",
            help='Allows you to categorize your sales and deliveries (picking lists) between different journals, '
                 'and perform batch operations on journals.\n'
                 '-This installs the module sale_journal.'),
        'module_analytic_user_function': fields.boolean("One employee can have different roles per contract",
            help='Allows you to define what is the default function of a specific user on a given account.\n'
                 'This is mostly used when a user encodes his timesheet. The values are retrieved and the fields are auto-filled. '
                 'But the possibility to change these values is still available.\n'
                 '-This installs the module analytic_user_function.'),
        'module_project': fields.boolean("Project"),
        'module_sale_stock': fields.boolean("Trigger delivery orders automatically from sales orders",
            help='Allows you to Make Quotation, Sale Order using different Order policy and Manage Related Stock.\n'
                 '-This installs the module sale_stock.'),
        'group_sale_delivery_address': fields.boolean("Allow a different address for delivery and invoicing ",
            implied_group='sale.group_delivery_invoice_address',
            help="Allows you to specify different delivery and invoice addresses on a sales order."),
    }

    def default_get(self, cr, uid, fields, context=None):
        """Fill defaults; derives ``time_unit`` from the company's project
        time mode (when the project module is on) or from the UoM of the
        demo 'consultant' service product, and mirrors the contracts
        module flag into ``timesheet``."""
        ir_model_data = self.pool.get('ir.model.data')
        res = super(sale_configuration, self).default_get(cr, uid, fields, context)
        if res.get('module_project'):
            user = self.pool.get('res.users').browse(cr, uid, uid, context)
            res['time_unit'] = user.company_id.project_time_mode_id.id
        else:
            product = ir_model_data.xmlid_to_object(cr, uid, 'product.product_product_consultant')
            if product and product.exists():
                res['time_unit'] = product.uom_id.id
        res['timesheet'] = res.get('module_account_analytic_analysis')
        return res

    def _get_default_time_unit(self, cr, uid, context=None):
        """Default ``time_unit``: the UoM named 'Hour' (translated)."""
        ids = self.pool.get('product.uom').search(cr, uid, [('name', '=', _('Hour'))], context=context)
        return ids and ids[0] or False

    _defaults = {
        'time_unit': _get_default_time_unit,
    }

    def set_sale_defaults(self, cr, uid, ids, context=None):
        """Persist the chosen time unit on the demo service product and, if
        the project module is enabled, on the company's project time mode."""
        ir_model_data = self.pool.get('ir.model.data')
        wizard = self.browse(cr, uid, ids)[0]
        if wizard.time_unit:
            product = ir_model_data.xmlid_to_object(cr, uid, 'product.product_product_consultant')
            if product and product.exists():
                product.write({'uom_id': wizard.time_unit.id, 'uom_po_id': wizard.time_unit.id})
            else:
                _logger.warning("Product with xml_id 'product.product_product_consultant' not found, UoMs not updated!")
        if wizard.module_project and wizard.time_unit:
            user = self.pool.get('res.users').browse(cr, uid, uid, context)
            user.company_id.write({'project_time_mode_id': wizard.time_unit.id})
        return {}

    def onchange_task_work(self, cr, uid, ids, task_work, context=None):
        """Keep the timesheet/service modules in sync with the task-work flag.
        NOTE(review): the returned fields are not declared in _columns here
        -- presumably defined by an inheriting module; confirm."""
        if not task_work:
            return {'value': {}}
        return {'value': {
            'module_project_timesheet': task_work,
            'module_sale_service': task_work,
        }}

    def onchange_timesheet(self, cr, uid, ids, timesheet, context=None):
        """Mirror the timesheet checkbox into the contracts module flag."""
        return {'value': {
            'timesheet': timesheet,
            'module_account_analytic_analysis': timesheet,
        }}
class account_config_settings(osv.osv_memory):
    """Extra sale-related options on the accounting settings wizard."""
    _inherit = 'account.config.settings'
    _columns = {
        'module_sale_analytic_plans': fields.boolean('Use multiple analytic accounts on sales',
            help="""This allows install module sale_analytic_plans."""),
        'group_analytic_account_for_sales': fields.boolean('Analytic accounting for sales',
            implied_group='sale.group_analytic_accounting',
            help="Allows you to specify an analytic account on sales orders."),
    }

    def onchange_sale_analytic_plans(self, cr, uid, ids, module_sale_analytic_plans, context=None):
        """ change group_analytic_account_for_sales following module_sale_analytic_plans """
        if module_sale_analytic_plans:
            return {'value': {'group_analytic_account_for_sales': module_sale_analytic_plans}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lihui7115/ChromiumGStreamerBackend | net/data/websocket/trailing-whitespace_wsh.py | 47 | 1039 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# The purpose of this test is to verify that the WebSocket handshake correctly
# ignores trailing whitespace on response headers.
# It is used by test case WebSocketEndToEndTest.TrailingWhitespace.
from mod_pywebsocket import handshake
from mod_pywebsocket.handshake.hybi import compute_accept
def web_socket_do_extra_handshake(request):
    # Hand-roll the 101 response so we control the exact header bytes; note
    # the deliberate trailing space after "sip" in Sec-WebSocket-Protocol,
    # which is what this test server exists to produce.
    accept = compute_accept(request.headers_in['Sec-WebSocket-Key'])[0]
    response_lines = [
        'HTTP/1.1 101 Switching Protocols',
        'Upgrade: websocket',
        'Connection: Upgrade',
        'Sec-WebSocket-Accept: %s' % accept,
        'Sec-WebSocket-Protocol: sip ',
        '',
        '',
    ]
    request.connection.write('\r\n'.join(response_lines))
    # Prevent pywebsocket from sending its own handshake message.
    raise handshake.AbortedByUserException('Close the connection')
def web_socket_transfer_data(request):
    # Nothing to transfer: the handshake handler above always raises
    # AbortedByUserException, so the connection is closed before any data
    # phase begins.
    pass
| bsd-3-clause |
laanwj/deluge | deluge/ui/gtkui/preferences.py | 1 | 44502 | #
# preferences.py
#
# Copyright (C) 2007, 2008 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade
import pkg_resources
import deluge.component as component
from deluge.log import LOG as log
from deluge.ui.client import client
import deluge.common
import deluge.error
import common
from deluge.configmanager import ConfigManager
import deluge.configmanager
class Preferences(component.Component):
    def __init__(self):
        """Build the preferences dialog.

        Loads the glade layout, builds the category treeview and the plugin
        listview, and connects all signal handlers.  Widget values are not
        filled in here; show()/_show() populate them once the core config
        has been fetched.
        """
        component.Component.__init__(self, "Preferences")
        self.window = component.get("MainWindow")
        self.glade = gtk.glade.XML(
            pkg_resources.resource_filename("deluge.ui.gtkui",
                "glade/preferences_dialog.glade"))
        self.pref_dialog = self.glade.get_widget("pref_dialog")
        self.pref_dialog.set_icon(common.get_deluge_icon())
        self.treeview = self.glade.get_widget("treeview")
        self.notebook = self.glade.get_widget("notebook")
        self.gtkui_config = ConfigManager("gtkui.conf")
        self.glade.get_widget("image_magnet").set_from_file(
            deluge.common.get_pixmap("magnet.png"))
        # Setup the liststore for the categories (tab pages).
        # Column 0 is the notebook page index, column 1 the category label.
        self.liststore = gtk.ListStore(int, str)
        self.treeview.set_model(self.liststore)
        render = gtk.CellRendererText()
        column = gtk.TreeViewColumn(_("Categories"), render, text=1)
        self.treeview.append_column(column)
        # Add the default categories; the order here must match the order of
        # the pages in the glade notebook, since the index is the page number.
        i = 0
        for category in [_("Downloads"), _("Network"), _("Bandwidth"),
            _("Interface"), _("Other"), _("Daemon"), _("Queue"), _("Proxy"),
            _("Cache"), _("Plugins")]:
            self.liststore.append([i, category])
            i += 1
        # Setup plugin tab listview: column 0 is the plugin name,
        # column 1 its enabled state.
        self.plugin_liststore = gtk.ListStore(str, bool)
        self.plugin_liststore.set_sort_column_id(0, gtk.SORT_ASCENDING)
        self.plugin_listview = self.glade.get_widget("plugin_listview")
        self.plugin_listview.set_model(self.plugin_liststore)
        render = gtk.CellRendererToggle()
        render.connect("toggled", self.on_plugin_toggled)
        render.set_property("activatable", True)
        self.plugin_listview.append_column(
            gtk.TreeViewColumn(_("Enabled"), render, active=1))
        self.plugin_listview.append_column(
            gtk.TreeViewColumn(_("Plugin"), gtk.CellRendererText(), text=0))
        # Connect to the 'changed' event of TreeViewSelection to get selection
        # changes.
        self.treeview.get_selection().connect("changed",
            self.on_selection_changed)
        self.plugin_listview.get_selection().connect("changed",
            self.on_plugin_selection_changed)
        self.glade.signal_autoconnect({
            "on_pref_dialog_delete_event": self.on_pref_dialog_delete_event,
            "on_button_ok_clicked": self.on_button_ok_clicked,
            "on_button_apply_clicked": self.on_button_apply_clicked,
            "on_button_cancel_clicked": self.on_button_cancel_clicked,
            "on_toggle": self.on_toggle,
            "on_test_port_clicked": self.on_test_port_clicked,
            "on_button_plugin_install_clicked": self._on_button_plugin_install_clicked,
            "on_button_rescan_plugins_clicked": self._on_button_rescan_plugins_clicked,
            "on_button_find_plugins_clicked": self._on_button_find_plugins_clicked,
            "on_button_cache_refresh_clicked": self._on_button_cache_refresh_clicked,
            "on_combo_proxy_type_changed": self._on_combo_proxy_type_changed,
            "on_button_associate_magnet_clicked": self._on_button_associate_magnet_clicked
        })
        # These get updated by requests done to the core (see show()).
        self.all_plugins = []
        self.enabled_plugins = []
    def __del__(self):
        # Explicitly release our reference to the shared gtkui ConfigManager.
        del self.gtkui_config
def add_page(self, name, widget):
"""Add a another page to the notebook"""
# Create a header and scrolled window for the preferences tab
parent = widget.get_parent()
if parent:
parent.remove(widget)
vbox = gtk.VBox()
label = gtk.Label()
label.set_use_markup(True)
label.set_markup("<b><i><big>" + name + "</big></i></b>")
label.set_alignment(0.00, 0.50)
label.set_padding(10, 10)
vbox.pack_start(label, False, True, 0)
sep = gtk.HSeparator()
vbox.pack_start(sep, False, True, 0)
align = gtk.Alignment()
align.set_padding(5, 0, 0, 0)
align.set(0, 0, 1, 1)
align.add(widget)
vbox.pack_start(align, True, True, 0)
scrolled = gtk.ScrolledWindow()
viewport = gtk.Viewport()
viewport.set_shadow_type(gtk.SHADOW_NONE)
viewport.add(vbox)
scrolled.add(viewport)
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolled.show_all()
# Add this page to the notebook
index = self.notebook.append_page(scrolled)
self.liststore.append([index, name])
return name
def remove_page(self, name):
"""Removes a page from the notebook"""
self.page_num_to_remove = None
self.iter_to_remove = None
def check_row(model, path, iter, user_data):
row_name = model.get_value(iter, 1)
if row_name == user_data:
# This is the row we need to remove
self.page_num_to_remove = model.get_value(iter, 0)
self.iter_to_remove = iter
return
self.liststore.foreach(check_row, name)
# Remove the page and row
if self.page_num_to_remove != None:
self.notebook.remove_page(self.page_num_to_remove)
if self.iter_to_remove != None:
self.liststore.remove(self.iter_to_remove)
# We need to re-adjust the index values for the remaining pages
for i, (index, name) in enumerate(self.liststore):
self.liststore[i][0] = i
    def show(self, page=None):
        """Page should be the string in the left list.. ie, 'Network' or
        'Bandwidth'"""
        if page != None:
            for (index, string) in self.liststore:
                if page == string:
                    self.treeview.get_selection().select_path(index)
                    break
        component.get("PluginManager").run_on_show_prefs()
        # Update the preferences dialog to reflect current config settings
        self.core_config = {}
        if client.connected():
            # Each callback below stores one piece of daemon state and then
            # requests the next; _show() runs only after the final one
            # (cache status) has arrived, so the dialog never shows with
            # partially-fetched state.
            def _on_get_config(config):
                self.core_config = config
                client.core.get_available_plugins().addCallback(_on_get_available_plugins)
            def _on_get_available_plugins(plugins):
                self.all_plugins = plugins
                client.core.get_enabled_plugins().addCallback(_on_get_enabled_plugins)
            def _on_get_enabled_plugins(plugins):
                self.enabled_plugins = plugins
                client.core.get_listen_port().addCallback(_on_get_listen_port)
            def _on_get_listen_port(port):
                self.active_port = port
                client.core.get_cache_status().addCallback(_on_get_cache_status)
            def _on_get_cache_status(status):
                self.cache_status = status
                self._show()
            # This starts a series of client.core requests prior to showing the window
            client.core.get_config().addCallback(_on_get_config)
        else:
            # Not connected: show immediately; _show() will disable the
            # core-backed widgets.
            self._show()
    def _show(self):
        """Populate every widget from the gathered state and show the dialog.

        Expects self.core_config, self.all_plugins, self.enabled_plugins,
        self.active_port and self.cache_status to have been filled in by
        show() when a daemon connection exists; without one, all
        core-backed widgets are disabled instead.
        """
        if self.core_config != {} and self.core_config != None:
            # Maps widget name -> (modifier, value); the modifier selects
            # which setter method is applied in the update loop below.
            core_widgets = {
                "download_path_button": \
                    ("filename", self.core_config["download_location"]),
                "chk_move_completed": \
                    ("active", self.core_config["move_completed"]),
                "move_completed_path_button": \
                    ("filename", self.core_config["move_completed_path"]),
                "chk_copy_torrent_file": \
                    ("active", self.core_config["copy_torrent_file"]),
                "chk_del_copy_torrent_file": \
                    ("active", self.core_config["del_copy_torrent_file"]),
                "torrent_files_button": \
                    ("filename", self.core_config["torrentfiles_location"]),
                "chk_autoadd": \
                    ("active", self.core_config["autoadd_enable"]),
                "folder_autoadd": \
                    ("filename", self.core_config["autoadd_location"]),
                "radio_compact_allocation": \
                    ("active", self.core_config["compact_allocation"]),
                "radio_full_allocation": \
                    ("not_active", self.core_config["compact_allocation"]),
                "chk_prioritize_first_last_pieces": \
                    ("active",
                        self.core_config["prioritize_first_last_pieces"]),
                "chk_add_paused": ("active", self.core_config["add_paused"]),
                "spin_port_min": ("value", self.core_config["listen_ports"][0]),
                "spin_port_max": ("value", self.core_config["listen_ports"][1]),
                "active_port_label": ("text", str(self.active_port)),
                "chk_random_port": ("active", self.core_config["random_port"]),
                "spin_outgoing_port_min": ("value", self.core_config["outgoing_ports"][0]),
                "spin_outgoing_port_max": ("value", self.core_config["outgoing_ports"][1]),
                "chk_random_outgoing_ports": ("active", self.core_config["random_outgoing_ports"]),
                "entry_interface": ("text", self.core_config["listen_interface"]),
                "entry_peer_tos": ("text", self.core_config["peer_tos"]),
                "chk_dht": ("active", self.core_config["dht"]),
                "chk_upnp": ("active", self.core_config["upnp"]),
                "chk_natpmp": ("active", self.core_config["natpmp"]),
                "chk_utpex": ("active", self.core_config["utpex"]),
                "chk_lsd": ("active", self.core_config["lsd"]),
                "chk_new_releases": ("active", self.core_config["new_release_check"]),
                "chk_send_info": ("active", self.core_config["send_info"]),
                "entry_geoip": ("text", self.core_config["geoip_db_location"]),
                "combo_encin": ("active", self.core_config["enc_in_policy"]),
                "combo_encout": ("active", self.core_config["enc_out_policy"]),
                "combo_enclevel": ("active", self.core_config["enc_level"]),
                "chk_pref_rc4": ("active", self.core_config["enc_prefer_rc4"]),
                "spin_max_connections_global": \
                    ("value", self.core_config["max_connections_global"]),
                "spin_max_download": \
                    ("value", self.core_config["max_download_speed"]),
                "spin_max_upload": \
                    ("value", self.core_config["max_upload_speed"]),
                "spin_max_upload_slots_global": \
                    ("value", self.core_config["max_upload_slots_global"]),
                "spin_max_half_open_connections": \
                    ("value", self.core_config["max_half_open_connections"]),
                "spin_max_connections_per_second": \
                    ("value", self.core_config["max_connections_per_second"]),
                "chk_ignore_limits_on_local_network": \
                    ("active", self.core_config["ignore_limits_on_local_network"]),
                "chk_rate_limit_ip_overhead": \
                    ("active", self.core_config["rate_limit_ip_overhead"]),
                "spin_max_connections_per_torrent": \
                    ("value", self.core_config["max_connections_per_torrent"]),
                "spin_max_upload_slots_per_torrent": \
                    ("value", self.core_config["max_upload_slots_per_torrent"]),
                "spin_max_download_per_torrent": \
                    ("value", self.core_config["max_download_speed_per_torrent"]),
                "spin_max_upload_per_torrent": \
                    ("value", self.core_config["max_upload_speed_per_torrent"]),
                "spin_daemon_port": \
                    ("value", self.core_config["daemon_port"]),
                "chk_allow_remote_connections": \
                    ("active", self.core_config["allow_remote"]),
                "spin_active": ("value", self.core_config["max_active_limit"]),
                "spin_seeding": ("value", self.core_config["max_active_seeding"]),
                "spin_downloading": ("value", self.core_config["max_active_downloading"]),
                "chk_dont_count_slow_torrents": ("active", self.core_config["dont_count_slow_torrents"]),
                "chk_queue_new_top": ("active", self.core_config["queue_new_to_top"]),
                "spin_share_ratio_limit": ("value", self.core_config["share_ratio_limit"]),
                "spin_seed_time_ratio_limit": \
                    ("value", self.core_config["seed_time_ratio_limit"]),
                "spin_seed_time_limit": ("value", self.core_config["seed_time_limit"]),
                "chk_seed_ratio": ("active", self.core_config["stop_seed_at_ratio"]),
                "spin_share_ratio": ("value", self.core_config["stop_seed_ratio"]),
                "chk_remove_ratio": ("active", self.core_config["remove_seed_at_ratio"]),
                "spin_cache_size": ("value", self.core_config["cache_size"]),
                "spin_cache_expiry": ("value", self.core_config["cache_expiry"])
            }
            # Add proxy stuff
            for t in ("peer", "web_seed", "tracker", "dht"):
                core_widgets["spin_proxy_port_%s" % t] = ("value", self.core_config["proxies"][t]["port"])
                core_widgets["combo_proxy_type_%s" % t] = ("active", self.core_config["proxies"][t]["type"])
                core_widgets["txt_proxy_server_%s" % t] = ("text", self.core_config["proxies"][t]["hostname"])
                core_widgets["txt_proxy_username_%s" % t] = ("text", self.core_config["proxies"][t]["username"])
                core_widgets["txt_proxy_password_%s" % t] = ("text", self.core_config["proxies"][t]["password"])
            # Change a few widgets if we're connected to a remote host:
            # file-chooser buttons only make sense for local paths, so swap
            # them for plain text entries on remote connections.
            if not client.is_localhost():
                self.glade.get_widget("entry_download_path").show()
                self.glade.get_widget("download_path_button").hide()
                core_widgets.pop("download_path_button")
                core_widgets["entry_download_path"] = ("text", self.core_config["download_location"])
                self.glade.get_widget("entry_move_completed_path").show()
                self.glade.get_widget("move_completed_path_button").hide()
                core_widgets.pop("move_completed_path_button")
                core_widgets["entry_move_completed_path"] = ("text", self.core_config["move_completed_path"])
                self.glade.get_widget("entry_torrents_path").show()
                self.glade.get_widget("torrent_files_button").hide()
                core_widgets.pop("torrent_files_button")
                core_widgets["entry_torrents_path"] = ("text", self.core_config["torrentfiles_location"])
                self.glade.get_widget("entry_autoadd").show()
                self.glade.get_widget("folder_autoadd").hide()
                core_widgets.pop("folder_autoadd")
                core_widgets["entry_autoadd"] = ("text", self.core_config["autoadd_location"])
            else:
                self.glade.get_widget("entry_download_path").hide()
                self.glade.get_widget("download_path_button").show()
                self.glade.get_widget("entry_move_completed_path").hide()
                self.glade.get_widget("move_completed_path_button").show()
                self.glade.get_widget("entry_torrents_path").hide()
                self.glade.get_widget("torrent_files_button").show()
                self.glade.get_widget("entry_autoadd").hide()
                self.glade.get_widget("folder_autoadd").show()
            # Update the widgets accordingly
            for key in core_widgets.keys():
                modifier = core_widgets[key][0]
                value = core_widgets[key][1]
                widget = self.glade.get_widget(key)
                if type(widget) == gtk.FileChooserButton:
                    # Re-enable the button's internal children as well.
                    for child in widget.get_children():
                        child.set_sensitive(True)
                widget.set_sensitive(True)
                if modifier == "filename":
                    if value:
                        try:
                            widget.set_current_folder(value)
                        except Exception, e:
                            log.debug("Unable to set_current_folder: %s", e)
                elif modifier == "active":
                    widget.set_active(value)
                elif modifier == "not_active":
                    widget.set_active(not value)
                elif modifier == "value":
                    widget.set_value(float(value))
                elif modifier == "text":
                    widget.set_text(value)
            for key in core_widgets.keys():
                widget = self.glade.get_widget(key)
                # Update the toggle status if necessary
                self.on_toggle(widget)
        else:
            # No daemon connection: every core-backed widget is disabled.
            core_widget_list = [
                "download_path_button",
                "chk_move_completed",
                "move_completed_path_button",
                "chk_copy_torrent_file",
                "chk_del_copy_torrent_file",
                "torrent_files_button",
                "chk_autoadd",
                "folder_autoadd",
                "radio_compact_allocation",
                "radio_full_allocation",
                "chk_prioritize_first_last_pieces",
                "chk_add_paused",
                "spin_port_min",
                "spin_port_max",
                "active_port_label",
                "chk_random_port",
                "spin_outgoing_port_min",
                "spin_outgoing_port_max",
                "chk_random_outgoing_ports",
                "entry_interface",
                "entry_peer_tos",
                "chk_dht",
                "chk_upnp",
                "chk_natpmp",
                "chk_utpex",
                "chk_lsd",
                "chk_send_info",
                "chk_new_releases",
                "entry_geoip",
                "combo_encin",
                "combo_encout",
                "combo_enclevel",
                "chk_pref_rc4",
                "spin_max_connections_global",
                "spin_max_download",
                "spin_max_upload",
                "spin_max_upload_slots_global",
                "spin_max_half_open_connections",
                "spin_max_connections_per_second",
                "chk_ignore_limits_on_local_network",
                "chk_rate_limit_ip_overhead",
                "spin_max_connections_per_torrent",
                "spin_max_upload_slots_per_torrent",
                "spin_max_download_per_torrent",
                "spin_max_upload_per_torrent",
                "spin_daemon_port",
                "chk_allow_remote_connections",
                "spin_seeding",
                "spin_downloading",
                "spin_active",
                "chk_dont_count_slow_torrents",
                "chk_queue_new_top",
                "chk_seed_ratio",
                "spin_share_ratio",
                "chk_remove_ratio",
                "spin_share_ratio_limit",
                "spin_seed_time_ratio_limit",
                "spin_seed_time_limit",
                "spin_cache_size",
                "spin_cache_expiry",
                "button_cache_refresh",
                "btn_testport"
            ]
            for t in ("peer", "web_seed", "tracker", "dht"):
                core_widget_list.append("spin_proxy_port_%s" % t)
                core_widget_list.append("combo_proxy_type_%s" % t)
                core_widget_list.append("txt_proxy_username_%s" % t)
                core_widget_list.append("txt_proxy_password_%s" % t)
                core_widget_list.append("txt_proxy_server_%s" % t)
            # We don't appear to be connected to a daemon
            for key in core_widget_list:
                widget = self.glade.get_widget(key)
                if type(widget) == gtk.FileChooserButton:
                    for child in widget.get_children():
                        child.set_sensitive(False)
                widget.set_sensitive(False)
        ## Downloads tab ##
        self.glade.get_widget("chk_show_dialog").set_active(
            self.gtkui_config["interactive_add"])
        self.glade.get_widget("chk_focus_dialog").set_active(
            self.gtkui_config["focus_add_dialog"])
        ## Interface tab ##
        self.glade.get_widget("chk_use_tray").set_active(
            self.gtkui_config["enable_system_tray"])
        self.glade.get_widget("chk_min_on_close").set_active(
            self.gtkui_config["close_to_tray"])
        self.glade.get_widget("chk_start_in_tray").set_active(
            self.gtkui_config["start_in_tray"])
        self.glade.get_widget("chk_enable_appindicator").set_active(
            self.gtkui_config["enable_appindicator"])
        self.glade.get_widget("chk_lock_tray").set_active(
            self.gtkui_config["lock_tray"])
        self.glade.get_widget("chk_classic_mode").set_active(
            self.gtkui_config["classic_mode"])
        self.glade.get_widget("chk_show_rate_in_title").set_active(
            self.gtkui_config["show_rate_in_title"])
        ## Other tab ##
        self.glade.get_widget("chk_show_new_releases").set_active(
            self.gtkui_config["show_new_releases"])
        ## Cache tab ##
        if client.connected():
            self.__update_cache_status()
        ## Plugins tab ##
        all_plugins = self.all_plugins
        enabled_plugins = self.enabled_plugins
        # Clear the existing list so we don't duplicate entries.
        self.plugin_liststore.clear()
        # Iterate through the lists and add them to the liststore
        for plugin in all_plugins:
            if plugin in enabled_plugins:
                enabled = True
            else:
                enabled = False
            row = self.plugin_liststore.append()
            self.plugin_liststore.set_value(row, 0, plugin)
            self.plugin_liststore.set_value(row, 1, enabled)
        # Now show the dialog
        self.pref_dialog.show()
    def set_config(self, hide=False):
        """
        Sets all altered config values in the core.

        Reads every widget back into new gtkui/core config dicts, runs the
        plugin apply hook, and pushes only the values that actually changed.

        :param hide: bool, if True, will not re-show the dialog and will hide it instead
        """
        # sha_hash is used below to hash the tray password; hashlib may be
        # missing on very old Pythons, hence the fallback to the sha module.
        try:
            from hashlib import sha1 as sha_hash
        except ImportError:
            from sha import new as sha_hash
        # Get the values from the dialog
        new_core_config = {}
        new_gtkui_config = {}
        ## Downloads tab ##
        new_gtkui_config["interactive_add"] = \
            self.glade.get_widget("chk_show_dialog").get_active()
        new_gtkui_config["focus_add_dialog"] = \
            self.glade.get_widget("chk_focus_dialog").get_active()
        new_core_config["copy_torrent_file"] = \
            self.glade.get_widget("chk_copy_torrent_file").get_active()
        new_core_config["del_copy_torrent_file"] = \
            self.glade.get_widget("chk_del_copy_torrent_file").get_active()
        new_core_config["move_completed"] = \
            self.glade.get_widget("chk_move_completed").get_active()
        # Path settings live in different widgets depending on whether the
        # daemon is local (file choosers) or remote (text entries) — see
        # the corresponding widget swap in _show().
        if client.is_localhost():
            new_core_config["download_location"] = \
                self.glade.get_widget("download_path_button").get_filename()
            new_core_config["move_completed_path"] = \
                self.glade.get_widget("move_completed_path_button").get_filename()
            new_core_config["torrentfiles_location"] = \
                self.glade.get_widget("torrent_files_button").get_filename()
        else:
            new_core_config["download_location"] = \
                self.glade.get_widget("entry_download_path").get_text()
            new_core_config["move_completed_path"] = \
                self.glade.get_widget("entry_move_completed_path").get_text()
            new_core_config["torrentfiles_location"] = \
                self.glade.get_widget("entry_torrents_path").get_text()
        new_core_config["autoadd_enable"] = \
            self.glade.get_widget("chk_autoadd").get_active()
        if client.is_localhost():
            new_core_config["autoadd_location"] = \
                self.glade.get_widget("folder_autoadd").get_filename()
        else:
            new_core_config["autoadd_location"] = \
                self.glade.get_widget("entry_autoadd").get_text()
        new_core_config["compact_allocation"] = \
            self.glade.get_widget("radio_compact_allocation").get_active()
        new_core_config["prioritize_first_last_pieces"] = \
            self.glade.get_widget(
                "chk_prioritize_first_last_pieces").get_active()
        new_core_config["add_paused"] = \
            self.glade.get_widget("chk_add_paused").get_active()
        ## Network tab ##
        listen_ports = (
            self.glade.get_widget("spin_port_min").get_value_as_int(),
            self.glade.get_widget("spin_port_max").get_value_as_int()
        )
        new_core_config["listen_ports"] = listen_ports
        new_core_config["random_port"] = \
            self.glade.get_widget("chk_random_port").get_active()
        outgoing_ports = (
            self.glade.get_widget("spin_outgoing_port_min").get_value_as_int(),
            self.glade.get_widget("spin_outgoing_port_max").get_value_as_int()
        )
        new_core_config["outgoing_ports"] = outgoing_ports
        new_core_config["random_outgoing_ports"] = \
            self.glade.get_widget("chk_random_outgoing_ports").get_active()
        new_core_config["listen_interface"] = self.glade.get_widget("entry_interface").get_text()
        new_core_config["peer_tos"] = self.glade.get_widget("entry_peer_tos").get_text()
        new_core_config["dht"] = self.glade.get_widget("chk_dht").get_active()
        new_core_config["upnp"] = self.glade.get_widget("chk_upnp").get_active()
        new_core_config["natpmp"] = \
            self.glade.get_widget("chk_natpmp").get_active()
        new_core_config["utpex"] = \
            self.glade.get_widget("chk_utpex").get_active()
        new_core_config["lsd"] = \
            self.glade.get_widget("chk_lsd").get_active()
        new_core_config["enc_in_policy"] = \
            self.glade.get_widget("combo_encin").get_active()
        new_core_config["enc_out_policy"] = \
            self.glade.get_widget("combo_encout").get_active()
        new_core_config["enc_level"] = \
            self.glade.get_widget("combo_enclevel").get_active()
        new_core_config["enc_prefer_rc4"] = \
            self.glade.get_widget("chk_pref_rc4").get_active()
        ## Bandwidth tab ##
        new_core_config["max_connections_global"] = \
            self.glade.get_widget(
                "spin_max_connections_global").get_value_as_int()
        new_core_config["max_download_speed"] = \
            self.glade.get_widget("spin_max_download").get_value()
        new_core_config["max_upload_speed"] = \
            self.glade.get_widget("spin_max_upload").get_value()
        new_core_config["max_upload_slots_global"] = \
            self.glade.get_widget(
                "spin_max_upload_slots_global").get_value_as_int()
        new_core_config["max_half_open_connections"] = \
            self.glade.get_widget("spin_max_half_open_connections").get_value_as_int()
        new_core_config["max_connections_per_second"] = \
            self.glade.get_widget(
                "spin_max_connections_per_second").get_value_as_int()
        new_core_config["max_connections_per_torrent"] = \
            self.glade.get_widget(
                "spin_max_connections_per_torrent").get_value_as_int()
        new_core_config["max_upload_slots_per_torrent"] = \
            self.glade.get_widget(
                "spin_max_upload_slots_per_torrent").get_value_as_int()
        new_core_config["max_upload_speed_per_torrent"] = \
            self.glade.get_widget(
                "spin_max_upload_per_torrent").get_value()
        new_core_config["max_download_speed_per_torrent"] = \
            self.glade.get_widget(
                "spin_max_download_per_torrent").get_value()
        new_core_config["ignore_limits_on_local_network"] = \
            self.glade.get_widget("chk_ignore_limits_on_local_network").get_active()
        new_core_config["rate_limit_ip_overhead"] = \
            self.glade.get_widget("chk_rate_limit_ip_overhead").get_active()
        ## Interface tab ##
        new_gtkui_config["enable_system_tray"] = \
            self.glade.get_widget("chk_use_tray").get_active()
        new_gtkui_config["close_to_tray"] = \
            self.glade.get_widget("chk_min_on_close").get_active()
        new_gtkui_config["start_in_tray"] = \
            self.glade.get_widget("chk_start_in_tray").get_active()
        new_gtkui_config["enable_appindicator"] = \
            self.glade.get_widget("chk_enable_appindicator").get_active()
        new_gtkui_config["lock_tray"] = \
            self.glade.get_widget("chk_lock_tray").get_active()
        # Only store a new tray password hash when it differs from this
        # constant.  NOTE(review): the constant appears to be the SHA1 of
        # the entry's placeholder text, so an unchanged field is skipped —
        # confirm against the glade file.
        passhex = sha_hash(\
            self.glade.get_widget("txt_tray_password").get_text()).hexdigest()
        if passhex != "c07eb5a8c0dc7bb81c217b67f11c3b7a5e95ffd7":
            new_gtkui_config["tray_password"] = passhex
        new_gtkui_config["classic_mode"] = \
            self.glade.get_widget("chk_classic_mode").get_active()
        new_gtkui_config["show_rate_in_title"] = \
            self.glade.get_widget("chk_show_rate_in_title").get_active()
        ## Other tab ##
        new_gtkui_config["show_new_releases"] = \
            self.glade.get_widget("chk_show_new_releases").get_active()
        new_core_config["send_info"] = \
            self.glade.get_widget("chk_send_info").get_active()
        new_core_config["geoip_db_location"] = \
            self.glade.get_widget("entry_geoip").get_text()
        ## Daemon tab ##
        new_core_config["daemon_port"] = \
            self.glade.get_widget("spin_daemon_port").get_value_as_int()
        new_core_config["allow_remote"] = \
            self.glade.get_widget("chk_allow_remote_connections").get_active()
        new_core_config["new_release_check"] = \
            self.glade.get_widget("chk_new_releases").get_active()
        ## Proxy tab ##
        new_core_config["proxies"] = {}
        for t in ("peer", "web_seed", "tracker", "dht"):
            new_core_config["proxies"][t] = {}
            new_core_config["proxies"][t]["type"] = \
                self.glade.get_widget("combo_proxy_type_%s" % t).get_active()
            new_core_config["proxies"][t]["port"] = \
                self.glade.get_widget("spin_proxy_port_%s" % t).get_value_as_int()
            new_core_config["proxies"][t]["username"] = \
                self.glade.get_widget("txt_proxy_username_%s" % t).get_text()
            new_core_config["proxies"][t]["password"] = \
                self.glade.get_widget("txt_proxy_password_%s" % t).get_text()
            new_core_config["proxies"][t]["hostname"] = \
                self.glade.get_widget("txt_proxy_server_%s" % t).get_text()
        ## Queue tab ##
        new_core_config["queue_new_to_top"] = \
            self.glade.get_widget("chk_queue_new_top").get_active()
        new_core_config["max_active_seeding"] = \
            self.glade.get_widget("spin_seeding").get_value_as_int()
        new_core_config["max_active_downloading"] = \
            self.glade.get_widget("spin_downloading").get_value_as_int()
        new_core_config["max_active_limit"] = \
            self.glade.get_widget("spin_active").get_value_as_int()
        new_core_config["dont_count_slow_torrents"] = \
            self.glade.get_widget("chk_dont_count_slow_torrents").get_active()
        new_core_config["stop_seed_at_ratio"] = \
            self.glade.get_widget("chk_seed_ratio").get_active()
        new_core_config["remove_seed_at_ratio"] = \
            self.glade.get_widget("chk_remove_ratio").get_active()
        new_core_config["stop_seed_ratio"] = \
            self.glade.get_widget("spin_share_ratio").get_value()
        new_core_config["share_ratio_limit"] = \
            self.glade.get_widget("spin_share_ratio_limit").get_value()
        new_core_config["seed_time_ratio_limit"] = \
            self.glade.get_widget("spin_seed_time_ratio_limit").get_value()
        new_core_config["seed_time_limit"] = \
            self.glade.get_widget("spin_seed_time_limit").get_value()
        ## Cache tab ##
        new_core_config["cache_size"] = \
            self.glade.get_widget("spin_cache_size").get_value_as_int()
        new_core_config["cache_expiry"] = \
            self.glade.get_widget("spin_cache_expiry").get_value_as_int()
        # Run plugin hook to apply preferences
        component.get("PluginManager").run_on_apply_prefs()
        # GtkUI
        for key in new_gtkui_config.keys():
            # The values do not match so this needs to be updated
            if self.gtkui_config[key] != new_gtkui_config[key]:
                self.gtkui_config[key] = new_gtkui_config[key]
        # Core
        if client.connected():
            # Only do this if we're connected to a daemon
            config_to_set = {}
            for key in new_core_config.keys():
                # The values do not match so this needs to be updated
                if self.core_config[key] != new_core_config[key]:
                    config_to_set[key] = new_core_config[key]
            if config_to_set:
                # Set each changed config value in the core
                client.core.set_config(config_to_set)
                client.force_call(True)
                # Update the configuration
                self.core_config.update(config_to_set)
        if hide:
            self.hide()
        else:
            # Re-show the dialog to make sure everything has been updated
            self.show()
    def hide(self):
        """Hide the dialog, clearing the port-test result image first."""
        self.glade.get_widget("port_img").hide()
        self.pref_dialog.hide()
def __update_cache_status(self):
# Updates the cache status labels with the info in the dict
for widget in self.glade.get_widget_prefix("label_cache_"):
key = widget.get_name()[len("label_cache_"):]
value = self.cache_status[key]
if type(value) == float:
value = "%.2f" % value
else:
value = str(value)
widget.set_text(value)
def _on_button_cache_refresh_clicked(self, widget):
def on_get_cache_status(status):
self.cache_status = status
self.__update_cache_status()
client.core.get_cache_status().addCallback(on_get_cache_status)
def on_pref_dialog_delete_event(self, widget, event):
    """Hide instead of destroying when the window manager closes the dialog."""
    self.hide()
    # Returning True stops the default destroy handler.
    return True
def on_toggle(self, widget):
    """Handles widget sensitivity based on radio/check button values.

    When a toggle widget changes state, every widget that depends on it is
    made sensitive/insensitive accordingly, recursing through dependents of
    dependents (e.g. unchecking "use tray" also disables the lock-tray
    password fields).
    """
    try:
        value = widget.get_active()
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.  A widget without get_active() simply has no
        # dependents to update.
        return

    # Maps a toggle widget's name to its dependents.  The boolean says
    # whether the dependent is sensitive when the toggle is active (True)
    # or when it is inactive (False).
    dependents = {
        "chk_show_dialog": {"chk_focus_dialog": True},
        "chk_random_port": {"spin_port_min": False,
                            "spin_port_max": False},
        "chk_random_outgoing_ports": {"spin_outgoing_port_min": False,
                                      "spin_outgoing_port_max": False},
        "chk_use_tray": {"chk_min_on_close": True,
                         "chk_start_in_tray": True,
                         "chk_enable_appindicator": True,
                         "chk_lock_tray": True},
        "chk_lock_tray": {"txt_tray_password": True,
                          "password_label": True},
        "radio_open_folder_custom": {"combo_file_manager": False,
                                     "txt_open_folder_location": True},
        "chk_move_completed" : {"move_completed_path_button" : True},
        "chk_copy_torrent_file" : {"torrent_files_button" : True,
                                   "chk_del_copy_torrent_file" : True},
        "chk_autoadd" : {"folder_autoadd" : True},
        "chk_seed_ratio" : {"spin_share_ratio": True,
                            "chk_remove_ratio" : True}
    }

    def update_dependent_widgets(name, value):
        # Apply sensitivity to each dependent, then recurse so chains of
        # dependencies (use tray -> lock tray -> password) stay consistent.
        dependency = dependents[name]
        for dep in dependency.keys():
            depwidget = self.glade.get_widget(dep)
            # Invert the sensitivity when the mapping value is False.
            sensitive = [not value, value][dependency[dep]]
            depwidget.set_sensitive(sensitive)
            if dep in dependents:
                update_dependent_widgets(dep, depwidget.get_active() and sensitive)

    for key in dependents.keys():
        if widget != self.glade.get_widget(key):
            continue
        update_dependent_widgets(key, value)
def on_button_ok_clicked(self, data):
    """Apply the settings and close the dialog."""
    log.debug("on_button_ok_clicked")
    self.set_config(hide=True)
    return True
def on_button_apply_clicked(self, data):
    """Apply the settings, leaving the dialog open."""
    log.debug("on_button_apply_clicked")
    self.set_config()
def on_button_cancel_clicked(self, data):
    """Close the dialog without applying any changes."""
    log.debug("on_button_cancel_clicked")
    self.hide()
    return True
def on_selection_changed(self, treeselection):
    """Show the notebook page matching the selected category row."""
    # Show the correct notebook page based on what row is selected.
    (model, row) = treeselection.get_selected()
    try:
        self.notebook.set_current_page(model.get_value(row, 0))
    except TypeError:
        # row is None when the selection was cleared; nothing to show.
        pass
def on_test_port_clicked(self, data):
    """Ask the daemon to test its listen port and show the result icon."""
    log.debug("on_test_port_clicked")
    def on_get_test(status):
        # Green tick when the port is reachable, warning icon otherwise.
        if status:
            self.glade.get_widget("port_img").set_from_stock(gtk.STOCK_YES, 4)
            self.glade.get_widget("port_img").show()
        else:
            self.glade.get_widget("port_img").set_from_stock(gtk.STOCK_DIALOG_WARNING, 4)
            self.glade.get_widget("port_img").show()
    client.core.test_listen_port().addCallback(on_get_test)
    # Show a spinner while the asynchronous test is in flight.
    self.glade.get_widget("port_img").set_from_file(
        deluge.common.get_pixmap('loading.gif')
    )
    self.glade.get_widget("port_img").show()
    client.force_call()
def on_plugin_toggled(self, renderer, path):
    """Enable or disable a plugin when its checkbox is toggled."""
    log.debug("on_plugin_toggled")
    row = self.plugin_liststore.get_iter_from_string(path)
    name = self.plugin_liststore.get_value(row, 0)
    was_enabled = self.plugin_liststore.get_value(row, 1)
    # Flip the checkbox state in the model first.
    self.plugin_liststore.set_value(row, 1, not was_enabled)
    if was_enabled:
        # It was on: turn it off in the daemon and in the local manager.
        client.core.disable_plugin(name)
        component.get("PluginManager").disable_plugin(name)
    else:
        client.core.enable_plugin(name)
def on_plugin_selection_changed(self, treeselection):
    """Display the metadata of the newly selected plugin."""
    log.debug("on_plugin_selection_changed")
    model, itr = treeselection.get_selected()
    if not itr:
        return
    name = model[itr][0]
    plugin_info = component.get("PluginManager").get_plugin_info(name)
    # Each info label maps 1:1 onto a field of the plugin metadata.
    for widget_name, field in (
            ("label_plugin_author", "Author"),
            ("label_plugin_version", "Version"),
            ("label_plugin_email", "Author-email"),
            ("label_plugin_homepage", "Home-page"),
            ("label_plugin_details", "Description")):
        self.glade.get_widget(widget_name).set_text(plugin_info[field])
def _on_button_plugin_install_clicked(self, widget):
    """Prompt for a plugin .egg, install it locally and, if needed, remotely."""
    log.debug("_on_button_plugin_install_clicked")
    chooser = gtk.FileChooserDialog(_("Select the Plugin"),
        self.pref_dialog,
        gtk.FILE_CHOOSER_ACTION_OPEN,
        buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN,
                 gtk.RESPONSE_OK))
    chooser.set_transient_for(self.pref_dialog)
    chooser.set_select_multiple(False)
    chooser.set_property("skip-taskbar-hint", True)
    # Only offer *.egg files in the chooser.
    file_filter = gtk.FileFilter()
    file_filter.set_name(_("Plugin Eggs"))
    file_filter.add_pattern("*." + "egg")
    chooser.add_filter(file_filter)
    # Run the dialog
    response = chooser.run()
    if response == gtk.RESPONSE_OK:
        filepath = chooser.get_filename()
    else:
        chooser.destroy()
        return
    import base64
    import shutil
    import os.path
    filename = os.path.split(filepath)[1]
    # Copy the egg into the local config's plugins directory.
    shutil.copyfile(
        filepath,
        os.path.join(deluge.configmanager.get_config_dir(), "plugins", filename))
    component.get("PluginManager").scan_for_plugins()
    if not client.is_localhost():
        # We need to send this plugin to the daemon
        filedump = base64.encodestring(open(filepath, "rb").read())
        client.core.upload_plugin(filename, filedump)
    client.core.rescan_plugins()
    chooser.destroy()
    # We need to re-show the preferences dialog to show the new plugins
    self.show()
def _on_button_rescan_plugins_clicked(self, widget):
    """Rescan for plugins locally and on the daemon, then refresh the dialog."""
    component.get("PluginManager").scan_for_plugins()
    if client.connected():
        client.core.rescan_plugins()
    self.show()
def _on_button_find_plugins_clicked(self, widget):
    """Open the Deluge plugin wiki page in the user's browser."""
    deluge.common.open_url_in_browser("http://dev.deluge-torrent.org/wiki/Plugins")
def _on_combo_proxy_type_changed(self, widget):
    """Show or hide the proxy entry widgets relevant to the chosen type."""
    # The combo's widget name ends with the proxy section it configures.
    name = widget.get_name().replace("combo_proxy_type_", "")
    proxy_type = widget.get_model()[widget.get_active()][0]
    prefixes = ["txt_proxy_", "label_proxy_", "spin_proxy_"]
    hides = []
    shows = []
    if proxy_type == "None":
        hides += ["password", "username", "server", "port"]
    elif proxy_type in ("Socksv4", "Socksv5", "HTTP"):
        hides += ["password", "username"]
        shows += ["server", "port"]
    elif proxy_type in ("Socksv5 W/ Auth", "HTTP W/ Auth"):
        shows += ["password", "username", "server", "port"]
    # Not every prefix/field combination exists in the glade file, so
    # check each lookup before toggling visibility.
    for field in hides:
        for prefix in prefixes:
            target = self.glade.get_widget(prefix + field + "_" + name)
            if target:
                target.hide()
    for field in shows:
        for prefix in prefixes:
            target = self.glade.get_widget(prefix + field + "_" + name)
            if target:
                target.show()
def _on_button_associate_magnet_clicked(self, widget):
    """Register this application as the system handler for magnet links."""
    common.associate_magnet_links(True)
| gpl-3.0 |
MaxParsons/amo-physics | liexperiment/raman/coherent_population_transfer_sims.py | 1 | 4059 | '''
Created on Feb 19, 2015
@author: Max
'''
import liexperiment.raman.coherent_population_transfer as cpt
import numpy as np
import matplotlib.pyplot as plt
import os
import os.path
from itertools import product
def export_figure_numerical_index(filename, fig):
    """Save *fig* as "<filename>_NNNN.svg", auto-incrementing the 4-digit index."""
    head, tail = os.path.split(filename)
    # Earlier exports look like "<tail>_NNNN.svg"; collect their numbers.
    # (fname[-8:-4] is the zero-padded index just before the ".svg" suffix.)
    existing = [int(fname[-8:-4]) for fname in os.listdir(head)
                if fname.split('_', 1)[0] == tail]
    next_num = np.max(np.array(existing)) + 1 if existing else 0
    newname = tail + "_" + "{:0>4d}".format(next_num)
    fig.savefig(os.path.join(head, newname + ".svg"))
def spectrum_constant_pulse():
    """Sweep the Raman detuning at fixed pulse parameters and plot the
    resulting |4> population and nbar spectrum, saving the figure to disk."""
    fig_directory = "C:\\Users\\Max\\amo-physics\\liexperiment\\raman\\coherent_population_transfer\\constant_detuning_rabi"
    subname = "spectrum"
    raman = cpt.RamanTransition()
    # Detuning sweep range (Hz) and per-point result buffers.
    detunings = np.linspace(-2.0e6, 2.0e6, 100)
    four_pops = np.zeros_like(detunings)
    nbars = np.zeros_like(detunings)
    # Simulation parameters; frequencies in Hz, durations in seconds.
    raman.n_vibrational = 5;
    raman.initial_state = np.zeros(2 * raman.n_vibrational, dtype="complex64")
    raman.constant_rabi = 300.0e3
    raman.anharmonicity = 26.0e3
    raman.simulation_duration = 10.0e-6
    raman.simulation_nsteps = 50
    raman.trap_frequency = 1.0e6
    raman.lamb_dicke = 0.28
    # Initial populations: 70% in the first motional state, 30% in the second.
    raman.initial_state[0] = np.sqrt(0.7)
    raman.initial_state[1] = np.sqrt(0.3)
    fig, ax = plt.subplots(1, 1)
    fig.name = "spectrum"
    ax.set_title("simulated raman spectrum\n ")
    ax.set_xlabel("detuning (kHz)")
    ax.set_ylabel("population in |4> (blue)\n nbar (black)")
    for idx, detuning in enumerate(detunings):
        print "idx = " + str(idx)
        raman.constant_detuning = detuning
        raman.compute_dynamics()
        # Record the final-time excited population and mean phonon number.
        four_pops[idx] = raman.pops_excited[-1]
        nbars[idx] = raman.nbars[-1]
    ax.plot(detunings / 1.0e3, four_pops, color="b", marker="o")
    ax.plot(detunings / 1.0e3, nbars, color="k", marker="o")
    export_figure_numerical_index(os.path.join(fig_directory, fig.name), fig)
    plt.show()
def rabi_flopping():
    """Simulate Rabi flopping at a fixed detuning and plot the excited-state
    population versus time."""
    fig_directory = "C:\\Users\\Max\\amo-physics\\liexperiment\\raman\\coherent_population_transfer\\constant_detuning_rabi"
    subname = "spectrum"
    raman = cpt.RamanTransition()
    # Fixed detuning (Hz); no anharmonicity for this run.
    raman.constant_detuning = -1.00e6
    raman.n_vibrational = 5;
    raman.initial_state = np.zeros(2 * raman.n_vibrational, dtype="complex64")
    raman.constant_rabi = 100.0e3
    raman.anharmonicity = 0.0e3
    raman.simulation_duration = 100.0e-6
    raman.simulation_nsteps = 100
    raman.trap_frequency = 1.0e6
    raman.lamb_dicke = 0.28
    raman.initial_state[0] = np.sqrt(0.7)
    raman.initial_state[1] = np.sqrt(0.3)
    raman.compute_dynamics()
    fig, ax = plt.subplots(1, 1)
    ax.set_title("populations")
    ax.set_xlabel("time")
    ax.set_ylabel("populations")
    plt.plot(raman.times, raman.pops_excited)
    plt.show()
def test():
    """Ad-hoc smoke test: build a small simulation and print the Hamiltonian
    at one instant (no plotting)."""
    fig_directory = "C:\\Users\\Max\\amo-physics\\liexperiment\\raman\\coherent_population_transfer\\constant_detuning_rabi"
    subname = "spectrum"
    raman = cpt.RamanTransition()
    detunings = np.linspace(-2.0e6, 2.0e6, 30)
    four_pops = np.zeros_like(detunings)
    nbars = np.zeros_like(detunings)
    # Smaller Hilbert space than the production runs to keep this fast.
    raman.n_vibrational = 3;
    raman.initial_state = np.zeros(2 * raman.n_vibrational, dtype="complex64")
    raman.constant_rabi = 100.0e3
    raman.anharmonicity = 26.0e3
    raman.simulation_duration = 10.0e-6
    raman.simulation_nsteps = 50
    raman.trap_frequency = 1.0e6
    raman.lamb_dicke = 0.28
    # All population starts in the ground motional state.
    raman.initial_state[0] = np.sqrt(1.0)
    raman.initial_state[1] = np.sqrt(0.0)
    fig, ax = plt.subplots(1, 1)
    fig.name = "spectrum"
    ax.set_title("simulated raman spectrum\n ")
    ax.set_xlabel("detuning (kHz)")
    ax.set_ylabel("population in |4> (blue)\n nbar (black)")
    raman.constant_detuning = 1.0e6
    raman.compute_quantum_numbers()
    print raman.hamiltonian(2.2e-6)
if __name__ == "__main__":
    # Select which simulation to run; the others are kept for convenience.
    # test()
    spectrum_constant_pulse()
    # rabi_flopping()
| mit |
Muxi-Studio/guisheng2 | app/api_1_0/authentication.py | 2 | 1809 | # coding: utf-8
"""
authentication.py
~~~~~~~~~~~~~~~~~
API验证文件
"""
from flask import g, jsonify
from flask.ext.httpauth import HTTPBasicAuth
from flask.ext.login import current_user
from . import api
from ..models import User, AnonymousUser
from .errors import unauthorized, forbidden, not_found, server_error
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(email_or_token, password):
    """Authenticate a request using either a token or email/password.

    Sets g.current_user (and g.token_used) as a side effect.
    """
    if email_or_token == '':
        # No credentials at all: treat the request as anonymous.
        g.current_user = AnonymousUser()
        return True
    if password == '':
        # Token authentication: no username/password required.
        g.current_user = User.verify_auth_token(email_or_token)
        g.token_used = True
        return g.current_user is not None
    # Otherwise look the user up by email address,
    # then verify the supplied password.
    user = User.query.filter_by(email=email_or_token).first()
    if not user:
        return False
    g.current_user = user
    g.token_used = False
    return user.verify_password(password)
@api.before_request
def before_request():
    # Placeholder hook that runs before every API request; currently a no-op.
    pass
"""
error_handler decorater can help us generate json formate error easily
"""
# 403
@auth.error_handler
def auth_error():
return unauthorized('Invalid credentials')
# 404
@auth.error_handler
def not_found_error():
return not_found('Not found')
# 500
@auth.error_handler
def server_error_error():
return server_error('Server error')
@api.route('/token/', methods=["POST", "GET"])
@auth.login_required
def get_token():
    """Issue a fresh auth token (valid 3600 s) for the authenticated user.

    Anonymous users and requests already authenticated by a token are
    refused, so tokens cannot be used to mint further tokens.
    """
    if isinstance(g.current_user, AnonymousUser) or g.token_used:
        return unauthorized('Invalid credentials') # => in json format
    return jsonify({
        'token': g.current_user.generate_auth_token(3600),
        'expiration': 3600,
        'id' : g.current_user.id
    })
| mit |
hazrpg/calibre | src/calibre/utils/shared_file.py | 3 | 7612 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os, sys
from calibre.constants import iswindows, plugins
'''
This module defines a share_open() function which is a replacement for
python's builtin open() function.
This replacement, opens 'shareable' files on all platforms. That is files that
can be read from and written to and deleted at the same time by multiple
processes. All file handles are non-inheritable, as in Python 3, but unlike,
Python 2. Non-inheritance is atomic.
Caveats on windows: On windows sharing is co-operative, i.e. it only works if
all processes involved open the file with share_open(). Also while you can
delete a file that is open, you cannot open a new file with the same filename
until all open file handles are closed. You also cannot delete the containing
directory until all file handles are closed. To get around this, rename the
file before deleting it.
'''
speedup, err = plugins['speedup']
if not speedup:
raise RuntimeError('Failed to load the speedup plugin with error: %s' % err)
# The open() mode strings that share_open() supports.
valid_modes = {
    'r', 'rb', 'r+', 'r+b',
    'w', 'wb', 'w+', 'w+b',
    'a', 'ab', 'a+', 'a+b',
}


def validate_mode(mode):
    """Return True if *mode* is one of the supported open() mode strings."""
    return mode in valid_modes
class FlagConstants(object):
    """Snapshot of the os.O_* flag constants, with 0 for flags the
    current platform does not define (so OR-ing them is a no-op)."""

    def __init__(self):
        # Flags that exist on every supported platform.
        for name in 'APPEND CREAT TRUNC EXCL RDWR RDONLY WRONLY'.split():
            attr = 'O_' + name
            setattr(self, attr, getattr(os, attr))
        # Windows-only flags; default to 0 elsewhere.
        for name in 'RANDOM SEQUENTIAL TEXT BINARY'.split():
            attr = 'O_' + name
            setattr(self, attr, getattr(os, attr, 0))


fc = FlagConstants()
def flags_from_mode(mode):
    """Translate an open() mode string into a bitmask of os.O_* flags.

    Raises ValueError for any mode not listed in valid_modes.
    """
    if not validate_mode(mode):
        raise ValueError('The mode is invalid')
    primary = mode[0]
    updatable = '+' in mode  # '+' means read AND write
    if primary == 'a':
        flags = fc.O_APPEND | fc.O_CREAT
        if updatable:
            flags |= fc.O_RDWR | fc.O_RANDOM
        else:
            flags |= fc.O_WRONLY | fc.O_SEQUENTIAL
    elif primary == 'r':
        if updatable:
            flags = fc.O_RDWR | fc.O_RANDOM
        else:
            flags = fc.O_RDONLY | fc.O_SEQUENTIAL
    else:  # primary == 'w' (the only remaining valid mode)
        if updatable:
            flags = fc.O_RDWR | fc.O_RANDOM
        else:
            flags = fc.O_WRONLY | fc.O_SEQUENTIAL
        flags |= fc.O_TRUNC | fc.O_CREAT
    # Text/binary translation flags are no-ops outside Windows (value 0).
    flags |= fc.O_BINARY if 'b' in mode else fc.O_TEXT
    return flags
if iswindows:
    from numbers import Integral
    import msvcrt
    import win32file, pywintypes

    # Win32 creation/sharing/attribute constants, aliased locally.
    CREATE_NEW = win32file.CREATE_NEW
    CREATE_ALWAYS = win32file.CREATE_ALWAYS
    OPEN_EXISTING = win32file.OPEN_EXISTING
    OPEN_ALWAYS = win32file.OPEN_ALWAYS
    TRUNCATE_EXISTING = win32file.TRUNCATE_EXISTING
    FILE_SHARE_READ = win32file.FILE_SHARE_READ
    FILE_SHARE_WRITE = win32file.FILE_SHARE_WRITE
    FILE_SHARE_DELETE = win32file.FILE_SHARE_DELETE
    FILE_SHARE_VALID_FLAGS = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE
    FILE_ATTRIBUTE_READONLY = win32file.FILE_ATTRIBUTE_READONLY
    FILE_ATTRIBUTE_NORMAL = win32file.FILE_ATTRIBUTE_NORMAL
    FILE_ATTRIBUTE_TEMPORARY = win32file.FILE_ATTRIBUTE_TEMPORARY
    FILE_FLAG_DELETE_ON_CLOSE = win32file.FILE_FLAG_DELETE_ON_CLOSE
    FILE_FLAG_SEQUENTIAL_SCAN = win32file.FILE_FLAG_SEQUENTIAL_SCAN
    FILE_FLAG_RANDOM_ACCESS = win32file.FILE_FLAG_RANDOM_ACCESS
    # Mask to 32 bits: pywin32 exposes these as negative Python ints.
    GENERIC_READ = win32file.GENERIC_READ & 0xffffffff
    GENERIC_WRITE = win32file.GENERIC_WRITE & 0xffffffff
    DELETE = 0x00010000

    # Translate the os.O_* access bits into Win32 GENERIC_* access rights.
    _ACCESS_MASK = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
    _ACCESS_MAP = {
        os.O_RDONLY : GENERIC_READ,
        os.O_WRONLY : GENERIC_WRITE,
        os.O_RDWR   : GENERIC_READ | GENERIC_WRITE
    }

    # Translate the os.O_CREAT/O_EXCL/O_TRUNC combinations into the Win32
    # creation disposition.
    _CREATE_MASK = os.O_CREAT | os.O_EXCL | os.O_TRUNC
    _CREATE_MAP = {
        0 : OPEN_EXISTING,
        os.O_EXCL : OPEN_EXISTING,
        os.O_CREAT : OPEN_ALWAYS,
        os.O_CREAT | os.O_EXCL : CREATE_NEW,
        os.O_CREAT | os.O_TRUNC | os.O_EXCL : CREATE_NEW,
        os.O_TRUNC : TRUNCATE_EXISTING,
        os.O_TRUNC | os.O_EXCL : TRUNCATE_EXISTING,
        os.O_CREAT | os.O_TRUNC : CREATE_ALWAYS
    }

    def raise_winerror(pywinerr):
        # Re-raise a pywintypes.error as a WindowsError, preserving the
        # original traceback (Python 2 three-expression raise).
        raise WindowsError(pywinerr.winerror, (pywinerr.funcname or '') + b': ' + (pywinerr.strerror or '')), None, sys.exc_info()[2]

    def os_open(path, flags, mode=0o777, share_flags=FILE_SHARE_VALID_FLAGS):
        '''
        Replacement for os.open() allowing moving or unlinking before closing
        '''
        if not isinstance(flags, Integral):
            raise TypeError('flags must be an integer')
        if not isinstance(mode, Integral):
            raise TypeError('mode must be an integer')

        if share_flags & ~FILE_SHARE_VALID_FLAGS:
            raise ValueError('bad share_flags: %r' % share_flags)

        access_flags = _ACCESS_MAP[flags & _ACCESS_MASK]
        create_flags = _CREATE_MAP[flags & _CREATE_MASK]
        attrib_flags = FILE_ATTRIBUTE_NORMAL
        # A creation mode with no write bits maps to a read-only file.
        if flags & os.O_CREAT and mode & ~0o444 == 0:
            attrib_flags = FILE_ATTRIBUTE_READONLY

        if flags & os.O_TEMPORARY:
            # Temporary files are deleted on close and must be shareable
            # for delete; DELETE access is required for that flag.
            share_flags |= FILE_SHARE_DELETE
            attrib_flags |= FILE_FLAG_DELETE_ON_CLOSE
            access_flags |= DELETE

        if flags & os.O_SHORT_LIVED:
            attrib_flags |= FILE_ATTRIBUTE_TEMPORARY

        # Access-pattern hints for the cache manager.
        if flags & os.O_SEQUENTIAL:
            attrib_flags |= FILE_FLAG_SEQUENTIAL_SCAN

        if flags & os.O_RANDOM:
            attrib_flags |= FILE_FLAG_RANDOM_ACCESS

        try:
            h = win32file.CreateFileW(
                path, access_flags, share_flags, None, create_flags, attrib_flags, None)
        except pywintypes.error as e:
            raise_winerror(e)
        # Wrap the raw handle in a CRT file descriptor (non-inheritable).
        ans = msvcrt.open_osfhandle(h, flags | os.O_NOINHERIT)
        h.Detach()  # We dont want the handle to be automatically closed when h is deleted
        return ans

    def share_open(path, mode='r', buffering=-1):
        flags = flags_from_mode(mode)
        return speedup.fdopen(os_open(path, flags), path, mode, buffering)

else:
    def share_open(path, mode='r', buffering=-1):
        # POSIX files are always "shareable"; just add close-on-exec.
        flags = flags_from_mode(mode) | speedup.O_CLOEXEC
        return speedup.fdopen(os.open(path, flags), path, mode, buffering)

    def raise_winerror(x):
        # Windows-specific helper; calling it on POSIX is a programming error.
        raise NotImplementedError(), None, sys.exc_info()[2]
def test():
    """Self-test: exercise shared read/write/delete semantics of share_open."""
    import repr as reprlib
    def eq(x, y):
        # Minimal assertEqual with a readable message.
        if x != y:
            raise AssertionError('%s != %s' % (reprlib.repr(x), reprlib.repr(y)))
    from calibre.ptempfile import TemporaryDirectory
    with TemporaryDirectory() as tdir:
        fname = os.path.join(tdir, 'test.txt')
        with share_open(fname, 'wb') as f:
            f.write(b'a' * 20 * 1024)
            eq(fname, f.name)
        f = share_open(fname, 'rb')
        eq(f.read(1), b'a')
        # Deleting a file that is still open: on Windows the name stays
        # reserved, so rename first (see module docstring), then delete.
        if iswindows:
            os.rename(fname, fname+'.moved')
            os.remove(fname+'.moved')
        else:
            os.remove(fname)
        # The open handle still reads the unlinked file's data.
        eq(f.read(1), b'a')
        f2 = share_open(fname, 'w+b')
        f2.write(b'b' * 10 * 1024)
        f2.seek(0)
        # Old handle sees the old (unlinked) contents, new handle the new.
        eq(f.read(10000), b'a'*10000)
        eq(f2.read(100), b'b' * 100)
        f3 = share_open(fname, 'rb')
        eq(f3.read(100), b'b' * 100)
| gpl-3.0 |
ylitormatech/terapialaskutus | therapyinvoicing/kelainvoicing/forms.py | 1 | 4219 | from django import forms
from .models import KelaContactProfile, KelaStatement
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit
from django.core.urlresolvers import reverse_lazy
class KelaStatementUpdateForm(forms.ModelForm):
    """ModelForm for editing a KelaStatement (Kela billing statement).

    Field labels are in Finnish, matching the user-facing locale.
    """

    class Meta:
        model = KelaStatement
        fields = ['date',
                  'companyName',
                  'firstName',
                  'lastName',
                  'address',
                  'zipCode',
                  'city',
                  'telephone',
                  'email',
                  'vatId',
                  'iban',
                  'bic',
                  'serviceproviderType',
                  'invoiceRefType',
                  'orderno',
                  'taxAdvanceType',
                  'taxAdvanceExplanation']
        labels = {
            'date': _("Tilityspäivä"),
            'companyName': _("Kuntoutuspalvelutuottajan nimi"),
            'firstName': _("Terapeutin etunimi"),
            'lastName': _("Terapeutin sukunimi"),
            'address': _("Kuntoutuspalvelutuottajan katuosoite"),
            'zipCode': _("Kuntoutuspalvelutuottajan postinumero"),
            'city': _("Kuntoutuspalvelutuottajan Postitoimipaikka"),
            'telephone': _("Puhelinnumero"),
            'email': _("Sähköposti"),
            'vatId': _("Y-tunnus/Henkilötunnus"),
            'iban': _("Maksuosoite: IBAN-tilinumero"),
            'bic': _("BIC-pankkitunniste"),
            'serviceproviderType': _("Palveluntuottajan tyyppi"),
            'invoiceRefType': _("Tilitysnumeron tyyppi"),
            'orderno': _("Tilitysnumero"),
            'taxAdvanceType': _("Ennakonpidätysperuste"),
            'taxAdvanceExplanation': _("Ennakonpidätyksen selite")
        }

    # The three choice fields are rendered as radio buttons rather than
    # the default select widgets.
    serviceproviderType = forms.TypedChoiceField(
        label=Meta.labels['serviceproviderType'],
        choices=KelaStatement.SERVICEPROVIDER_TYPE_CHOICES,
        widget=forms.RadioSelect,
        required=True
    )
    invoiceRefType = forms.TypedChoiceField(
        label=Meta.labels['invoiceRefType'],
        choices=KelaStatement.INVOICEREF_TYPE_CHOICES,
        widget=forms.RadioSelect,
        required=True
    )
    taxAdvanceType = forms.TypedChoiceField(
        label=Meta.labels['taxAdvanceType'],
        choices=KelaStatement.TAX_ADVANCE_COLLECTION_TYPE_CHOICES,
        widget=forms.RadioSelect,
        required=True
    )

    def __init__(self, *args, **kwargs):
        """Set up the crispy-forms helper and field layout."""
        super(KelaStatementUpdateForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'blueForms'
        self.helper.form_method = 'post'
        # NOTE(review): this update form posts to 'customers:customer_create'
        # -- looks like a copy-paste from another form; confirm the intended
        # target view.
        self.helper.form_action = reverse_lazy('customers:customer_create')
        self.helper.layout = Layout(
            'date',
            'companyName',
            'firstName',
            'lastName',
            'address',
            'zipCode',
            'city',
            'telephone',
            'email',
            'vatId',
            'iban',
            'bic',
            'serviceproviderType',
            'invoiceRefType',
            'orderno',
            'taxAdvanceType',
            'taxAdvanceExplanation',
            Submit("submit", _("Tallenna"), css_class="btn btn-primary btn-sm"),
        )
class KelaContactProfileUpdateForm(forms.ModelForm):
    """ModelForm for editing the Kela office contact profile.

    Note: 'additionalName' and 'country' are editable but have no custom
    label, so they fall back to the model field's verbose name.
    """

    class Meta:
        model = KelaContactProfile
        fields = [
            'legalName',
            'firstName',
            'additionalName',
            'lastName',
            'address',
            'zipCode',
            'city',
            'country',
            'telephone',
            'email']
        labels = {
            'legalName': _("Kelan toimiston nimi"),
            'firstName': _("Yhteyshenkilön etunimi"),
            'lastName': _("Yhteyshenkilön sukunimi"),
            'address': _("Kelan laskutuksen postiosoite"),
            'zipCode': _("Kelan laskutuksen postinumero"),
            'city': _("Kelan laskutuksen postitoimipaikka"),
            'telephone': _("Puhelinnumero"),
            'email': _("Sähköposti"),
        }
| bsd-3-clause |
AmedeoSapio/scapy | scapy/supersocket.py | 7 | 4511 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
SuperSocket.
"""
import socket,time
from config import conf
from data import *
from scapy.error import warning, log_runtime
class _SuperSocket_metaclass(type):
def __repr__(self):
if self.desc is not None:
return "<%s: %s>" % (self.__name__,self.desc)
else:
return "<%s>" % self.__name__
class SuperSocket:
    """Base class wrapping an input and an output socket for packet I/O."""
    __metaclass__ = _SuperSocket_metaclass
    desc = None
    # Guard so close() only runs once.
    closed=0
    def __init__(self, family=socket.AF_INET,type=socket.SOCK_STREAM, proto=0):
        self.ins = socket.socket(family, type, proto)
        self.outs = self.ins
        self.promisc=None
    def send(self, x):
        """Serialize packet x and send it, stamping x.sent_time if present."""
        sx = str(x)
        if hasattr(x, "sent_time"):
            x.sent_time = time.time()
        return self.outs.send(sx)
    def recv(self, x=MTU):
        """Receive up to x bytes and dissect them with the raw layer."""
        return conf.raw_layer(self.ins.recv(x))
    def fileno(self):
        # Expose the input socket's fd so select() can poll us.
        return self.ins.fileno()
    def close(self):
        """Close both sockets (idempotent)."""
        if self.closed:
            return
        self.closed=1
        # Close outs first when it is a distinct socket from ins.
        if self.ins != self.outs:
            if self.outs and self.outs.fileno() != -1:
                self.outs.close()
        if self.ins and self.ins.fileno() != -1:
            self.ins.close()
    def sr(self, *args, **kargs):
        """Send and receive on this socket; returns (answered, unanswered)."""
        return sendrecv.sndrcv(self, *args, **kargs)
    def sr1(self, *args, **kargs):
        """Send and return only the first answer (or None)."""
        a,b = sendrecv.sndrcv(self, *args, **kargs)
        if len(a) > 0:
            return a[0][1]
        else:
            return None
    def sniff(self, *args, **kargs):
        """Sniff packets on this already-opened socket."""
        return sendrecv.sniff(opened_socket=self, *args, **kargs)
class L3RawSocket(SuperSocket):
    desc = "Layer 3 using Raw sockets (PF_INET/SOCK_RAW)"
    def __init__(self, type = ETH_P_IP, filter=None, iface=None, promisc=None, nofilter=0):
        # Output: raw IP socket with user-supplied IP headers.
        self.outs = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
        self.outs.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
        # Input: packet socket, optionally bound to a single interface.
        self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
        if iface is not None:
            self.ins.bind((iface, type))
    def recv(self, x=MTU):
        """Receive one packet and return its layer-3 payload (or None)."""
        pkt, sa_ll = self.ins.recvfrom(x)
        # Ignore packets we sent ourselves.
        if sa_ll[2] == socket.PACKET_OUTGOING:
            return None
        # Pick a dissector class from the link-layer type, falling back to
        # the network-layer protocol, then to the configured default.
        if sa_ll[3] in conf.l2types:
            cls = conf.l2types[sa_ll[3]]
            lvl = 2
        elif sa_ll[1] in conf.l3types:
            cls = conf.l3types[sa_ll[1]]
            lvl = 3
        else:
            cls = conf.default_l2
            warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s" % (sa_ll[0],sa_ll[1],sa_ll[3],cls.name))
            lvl = 3
        try:
            pkt = cls(pkt)
        except KeyboardInterrupt:
            raise
        except:
            # Malformed packet: keep it raw unless we are debugging dissectors.
            if conf.debug_dissector:
                raise
            pkt = conf.raw_layer(pkt)
        # For an L2 dissection, strip the link layer to return layer 3.
        if lvl == 2:
            pkt = pkt.payload
        if pkt is not None:
            from arch import get_last_packet_timestamp
            pkt.time = get_last_packet_timestamp(self.ins)
        return pkt
    def send(self, x):
        """Serialize and send packet x to x.dst, logging socket errors."""
        try:
            sx = str(x)
            x.sent_time = time.time()
            self.outs.sendto(sx,(x.dst,0))
        except socket.error,msg:
            log_runtime.error(msg)
class SimpleSocket(SuperSocket):
    desc = "wrapper arround a classic socket"

    def __init__(self, sock):
        # Reads and writes both go through the one supplied socket.
        self.ins = self.outs = sock
class StreamSocket(SimpleSocket):
    desc = "transforms a stream socket into a layer 2"
    def __init__(self, sock, basecls=None):
        if basecls is None:
            basecls = conf.raw_layer
        SimpleSocket.__init__(self, sock)
        self.basecls = basecls
    def recv(self, x=MTU):
        """Peek at the stream, dissect one packet, then consume only the
        bytes that belonged to it (trailing Padding is left in the stream)."""
        # MSG_PEEK: look at the data without removing it from the buffer.
        pkt = self.ins.recv(x, socket.MSG_PEEK)
        x = len(pkt)
        if x == 0:
            raise socket.error((100,"Underlying stream socket tore down"))
        pkt = self.basecls(pkt)
        pad = pkt.getlayer(conf.padding_layer)
        if pad is not None and pad.underlayer is not None:
            del(pad.underlayer.payload)
        # Subtract the padding bytes so the next recv() starts at the
        # beginning of the following packet.
        while pad is not None and not isinstance(pad, NoPayload):
            x -= len(pad.load)
            pad = pad.payload
        # Now actually consume exactly the dissected packet's bytes.
        self.ins.recv(x)
        return pkt
# Default the layer-3 socket implementation if nothing set it earlier.
if conf.L3socket is None:
    conf.L3socket = L3RawSocket

# Imported at the bottom to avoid a circular import with this module.
import arch
import sendrecv
| gpl-2.0 |
magenta/ddsp | ddsp/training/decoders.py | 1 | 7498 | # Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of decoder layers."""
from ddsp import core
from ddsp.training import nn
import gin
import tensorflow as tf
tfkl = tf.keras.layers
# ------------------ Decoders --------------------------------------------------
@gin.register
class RnnFcDecoder(nn.OutputSplitsLayer):
  """RNN and FC stacks for f0 and loudness."""

  def __init__(self,
               rnn_channels=512,
               rnn_type='gru',
               ch=512,
               layers_per_stack=3,
               input_keys=('ld_scaled', 'f0_scaled', 'z'),
               output_splits=(('amps', 1), ('harmonic_distribution', 40)),
               **kwargs):
    super().__init__(
        input_keys=input_keys, output_splits=output_splits, **kwargs)

    def make_stack():
      return nn.FcStack(ch, layers_per_stack)

    # Layers: one FC stack per input stream, an RNN, and an output stack.
    self.input_stacks = [make_stack() for _ in self.input_keys]
    self.rnn = nn.Rnn(rnn_channels, rnn_type)
    self.out_stack = make_stack()

  def compute_output(self, *inputs):
    """Preprocess each input, run the RNN, and apply the output stack."""
    # Per-stream initial processing.
    hidden = [stack(tensor) for stack, tensor in zip(self.input_stacks, inputs)]

    # Run an RNN over the concatenated latents.
    rnn_out = self.rnn(tf.concat(hidden, axis=-1))
    # Skip connection: concatenate the RNN output with its inputs.
    combined = tf.concat(hidden + [rnn_out], axis=-1)

    # Final processing.
    return self.out_stack(combined)
@gin.register
class MidiDecoder(nn.DictLayer):
  """Decodes MIDI notes (& velocities) to f0 (& loudness)."""

  def __init__(self,
               net=None,
               f0_residual=True,
               center_loudness=True,
               norm=True,
               **kwargs):
    """Constructor.

    Args:
      net: Core network that maps the (pitch, [z]) input to features.
      f0_residual: If True, predict f0 as an offset from the input pitch.
      center_loudness: If True, rescale loudness output to roughly [-100, -40] dB.
      norm: If True, layer-normalize the network output before projection.
      **kwargs: Passed through to nn.DictLayer.
    """
    super().__init__(**kwargs)
    self.net = net
    self.f0_residual = f0_residual
    self.center_loudness = center_loudness
    # Project features to 2 channels: [f0_midi, loudness].
    self.dense_out = tfkl.Dense(2)
    self.norm = nn.Normalize('layer') if norm else None

  def call(self, z_pitch, z_vel, z=None) -> ['f0_midi', 'loudness']:
    """Forward pass for the MIDI decoder.

    Args:
      z_pitch: Tensor containing encoded pitch in MIDI scale. [batch, time, 1].
      z_vel: Tensor containing encoded velocity in MIDI scale. [batch, time, 1].
      z: Additional non-MIDI latent tensor. [batch, time, n_z]

    Returns:
      f0_midi, loudness: Reconstructed f0 and loudness.
    """
    # pylint: disable=unused-argument
    # x = tf.concat([z_pitch, z_vel], axis=-1)  # TODO(jesse): Allow velocity.
    x = z_pitch
    x = self.net(x) if z is None else self.net([x, z])

    if self.norm is not None:
      x = self.norm(x)

    x = self.dense_out(x)

    # Split the two output channels.
    f0_midi = x[..., 0:1]
    loudness = x[..., 1:2]

    if self.f0_residual:
      # Predict an offset around the input pitch rather than absolute f0.
      f0_midi += z_pitch

    if self.center_loudness:
      # Map the roughly unit-scale output into a dB-like loudness range.
      loudness = loudness * 30.0 - 70.0

    return f0_midi, loudness
@gin.register
class MidiToHarmonicDecoder(nn.DictLayer):
  """Decodes MIDI notes (& velocities) to f0, amps, hd, noise."""

  def __init__(self,
               net=None,
               f0_residual=True,
               norm=True,
               output_splits=(('f0_midi', 1),
                              ('amplitudes', 1),
                              ('harmonic_distribution', 60),
                              ('magnitudes', 65)),
               **kwargs):
    """Constructor.

    Args:
      net: Core network mapping (pitch, [z]) to features.
      f0_residual: If True, predict f0_midi as an offset from the input pitch.
      norm: If True, layer-normalize network output before the projection.
      output_splits: (name, channels) pairs defining the output heads.
      **kwargs: Passed through to nn.DictLayer.
    """
    self.output_splits = output_splits
    self.n_out = sum([v[1] for v in output_splits])
    # Also emit f0_hz, derived from f0_midi below.
    output_keys = [v[0] for v in output_splits] + ['f0_hz']
    super().__init__(output_keys=output_keys, **kwargs)

    # Layers.
    self.net = net
    self.f0_residual = f0_residual
    self.dense_out = tfkl.Dense(self.n_out)
    self.norm = nn.Normalize('layer') if norm else None

  def call(self, z_pitch, z_vel, z=None):
    """Forward pass for the MIDI decoder.

    Args:
      z_pitch: Tensor containing encoded pitch in MIDI scale. [batch, time, 1].
      z_vel: Tensor containing encoded velocity in MIDI scale. [batch, time, 1].
      z: Additional non-MIDI latent tensor. [batch, time, n_z]

    Returns:
      A dictionary to feed into a processor group.
    """
    # pylint: disable=unused-argument
    # x = tf.concat([z_pitch, z_vel], axis=-1)  # TODO(jesse): Allow velocity.
    x = z_pitch
    x = self.net(x) if z is None else self.net([x, z])

    if self.norm is not None:
      x = self.norm(x)

    x = self.dense_out(x)

    # Split the single dense output into the named heads.
    outputs = nn.split_to_dict(x, self.output_splits)

    if self.f0_residual:
      outputs['f0_midi'] += z_pitch

    # Convert the MIDI-scale pitch into Hz for downstream synthesizers.
    outputs['f0_hz'] = core.midi_to_hz(outputs['f0_midi'])
    return outputs
@gin.register
class DilatedConvDecoder(nn.OutputSplitsLayer):
  """WaveNet style 1-D dilated convolution with optional conditioning.

  Input streams are concatenated and run through a stack of dilated
  convolutions; optional conditioning streams (and an optional
  precondition stack) modulate the convolution stack.
  """

  def __init__(self,
               ch=256,
               kernel_size=3,
               layers_per_stack=5,
               stacks=2,
               dilation=2,
               norm_type='layer',
               resample_stride=1,
               stacks_per_resample=1,
               resample_after_convolve=True,
               input_keys=('ld_scaled', 'f0_scaled'),
               output_splits=(('amps', 1), ('harmonic_distribution', 60)),
               conditioning_keys=('z',),
               precondition_stack=None,
               spectral_norm=False,
               ortho_init=False,
               **kwargs):
    """Constructor, combines input_keys and conditioning_keys.

    Note: the conditioning_keys default was the bare string ('z'), which
    only behaved like a tuple because list('z') == ['z'] for a single
    character; it is now a real one-element tuple (same behavior).
    """
    self.conditioning_keys = ([] if conditioning_keys is None else
                              list(conditioning_keys))
    input_keys = list(input_keys) + self.conditioning_keys
    super().__init__(input_keys, output_splits, **kwargs)

    # Conditioning.
    self.n_conditioning = len(self.conditioning_keys)
    self.conditional = bool(self.conditioning_keys)
    if not self.conditional and precondition_stack is not None:
      # (Error message previously read "...specifya precondition stack.")
      raise ValueError('You must specify conditioning keys if you specify '
                       'a precondition stack.')

    # Layers.
    self.precondition_stack = precondition_stack
    self.dilated_conv_stack = nn.DilatedConvStack(
        ch=ch,
        kernel_size=kernel_size,
        layers_per_stack=layers_per_stack,
        stacks=stacks,
        dilation=dilation,
        norm_type=norm_type,
        resample_type='upsample' if resample_stride > 1 else None,
        resample_stride=resample_stride,
        stacks_per_resample=stacks_per_resample,
        resample_after_convolve=resample_after_convolve,
        conditional=self.conditional,
        spectral_norm=spectral_norm,
        ortho_init=ortho_init)

  def _parse_inputs(self, inputs):
    """Split x and z inputs and run preconditioning."""
    if self.conditional:
      # The last n_conditioning streams are the conditioning latents.
      x = tf.concat(inputs[:-self.n_conditioning], axis=-1)
      z = tf.concat(inputs[-self.n_conditioning:], axis=-1)
      if self.precondition_stack is not None:
        z = self.precondition_stack(z)
      return [x, z]
    else:
      return tf.concat(inputs, axis=-1)

  def compute_output(self, *inputs):
    """Run the (optionally conditioned) dilated convolution stack."""
    stack_inputs = self._parse_inputs(inputs)
    return self.dilated_conv_stack(stack_inputs)
| apache-2.0 |
UOMx/edx-platform | common/lib/xmodule/xmodule/tests/test_split_test_module.py | 71 | 23236 | """
Tests for the Split Testing Module
"""
import ddt
import lxml
from mock import Mock, patch
from fs.memoryfs import MemoryFS
from xmodule.partitions.tests.test_partitions import StaticPartitionService, PartitionTestCase, MockUserPartitionScheme
from xmodule.tests.xml import factories as xml
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests import get_test_system
from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW
from xmodule.validation import StudioValidationMessage
from xmodule.split_test_module import SplitTestDescriptor, SplitTestFields, get_split_user_partitions
from xmodule.partitions.partitions import Group, UserPartition
class SplitTestModuleFactory(xml.XmlImportFactory):
    """
    Factory for generating SplitTestModules for testing purposes
    """
    # XML tag that this factory emits and the importer dispatches on.
    tag = 'split_test'
class SplitTestUtilitiesTest(PartitionTestCase):
    """
    Tests for utility methods related to split_test module.
    """
    def test_split_user_partitions(self):
        """
        Tests the get_split_user_partitions helper method.
        """
        # Two partitions using the "random" scheme: both should be returned.
        # NOTE(review): both use partition id 0 -- presumably irrelevant to
        # this helper, which filters on scheme only; confirm ids don't matter.
        first_random_partition = UserPartition(
            0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
            self.random_scheme
        )
        second_random_partition = UserPartition(
            0, 'second_partition', 'Second Partition', [Group("4", 'zeta'), Group("5", 'omega')],
            self.random_scheme
        )
        all_partitions = [
            first_random_partition,
            # Only UserPartitions with scheme "random" will be returned as available options.
            UserPartition(
                1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
                self.non_random_scheme
            ),
            second_random_partition
        ]
        # Order of the filtered result should match the input order.
        self.assertEqual(
            [first_random_partition, second_random_partition],
            get_split_user_partitions(all_partitions)
        )
class SplitTestModuleTest(XModuleXmlImportTest, PartitionTestCase):
    """
    Base class for all split_module tests.

    Builds a minimal course containing one split_test module with two HTML
    children (one per experiment group) and binds it to a mocked runtime with
    a static partitions service.
    """
    def setUp(self):
        super(SplitTestModuleTest, self).setUp()
        self.course_id = 'test_org/test_course_number/test_run'
        # construct module
        course = xml.CourseFactory.build()
        sequence = xml.SequenceFactory.build(parent=course)
        # The group_id_to_child mapping ties partition groups "0"/"1" to the
        # two HTML children created below.
        split_test = SplitTestModuleFactory(
            parent=sequence,
            attribs={
                'user_partition_id': '0',
                'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}'  # pylint: disable=line-too-long
            }
        )
        # One HTML child per experiment group, keyed by the mapping above.
        xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0')
        xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1')

        self.course = self.process_xml(course)
        self.course_sequence = self.course.get_children()[0]
        self.module_system = get_test_system()

        self.module_system.descriptor_runtime = self.course._runtime  # pylint: disable=protected-access
        self.course.runtime.export_fs = MemoryFS()

        user = Mock(username='ma', email='ma@edx.org', is_staff=False, is_active=True)
        # self.user_partition (id 0) comes from PartitionTestCase; the second
        # partition exercises the multiple-partitions code path.
        self.partitions_service = StaticPartitionService(
            [
                self.user_partition,
                UserPartition(
                    1, 'second_partition', 'Second Partition',
                    [Group("0", 'abel'), Group("1", 'baker'), Group("2", 'charlie')],
                    MockUserPartitionScheme()
                )
            ],
            user=user,
            course_id=self.course.id,
            track_function=Mock(name='track_function'),
        )
        self.module_system._services['partitions'] = self.partitions_service  # pylint: disable=protected-access

        # Bind the split_test module to the mocked runtime as the test user.
        self.split_test_module = self.course_sequence.get_children()[0]
        self.split_test_module.bind_for_student(
            self.module_system,
            user.id
        )
@ddt.ddt
class SplitTestModuleLMSTest(SplitTestModuleTest):
    """
    Test the split test module
    """
    @ddt.data((0, 'split_test_cond0'), (1, 'split_test_cond1'))
    @ddt.unpack
    def test_child(self, user_tag, child_url_name):
        # Force the scheme to pick the given group, then verify that the
        # matching child descriptor is selected.
        self.user_partition.scheme.current_group = self.user_partition.groups[user_tag]
        self.assertEquals(self.split_test_module.child_descriptor.url_name, child_url_name)

    @ddt.data((0, 'HTML FOR GROUP 0'), (1, 'HTML FOR GROUP 1'))
    @ddt.unpack
    def test_get_html(self, user_tag, child_content):
        # The student view should render only the content of the chosen group.
        self.user_partition.scheme.current_group = self.user_partition.groups[user_tag]
        self.assertIn(
            child_content,
            self.module_system.render(self.split_test_module, STUDENT_VIEW).content
        )

    @ddt.data(0, 1)
    def test_child_missing_tag_value(self, _user_tag):
        # If user_tag has a missing value, we should still get back a valid child url
        self.assertIn(self.split_test_module.child_descriptor.url_name, ['split_test_cond0', 'split_test_cond1'])

    @ddt.data(100, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
    def test_child_persist_new_tag_value_when_tag_missing(self, _user_tag):
        # If a user_tag has a missing value, a group should be saved/persisted for that user.
        # So, we check that we get the same url_name when we call on the url_name twice.
        # We run the test ten times so that, if our storage is failing, we'll be most likely to notice it.
        self.assertEquals(
            self.split_test_module.child_descriptor.url_name,
            self.split_test_module.child_descriptor.url_name
        )

    # Patch the definition_to_xml for the html children.
    @patch('xmodule.html_module.HtmlDescriptor.definition_to_xml')
    def test_export_import_round_trip(self, def_to_xml):
        # The HtmlDescriptor definition_to_xml tries to write to the filesystem
        # before returning an xml object. Patch this to just return the xml.
        def_to_xml.return_value = lxml.etree.Element('html')

        # Mock out the process_xml
        # Expect it to return a child descriptor for the SplitTestDescriptor when called.
        self.module_system.process_xml = Mock()

        # Write out the xml.
        xml_obj = self.split_test_module.definition_to_xml(MemoryFS())

        self.assertEquals(xml_obj.get('user_partition_id'), '0')
        self.assertIsNotNone(xml_obj.get('group_id_to_child'))

        # Read the xml back in and verify the round trip preserved the fields.
        fields, children = SplitTestDescriptor.definition_from_xml(xml_obj, self.module_system)
        self.assertEquals(fields.get('user_partition_id'), '0')
        self.assertIsNotNone(fields.get('group_id_to_child'))
        self.assertEquals(len(children), 2)
class SplitTestModuleStudioTest(SplitTestModuleTest):
    """
    Unit tests for how split test interacts with Studio.
    """

    @patch('xmodule.split_test_module.SplitTestDescriptor.group_configuration_url', return_value='http://example.com')
    def test_render_author_view(self, group_configuration_url):
        """
        Test the rendering of the Studio author view.
        """
        def create_studio_context(root_xblock):
            """
            Context for rendering the studio "author_view".
            """
            return {
                'reorderable_items': set(),
                'root_xblock': root_xblock,
            }

        # The split_test module should render both its groups when it is the root
        context = create_studio_context(self.split_test_module)
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertIn('HTML FOR GROUP 0', html)
        self.assertIn('HTML FOR GROUP 1', html)

        # When rendering as a child, it shouldn't render either of its groups
        context = create_studio_context(self.course_sequence)
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertNotIn('HTML FOR GROUP 0', html)
        self.assertNotIn('HTML FOR GROUP 1', html)

        # The "Create Missing Groups" button should be rendered when groups are missing
        context = create_studio_context(self.split_test_module)
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
        ]
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertIn('HTML FOR GROUP 0', html)
        self.assertIn('HTML FOR GROUP 1', html)

    def test_group_configuration_url(self):
        """
        Test creation of correct Group Configuration URL.
        """
        mocked_course = Mock(advanced_modules=['split_test'])
        mocked_modulestore = Mock()
        mocked_modulestore.get_course.return_value = mocked_course
        self.split_test_module.system.modulestore = mocked_modulestore
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]

        expected_url = '/group_configurations/edX/xml_test_course/101#0'
        self.assertEqual(expected_url, self.split_test_module.group_configuration_url)

    def test_editable_settings(self):
        """
        Test the setting information passed back from editable_metadata_fields.
        """
        editable_metadata_fields = self.split_test_module.editable_metadata_fields
        self.assertIn(SplitTestDescriptor.display_name.name, editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.due.name, editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.user_partitions.name, editable_metadata_fields)

        # user_partition_id will always appear in editable_metadata_settings, regardless
        # of the selected value.
        self.assertIn(SplitTestDescriptor.user_partition_id.name, editable_metadata_fields)

    def test_non_editable_settings(self):
        """
        Test the settings that are marked as "non-editable".
        """
        non_editable_metadata_fields = self.split_test_module.non_editable_metadata_fields
        self.assertIn(SplitTestDescriptor.due, non_editable_metadata_fields)
        self.assertIn(SplitTestDescriptor.user_partitions, non_editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.display_name, non_editable_metadata_fields)

    def test_available_partitions(self):
        """
        Tests that the available partitions are populated correctly when editable_metadata_fields are called
        """
        self.assertEqual([], SplitTestDescriptor.user_partition_id.values)

        # user_partitions is empty, only the "Not Selected" item will appear.
        self.split_test_module.user_partition_id = SplitTestFields.no_partition_selected['value']
        self.split_test_module.editable_metadata_fields  # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(1, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])

        # Populate user_partitions and call editable_metadata_fields again
        self.split_test_module.user_partitions = [
            UserPartition(
                0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
                self.random_scheme
            ),
            # Only UserPartitions with scheme "random" will be returned as available options.
            UserPartition(
                1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
                self.non_random_scheme
            )
        ]
        self.split_test_module.editable_metadata_fields  # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(2, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
        self.assertEqual(0, partitions[1]['value'])
        self.assertEqual("first_partition", partitions[1]['display_name'])

        # Try again with a selected partition and verify that there is no option for "No Selection"
        self.split_test_module.user_partition_id = 0
        self.split_test_module.editable_metadata_fields  # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(1, len(partitions))
        self.assertEqual(0, partitions[0]['value'])
        self.assertEqual("first_partition", partitions[0]['display_name'])

        # Finally try again with an invalid selected partition and verify that "No Selection" is an option
        self.split_test_module.user_partition_id = 999
        self.split_test_module.editable_metadata_fields  # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(2, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
        self.assertEqual(0, partitions[1]['value'])
        self.assertEqual("first_partition", partitions[1]['display_name'])

    def test_active_and_inactive_children(self):
        """
        Tests the active and inactive children returned for different split test configurations.
        """
        split_test_module = self.split_test_module
        children = split_test_module.get_children()

        # Verify that a split test has no active children if it has no specified user partition.
        split_test_module.user_partition_id = -1
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [])
        self.assertEqual(inactive_children, children)

        # Verify that all the children are returned as active for a correctly configured split_test
        split_test_module.user_partition_id = 0
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, children)
        self.assertEqual(inactive_children, [])

        # Verify that a split_test does not return inactive children in the active children
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [children[0]])
        self.assertEqual(inactive_children, [children[1]])

        # Verify that a split_test ignores misconfigured children
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("2", 'gamma')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [children[0]])
        self.assertEqual(inactive_children, [children[1]])

        # Verify that a split_test referring to a non-existent user partition has no active children
        self.split_test_module.user_partition_id = 2
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [])
        self.assertEqual(inactive_children, children)

    def test_validation_messages(self):
        """
        Test the validation messages produced for different split test configurations.
        """
        split_test_module = self.split_test_module

        def verify_validation_message(message, expected_message, expected_message_type,
                                      expected_action_class=None, expected_action_label=None,
                                      expected_action_runtime_event=None):
            """
            Verify that the validation message has the expected validation message and type.
            """
            self.assertEqual(message.text, expected_message)
            self.assertEqual(message.type, expected_message_type)
            if expected_action_class:
                self.assertEqual(message.action_class, expected_action_class)
            else:
                self.assertFalse(hasattr(message, "action_class"))
            if expected_action_label:
                self.assertEqual(message.action_label, expected_action_label)
            else:
                self.assertFalse(hasattr(message, "action_label"))
            if expected_action_runtime_event:
                self.assertEqual(message.action_runtime_event, expected_action_runtime_event)
            else:
                self.assertFalse(hasattr(message, "action_runtime_event"))

        def verify_summary_message(general_validation, expected_message, expected_message_type):
            """
            Verify that the general validation message has the expected validation message and type.
            """
            self.assertEqual(general_validation.text, expected_message)
            self.assertEqual(general_validation.type, expected_message_type)

        # Verify the messages for an unconfigured user partition
        split_test_module.user_partition_id = -1
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 0)
        verify_validation_message(
            validation.summary,
            u"The experiment is not associated with a group configuration.",
            StudioValidationMessage.NOT_CONFIGURED,
            'edit-button',
            u"Select a Group Configuration",
        )

        # Verify the messages for a correctly configured split_test
        split_test_module.user_partition_id = 0
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        validation = split_test_module.validate_split_test()
        self.assertTrue(validation)
        # Fix: assertIsNone's optional second argument is the failure *message*;
        # the previous code passed a redundant ``None`` there, which read like a
        # (non-existent) expected-value comparison.
        self.assertIsNone(split_test_module.general_validation_message())

        # Verify the messages for a split test with too few groups
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment does not contain all of the groups in the configuration.",
            StudioValidationMessage.ERROR,
            expected_action_runtime_event='add-missing-groups',
            expected_action_label=u"Add Missing Groups"
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )

        # Verify the messages for a split test with children that are not associated with any group
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
            StudioValidationMessage.WARNING
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.WARNING
        )

        # Verify the messages for a split test with both missing and inactive children
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("2", 'gamma')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 2)
        verify_validation_message(
            validation.messages[0],
            u"The experiment does not contain all of the groups in the configuration.",
            StudioValidationMessage.ERROR,
            expected_action_runtime_event='add-missing-groups',
            expected_action_label=u"Add Missing Groups"
        )
        verify_validation_message(
            validation.messages[1],
            u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
            StudioValidationMessage.WARNING
        )
        # With two messages of type error and warning priority given to error.
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )

        # Verify the messages for a split test referring to a non-existent user partition
        split_test_module.user_partition_id = 2
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment uses a deleted group configuration. "
            u"Select a valid group configuration or delete this experiment.",
            StudioValidationMessage.ERROR
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )

        # Verify the message for a split test referring to a non-random user partition
        split_test_module.user_partitions = [
            UserPartition(
                10, 'incorrect_partition', 'Non Random Partition', [Group("0", 'alpha'), Group("2", 'gamma')],
                scheme=self.non_random_scheme
            )
        ]
        split_test_module.user_partition_id = 10
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment uses a group configuration that is not supported for experiments. "
            u"Select a valid group configuration or delete this experiment.",
            StudioValidationMessage.ERROR
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )
| agpl-3.0 |
iago1460/django-radio | radioco/apps/programmes/models.py | 1 | 12144 | # Radioco - Broadcasting Radio Recording Scheduling system.
# Copyright (C) 2014 Iago Veloso Abalo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytz
from bs4 import BeautifulSoup
from ckeditor_uploader.fields import RichTextUploadingField
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import FieldError
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save, pre_save
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import datetime
from radioco.apps.radioco.utils import field_has_changed
from radioco.apps.schedules.utils import next_dates
# Allow deployments to override the languages offered for programmes;
# fall back to Django's global LANGUAGES setting when not configured.
if hasattr(settings, 'PROGRAMME_LANGUAGES'):
    PROGRAMME_LANGUAGES = settings.PROGRAMME_LANGUAGES
else:
    PROGRAMME_LANGUAGES = settings.LANGUAGES
class Programme(models.Model):
    """A radio programme (show). Episodes and schedules hang off this model."""

    class Meta:
        verbose_name = _('programme')
        verbose_name_plural = _('programmes')
        permissions = (("see_all_programmes", "Can see all programmes"),)

    # Category choices mirror the iTunes podcast category taxonomy.
    CATEGORY_CHOICES = (
        ('Arts', _('Arts')),
        ('Business', _('Business')),
        ('Comedy', _('Comedy')),
        ('Education', _('Education')),
        ('Games & Hobbies', _('Games & Hobbies')),
        ('Government & Organizations', _('Government & Organizations')),
        ('Health', _('Health')),
        ('Kids & Family', _('Kids & Family')),
        ('Music', _('Music')),
        ('News & Politics', _('News & Politics')),
        ('Religion & Spirituality', _('Religion & Spirituality')),
        ('Science & Medicine', _('Science & Medicine')),
        ('Society & Culture', _('Society & Culture')),
        ('Sports & Recreation', _('Sports & Recreation')),
        ('Technology', _('Technology')),
        ('TV & Film', _('TV & Film')),
    )

    name = models.CharField(
        max_length=100, unique=True, verbose_name=_("name")
    )
    # Users presenting the programme; per-programme role details live on Role.
    announcers = models.ManyToManyField(
        User, blank=True, through='Role', verbose_name=_("announcers")
    )
    synopsis = RichTextUploadingField(blank=True, verbose_name=_("synopsis"))
    photo = models.ImageField(
        upload_to='photos/', default='defaults/default-programme-photo.jpg', verbose_name=_("photo")
    )
    language = models.CharField(
        default=PROGRAMME_LANGUAGES[0][0], verbose_name=_("language"),
        choices=[(k_v[0], _(k_v[1])) for k_v in PROGRAMME_LANGUAGES], max_length=7
    )
    # XXX ensure not decreasing
    current_season = models.PositiveIntegerField(
        validators=[MinValueValidator(1)], verbose_name=_("current season")
    )
    category = models.CharField(
        blank=True, null=True, max_length=50, choices=CATEGORY_CHOICES, verbose_name=_("category")
    )
    slug = models.SlugField(max_length=100, unique=True,
                            help_text=_("Please DON'T change this value. It's used to build URL's."))
    # Stored in minutes; exposed as a timedelta through the ``runtime`` property.
    _runtime = models.PositiveIntegerField(
        validators=[MinValueValidator(1)], verbose_name=_("runtime"), help_text=_("In minutes."))
    start_date = models.DateField(blank=True, null=True, verbose_name=_('start date'))
    end_date = models.DateField(blank=True, null=True, verbose_name=_('end date'))

    @property
    def synopsis_text(self):
        """Return the synopsis with all HTML markup stripped."""
        return BeautifulSoup(self.synopsis, "html.parser").text

    @property
    def runtime(self):
        """Return the runtime as a ``datetime.timedelta``.

        Raises:
            FieldError: if the underlying ``_runtime`` field is unset (or 0).
        """
        if not self._runtime:
            raise FieldError(_('Runtime not set'))
        return datetime.timedelta(minutes=self._runtime)

    @runtime.setter
    def runtime(self, value):
        # ``value`` is expected in minutes, matching the storage field.
        self._runtime = value

    @property
    def start_dt(self):
        """Midnight of ``start_date`` in the default timezone, as UTC; None if unset."""
        if not self.start_date:
            return None
        tz = timezone.get_default_timezone()
        return tz.localize(datetime.datetime.combine(self.start_date, datetime.time())).astimezone(pytz.utc)

    @property
    def end_dt(self):
        """23:59:59 of ``end_date`` in the default timezone, as UTC; None if unset."""
        if not self.end_date:
            return None
        tz = timezone.get_default_timezone()
        return tz.localize(datetime.datetime.combine(self.end_date, datetime.time(23, 59, 59))).astimezone(pytz.utc)

    def save(self, *args, **kwargs):
        # Auto-generate the slug from the name the first time only; the slug
        # is part of public URLs and must stay stable afterwards.
        if not self.slug:
            self.slug = slugify(self.name)
        super(Programme, self).save(*args, **kwargs)

    def rearrange_episodes(self, after, calendar):
        """
        Update the issue_date of episodes from a given date
        """
        episodes = Episode.objects.unfinished(self, after)
        dates = next_dates(calendar, self, after)

        # Further dates and episodes available -> re-order
        while True:
            try:
                date = next(dates)
                episode = next(episodes)
            except StopIteration:
                break
            else:
                episode.issue_date = date
                episode.save()

        # No further dates available -> unschedule
        while True:
            try:
                episode = next(episodes)
            except StopIteration:
                break
            else:
                episode.issue_date = None
                episode.save()

    def get_absolute_url(self):
        return reverse('programmes:detail', args=[self.slug])

    def __str__(self):
        return "%s" % (self.name)
def update_schedule_performance(programme):
    """Re-save every schedule of ``programme`` so schedule-side derived data is refreshed."""
    for schedule in programme.schedule_set.all():
        # schedule.effective_start_dt = calculate_effective_schedule_start_dt(schedule)
        # schedule.effective_end_dt = calculate_effective_schedule_end_dt(schedule)
        schedule.save()  # TODO: improve performance, update objects in bulk? We need custom logic on save
def update_schedule_if_dt_has_changed(sender, instance, **kwargs):
    """pre_save hook: refresh schedules when a Programme's date range changed."""
    if field_has_changed(instance, 'start_date') or field_has_changed(instance, 'end_date'):  # TODO: improve
        update_schedule_performance(instance)
# Register the hook above; dispatch_uid prevents duplicate registration on re-import.
pre_save.connect(
    update_schedule_if_dt_has_changed, sender=Programme, dispatch_uid='update_schedule_if_dt_has_changed')
class EpisodeManager(models.Manager):
    """Manager with helpers for creating and querying programme episodes."""

    # XXX this is not atomic, transaction?
    def create_episode(self, date, programme, last_episode=None, episode=None):
        """Create (or re-number an existing) episode for ``programme`` issued at ``date``.

        The season is taken from ``programme.current_season``; numbering
        continues from ``last_episode`` when it belongs to the same season,
        otherwise restarts at 1.
        """
        if not last_episode:
            last_episode = self.last(programme)
        season = programme.current_season
        if last_episode and last_episode.season == season:
            number_in_season = last_episode.number_in_season + 1
        else:
            number_in_season = 1
        if episode:
            # Re-use the supplied episode object instead of creating one.
            episode.programme = programme
            episode.issue_date = date
            episode.season = season
            episode.number_in_season = number_in_season
        else:
            episode = Episode(
                programme=programme, issue_date=date, season=season, number_in_season=number_in_season
            )
        episode.save()
        # Copy the programme-level roles onto the episode as participants.
        for role in Role.objects.filter(programme=programme):
            Participant.objects.create(
                person=role.person, episode=episode, role=role.role, description=role.description
            )
        return episode

    @staticmethod
    def last(programme):
        """Return the latest episode of ``programme`` (by season/number), or None."""
        return (programme.episode_set
                .order_by("-season", "-number_in_season")
                .first())

    @staticmethod
    def unfinished(programme, after=None):
        """Iterate episodes issued on/after ``after`` (default: now) plus unscheduled ones."""
        if not after:
            after = timezone.now()
        return programme.episode_set.filter(
            Q(issue_date__gte=after) | Q(issue_date=None)).order_by("season", "number_in_season").iterator()
class Episode(models.Model):
    """A single broadcast of a Programme, identified by (programme, season, number)."""

    class Meta:
        unique_together = (('season', 'number_in_season', 'programme'),)
        verbose_name = _('episode')
        verbose_name_plural = _('episodes')
        permissions = (("see_all_episodes", "Can see all episodes"),)

    objects = EpisodeManager()

    title = models.CharField(max_length=100, blank=True, null=True, verbose_name=_("title"))
    # Contributors; per-episode role details live on Participant.
    people = models.ManyToManyField(User, blank=True, through='Participant', verbose_name=_("people"))
    programme = models.ForeignKey(Programme, verbose_name=_("programme"), on_delete=models.CASCADE)
    summary = RichTextUploadingField(blank=True, verbose_name=_("summary"))
    # None means the episode is currently unscheduled.
    issue_date = models.DateTimeField(blank=True, null=True, db_index=True, verbose_name=_('issue date'))
    season = models.PositiveIntegerField(validators=[MinValueValidator(1)], verbose_name=_("season"))
    number_in_season = models.PositiveIntegerField(validators=[MinValueValidator(1)], verbose_name=_("No. in season"))

    @property
    def summary_text(self):
        """Return the summary with all HTML markup stripped."""
        return BeautifulSoup(self.summary, "html.parser").text

    # FIXME: this is not true for archived episodes
    @property
    def runtime(self):
        """Delegate to the programme's runtime (see FIXME above)."""
        return self.programme.runtime

    def get_absolute_url(self):
        return reverse('programmes:episode_detail', args=[self.programme.slug, self.season, self.number_in_season])

    def __str__(self):
        # "SxN title" when a title is set, otherwise "SxN programme-name".
        if self.title:
            return "%sx%s %s" % (self.season, self.number_in_season, self.title)
        return "%sx%s %s" % (self.season, self.number_in_season, self.programme)
class Participant(models.Model):
    """Through model linking a User to an Episode with a per-episode role."""

    person = models.ForeignKey(User, verbose_name=_("person"), on_delete=models.CASCADE)
    episode = models.ForeignKey(Episode, verbose_name=_("episode"), on_delete=models.CASCADE)
    role = models.CharField(max_length=60, blank=True, null=True, verbose_name=_("role"))
    description = models.TextField(blank=True, verbose_name=_("description"))

    class Meta:
        unique_together = ('person', 'episode')
        verbose_name = _('contributor')
        verbose_name_plural = _('contributors')
        permissions = (
            ("see_all_participants", "Can see all participants"),
        )

    def __str__(self):
        return "%s: %s" % (self.episode, self.person.username)
class Role(models.Model):
    """Through model linking a User to a Programme with a per-programme role."""

    person = models.ForeignKey(User, verbose_name=_("person"), on_delete=models.CASCADE)
    programme = models.ForeignKey(Programme, verbose_name=_("programme"), on_delete=models.CASCADE)
    role = models.CharField(max_length=60, blank=True, null=True, verbose_name=_("role"))
    description = models.TextField(blank=True, verbose_name=_("description"))
    date_joined = models.DateField(auto_now_add=True)

    class Meta:
        unique_together = ('person', 'programme')
        verbose_name = _('role')
        verbose_name_plural = _('roles')
        permissions = (
            ("see_all_roles", "Can see all roles"),
        )

    def __str__(self):
        return "%s: %s" % (self.programme.name, self.person.username)
class Podcast(models.Model):
    """Podcast recording of an episode (one per episode)."""

    episode = models.OneToOneField(Episode, primary_key=True, related_name='podcast')
    url = models.CharField(max_length=2048)
    mime_type = models.CharField(max_length=20)
    length = models.PositiveIntegerField()  # bytes
    # Duration in some unit of time — presumably seconds; TODO confirm against feed code.
    duration = models.PositiveIntegerField(validators=[MinValueValidator(1)])
    podcast_file = models.FileField(upload_to='podcasts/', blank=True)

    def save(self, *args, **kwargs):
        # Derive the public URL from the uploaded file when none was provided.
        if not self.url and self.podcast_file:
            # Imported here rather than at module level — presumably to avoid a
            # circular import; verify before moving.
            from radioco.apps.global_settings.models import PodcastConfiguration
            podcast_config = PodcastConfiguration.get_global()
            self.url = podcast_config.url_source.rstrip('/') + self.podcast_file.url
        super(Podcast, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return self.episode.get_absolute_url()
| gpl-3.0 |
knifenomad/django | tests/servers/tests.py | 257 | 5907 | # -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import contextlib
import os
import socket
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase, override_settings
from django.utils._os import upath
from django.utils.http import urlencode
from django.utils.six import text_type
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from .models import Person
# Directory containing this test module; static/media fixtures live beside it.
TEST_ROOT = os.path.dirname(upath(__file__))

# Settings overrides applied (via override_settings) to every test class below.
TEST_SETTINGS = {
    'MEDIA_URL': '/media/',
    'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
    'STATIC_URL': '/static/',
    'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
@override_settings(ROOT_URLCONF='servers.urls', **TEST_SETTINGS)
class LiveServerBase(LiveServerTestCase):
    """Common fixture/setup for the live-server tests below."""

    # Apps needed by the fixture data and the views under servers.urls.
    available_apps = [
        'servers',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
    ]
    fixtures = ['testdata.json']

    def urlopen(self, url):
        # Convenience wrapper: resolve ``url`` against the live server's root.
        return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
    """
    Ensure that the address set in the environment variable is valid.
    Refs #2879.
    """

    @classmethod
    def setUpClass(cls):
        # NOTE: this deliberately does NOT call super().setUpClass() directly —
        # raises_exception() drives the parent setUpClass/tearDownClass for
        # each invalid address under test.

        # Backup original environment variable
        address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
        old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')

        # Just the host is not accepted
        cls.raises_exception('localhost', ImproperlyConfigured)

        # The host must be valid
        cls.raises_exception('blahblahblah:8081', socket.error)

        # The list of ports must be in a valid format
        cls.raises_exception('localhost:8081,', ImproperlyConfigured)
        cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)

        # Restore original environment variable
        if address_predefined:
            os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
        else:
            del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']

        # put it in a list to prevent descriptor lookups in test
        cls.live_server_url_test = [cls.live_server_url]

    @classmethod
    def tearDownClass(cls):
        # skip it, as setUpClass doesn't call its parent either
        pass

    @classmethod
    def raises_exception(cls, address, exception):
        """Assert that starting the live server at ``address`` raises ``exception``."""
        os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
        try:
            super(LiveServerAddress, cls).setUpClass()
            raise Exception("The line above should have raised an exception")
        except exception:
            pass
        finally:
            # Always shut the (possibly partially started) server back down.
            super(LiveServerAddress, cls).tearDownClass()

    def test_live_server_url_is_class_property(self):
        self.assertIsInstance(self.live_server_url_test[0], text_type)
        self.assertEqual(self.live_server_url_test[0], self.live_server_url)
class LiveServerViews(LiveServerBase):
    """HTTP-level tests of what the live test server serves."""

    def test_404(self):
        """
        Ensure that the LiveServerTestCase serves 404s.
        Refs #2879.
        """
        try:
            self.urlopen('/')
        except HTTPError as err:
            self.assertEqual(err.code, 404, 'Expected 404 response')
        else:
            self.fail('Expected 404 response')

    def test_view(self):
        """
        Ensure that the LiveServerTestCase serves views.
        Refs #2879.
        """
        with contextlib.closing(self.urlopen('/example_view/')) as f:
            self.assertEqual(f.read(), b'example view')

    def test_static_files(self):
        """
        Ensure that the LiveServerTestCase serves static files.
        Refs #2879.
        """
        with contextlib.closing(self.urlopen('/static/example_static_file.txt')) as f:
            self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')

    def test_no_collectstatic_emulation(self):
        """
        Test that LiveServerTestCase reports a 404 status code when HTTP client
        tries to access a static file that isn't explicitly put under
        STATIC_ROOT.
        """
        try:
            self.urlopen('/static/another_app/another_app_static_file.txt')
        except HTTPError as err:
            self.assertEqual(err.code, 404, 'Expected 404 response')
        else:
            # Bug fix: the failure message previously interpolated ``err.code``,
            # but ``err`` is only bound when HTTPError is raised — reaching this
            # branch means no exception occurred, so it raised NameError instead
            # of failing the test with the intended message.
            self.fail('Expected 404 response')

    def test_media_files(self):
        """
        Ensure that the LiveServerTestCase serves media files.
        Refs #2879.
        """
        with contextlib.closing(self.urlopen('/media/example_media_file.txt')) as f:
            self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')

    def test_environ(self):
        # Non-ASCII query strings must arrive percent-encoded in QUERY_STRING.
        with contextlib.closing(self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))) as f:
            self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
    """Checks that the live server thread and the test share one database
    view: fixtures are visible to it, and its writes are visible to us."""
    def test_fixtures_loaded(self):
        """
        Ensure that fixtures are properly loaded and visible to the
        live server thread.
        Refs #2879.
        """
        with contextlib.closing(self.urlopen('/model_view/')) as response:
            body = response.read()
        self.assertEqual(body.splitlines(), [b'jane', b'robert'])
    def test_database_writes(self):
        """
        Ensure that data written to the database by a view can be read.
        Refs #2879.
        """
        self.urlopen('/create_model_instance/')
        people = Person.objects.all().order_by('pk')
        self.assertQuerysetEqual(
            people,
            ['jane', 'robert', 'emily'],
            lambda person: person.name
        )
| bsd-3-clause |
bingshuika/hearthbreaker-new | hearthbreaker/tags/event.py | 2 | 3690 | from hearthbreaker.tags.base import MinionEvent, PlayerEvent
from hearthbreaker.tags.condition import MinionIsNotTarget, CardIsNotTarget
from hearthbreaker.tags.selector import FriendlyPlayer
class SpellCast(PlayerEvent):
    """Event that fires when the selected player(s) cast a spell card.

    Implemented by listening to the generic "card_played" player event and
    filtering it down to spell cards before dispatching.
    """
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("spell_cast", condition, player)
    def bind(self, target, func):
        # NOTE(review): __target__/__func__ are overwritten on every loop
        # pass, so with a multi-player selector only the last iteration's
        # state survives -- confirm the selectors used here always yield a
        # single player.
        for player in self.player.get_players(target.player):
            self.__target__ = target
            self.__func__ = func
            player.bind("card_played", self.__action__)
    def unbind(self, target, func):
        for player in self.player.get_players(target.player):
            player.unbind("card_played", self.__action__)
    def __action__(self, card, index):
        # Ignore minion/weapon plays; only spells trigger this event.
        if card.is_spell():
            if self.condition:
                # Defer to the base class so the condition is evaluated
                # before the callback fires.
                super().__action__(card, index)
            else:
                self.__func__(card, index)
class CardPlayed(PlayerEvent):
    """Fires when the selected player plays any card."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("card_played", condition, player)
class CardUsed(PlayerEvent):
    """Fires when a card is used; by default excludes the tag's own card."""
    def __init__(self, condition=CardIsNotTarget(), player=FriendlyPlayer()):
        super().__init__("card_used", condition, player)
class AfterAdded(PlayerEvent):
    """Fires after a minion is added; by default excludes the tag's own minion."""
    def __init__(self, condition=MinionIsNotTarget(), player=FriendlyPlayer()):
        super().__init__("after_added", condition, player)
class TurnEnded(PlayerEvent):
    """Fires when the selected player's turn ends."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("turn_ended", condition, player)
class TurnStarted(PlayerEvent):
    """Fires when the selected player's turn starts."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("turn_started", condition, player)
class MinionDied(PlayerEvent):
    """Fires when one of the selected player's minions dies."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("minion_died", condition, player)
class MinionPlaced(PlayerEvent):
    """Fires when a minion is placed on the board; excludes the tag's own minion by default."""
    def __init__(self, condition=MinionIsNotTarget(), player=FriendlyPlayer()):
        super().__init__("minion_placed", condition, player)
class MinionSummoned(PlayerEvent):
    """Fires when a minion is summoned; excludes the tag's own minion by default."""
    def __init__(self, condition=MinionIsNotTarget(), player=FriendlyPlayer()):
        super().__init__("minion_summoned", condition, player)
class CharacterDamaged(PlayerEvent):
    """Fires when one of the selected player's characters takes damage."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("character_damaged", condition, player)
class CharacterHealed(PlayerEvent):
    """Fires when one of the selected player's characters is healed."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("character_healed", condition, player)
class SecretRevealed(PlayerEvent):
    """Fires when one of the selected player's secrets is revealed."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("secret_revealed", condition, player)
class CharacterAttack(PlayerEvent):
    """Fires when one of the selected player's characters attacks."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("character_attack", condition, player)
class ArmorIncreased(PlayerEvent):
    """Fires when the selected player gains armor."""
    def __init__(self, condition=None, player=FriendlyPlayer()):
        super().__init__("armor_increased", condition, player)
class Attack(MinionEvent):
    """Fires when the owning minion attacks."""
    def __init__(self, condition=None):
        super().__init__("attack", condition)
class AttackCompleted(MinionEvent):
    """Fires after the owning minion's attack has fully resolved."""
    def __init__(self):
        super().__init__("attack_completed")
class DidDamage(MinionEvent):
    """Fires when the owning minion deals damage."""
    def __init__(self):
        super().__init__("did_damage")
class WeaponDestroyed(MinionEvent):
    """Fires when the owner's weapon is destroyed."""
    def __init__(self):
        super().__init__("weapon_destroyed")
class Damaged(MinionEvent):
    """Fires when the owning minion takes damage."""
    def __init__(self):
        super().__init__("damaged")
class Drawn(MinionEvent):
    """Fires when the owning card is drawn from the deck."""
    def __init__(self):
        super().__init__("drawn")
| mit |
wohlert/agnosia | classifiers/dnn.py | 2 | 5199 | """
network.py
Provides different network models.
"""
import numpy as np
np.random.seed(1337)
from keras.callbacks import Callback
from keras.models import Sequential, Model
from keras.layers import Activation, Dense, Dropout, Merge, Reshape, Input, merge
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.metrics import accuracy_score
def create_single_frame(input_shape):
    """
    Creates a CNN tower for a single image frame: three 32-filter, two
    64-filter and one 128-filter 3x3 convolution layers, each group
    followed by 2x2 max-pooling.
    """
    model = Sequential()
    # 3 32*3*3 convolution layers (the old comment said 4, but only three are added)
    model.add(Convolution2D(32, 3, 3, border_mode="valid", input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # 2 64*3*3 convolution layers
    model.add(Convolution2D(64, 3, 3, border_mode="valid"))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # 1 128*3*3 convolution layer
    model.add(Convolution2D(128, 3, 3, border_mode="valid"))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    return model
def create_multi_frame(cnn_shape, frames):
    """
    Create `frames` parallel CNNs (see create_single_frame) whose merged
    outputs feed a recurrent LSTM layer that makes the final prediction.
    """
    model = Sequential()
    # Create one CNN per frame and concatenate their outputs
    convnets = [create_single_frame(cnn_shape) for _ in range(frames)]
    model.add(Merge(convnets, mode="concat"))
    # NOTE(review): assumes the concatenated features reshape cleanly into
    # (128, frames), i.e. 128 features per frame in frame-major order --
    # confirm against create_single_frame's flattened output size.
    model.add(Reshape((128, frames)))
    # LSTM layer - only keep last prediction
    model.add(LSTM(128, input_dim=frames, input_length=128, return_sequences=False))
    model.add(Activation("tanh"))
    # Fully connected layer
    model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(Activation("relu"))
    # Prediction layer
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation("sigmoid"))
    return model
def functional_model(image_shape, frames):
    """
    Creates a neural network using the functional API for Keras.

    A single convolutional tower (with shared weights) is applied to each
    of the `frames` input images; the per-frame feature vectors are
    concatenated, reshaped to (128, frames) and fed through an LSTM whose
    final state drives a sigmoid prediction.
    """
    conv_input = Input(shape=image_shape)
    # 3 32*3*3 convolution layers
    conv1 = Convolution2D(32, 3, 3, border_mode="valid", activation="relu")(conv_input)
    conv1 = Convolution2D(32, 3, 3, activation="relu")(conv1)
    conv1 = Convolution2D(32, 3, 3, activation="relu")(conv1)
    max1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv1)
    # 2 64*3*3 convolution layers
    conv2 = Convolution2D(64, 3, 3, border_mode="valid", activation="relu")(max1)
    conv2 = Convolution2D(64, 3, 3, activation="relu")(conv2)
    max2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2)
    # 1 128*3*3 convolution layer
    conv3 = Convolution2D(128, 3, 3, border_mode="valid", activation="relu")(max2)
    max3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv3)
    # Model for convolutional network, reused (weights shared) for every frame
    convnet = Model(input=conv_input, output=max3)
    # One input layer per frame, fed through the shared convnet
    inputs = [Input(shape=image_shape) for _ in range(frames)]
    convnets = [convnet(input) for input in inputs]
    merge_nets = merge(convnets, mode="concat")
    # Bug fix: the frame count was hard-coded as 7 here, breaking the model
    # for any other value of `frames`; use the parameter instead (identical
    # behaviour when frames == 7).
    reshape = Reshape((128, frames))(merge_nets)
    lstm = LSTM(128, input_dim=frames, input_length=128, return_sequences=False, activation="tanh")(reshape)
    # dropout1 = Dropout(0.5)(lstm)
    dense1 = Dense(512, activation="relu")(lstm)
    # dropout2 = Dropout(0.5)(dense1)
    prediction = Dense(1, activation="sigmoid")(dense1)
    return Model(input=inputs, output=prediction)
# Load data
from utils import random_split
X_train, X_test, y_train, y_test = random_split("images/", 32, 7)
_, frames, channels, width, height = np.shape(X_train)
# Reshape to match CNN shapes (one list entry per frame)
# NOTE(review): reshape() reinterprets the (samples, frames, ...) buffer as
# (frames, samples, ...) without moving data; that only matches a true axis
# swap for the intended memory layout -- confirm np.transpose isn't needed.
X_train = list(X_train.reshape(frames, -1, channels, width, height))
X_test = list(X_test.reshape(frames, -1, channels, width, height))
image_shape = (channels, width, height)
# Create model
model = functional_model(image_shape, frames)
model.compile(loss='binary_crossentropy',
              metrics=['accuracy'],
              optimizer="adam")
#SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Create callbacks
checkpoint = ModelCheckpoint("weights.{epoch:02d}-{val_loss:.2f}.hdf5")
early_stop = EarlyStopping(patience=2)
class LossHistory(Callback):
    """Keras callback that records the training loss after every batch."""
    def on_train_begin(self, logs={}):
        # `logs={}` mirrors the Keras Callback signature; the dict is never
        # mutated here, so the shared-default pitfall does not apply.
        self.losses = []
    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
callbacks = [
    # checkpoint,
    # early_stop,
    LossHistory()]
# Fit model
batch_size = 32
nb_epochs = 10
history = model.fit(X_train, y_train.ravel(), batch_size=batch_size,
                    nb_epoch=nb_epochs, callbacks=callbacks)
# Evaluate model
prediction = model.predict(X_test, batch_size=batch_size)
# Bug fix: the network emits sigmoid probabilities, which accuracy_score
# rejects as a "continuous" target; threshold them to hard 0/1 labels
# first, and pass ground truth as the first argument per the sklearn
# signature (y_true, y_pred).
predicted_labels = (prediction > 0.5).astype(int).ravel()
accuracy = accuracy_score(y_test.ravel(), predicted_labels)
print(accuracy)
| apache-2.0 |
marc-sensenich/ansible | test/units/modules/network/vyos/test_vyos_command.py | 45 | 4175 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.vyos import vyos_command
from units.modules.utils import set_module_args
from .vyos_module import TestVyosModule, load_fixture
class TestVyosCommandModule(TestVyosModule):
    """Unit tests for the vyos_command module: run_commands is mocked so
    each command returns the contents of a same-named fixture file."""
    module = vyos_command
    def setUp(self):
        super(TestVyosCommandModule, self).setUp()
        # Replace run_commands with a mock; load_fixtures wires its output.
        self.mock_run_commands = patch('ansible.modules.network.vyos.vyos_command.run_commands')
        self.run_commands = self.mock_run_commands.start()
    def tearDown(self):
        super(TestVyosCommandModule, self).tearDown()
        self.mock_run_commands.stop()
    def load_fixtures(self, commands=None):
        """Make the mocked run_commands return canned fixture output."""
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    command = item['command']
                except ValueError:
                    command = item
                # NOTE(review): a plain-string item raises TypeError and a
                # dict missing 'command' raises KeyError -- neither is caught
                # by `except ValueError` above; confirm commands are always
                # dicts carrying a 'command' key here.
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file
    def test_vyos_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Version: VyOS'))
    def test_vyos_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Version: VyOS'))
    def test_vyos_command_wait_for(self):
        wait_for = 'result[0] contains "VyOS maintainers"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()
    def test_vyos_command_wait_for_fails(self):
        # An unsatisfiable wait_for should exhaust the default 10 retries.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)
    def test_vyos_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)
    def test_vyos_command_match_any(self):
        wait_for = ['result[0] contains "VyOS maintainers"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()
    def test_vyos_command_match_all(self):
        wait_for = ['result[0] contains "VyOS maintainers"',
                    'result[0] contains "maintainers@vyos.net"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()
    def test_vyos_command_match_all_failure(self):
        wait_for = ['result[0] contains "VyOS maintainers"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
Slach/tessera | setup.py | 4 | 1483 | from setuptools import setup, find_packages
name = 'tessera'

# Execute _version.py in an isolated namespace to pick up __version__
# without importing the (possibly not yet installed) package.
_locals = {}
execfile('%s/_version.py' % name, _locals)
version = _locals['__version__']


def _read_requirements(path='requirements.txt'):
    """Return the stripped, non-empty, non-comment lines of *path*.

    Fixes two issues with the old inline expression: the file handle was
    never closed, and blank lines slipped through as empty requirement
    strings (the old check tested the raw line, which always contains at
    least a newline and is therefore truthy).
    """
    with open(path) as req_file:
        return [line.strip()
                for line in req_file
                if line.strip() and not line.startswith('#')]


setup(
    name=name,
    version=version,
    description='Powerful multipurpose dashboard server',
    license='Apache',
    author='Urban Airship',
    url='https://github.com/urbanairship/tessera',
    packages=find_packages(),
    include_package_data=True,  # Ensure templates, etc get pulled into sdists
    install_requires=_read_requirements(),
    entry_points={
        'console_scripts': [
            'tessera-init = tessera.main:init',
            'tessera = tessera.main:run'
        ]
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: No Input/Output (Daemon)',
        'Framework :: Flask',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Systems Administration',
    ],
)
| apache-2.0 |
ymcagodme/Norwalk-Judo | django/contrib/databrowse/plugins/fieldchoices.py | 252 | 3856 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.text import capfirst
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
import urllib
class FieldChoicePlugin(DatabrowsePlugin):
    """Databrowse plugin that lets users browse model instances grouped by
    the distinct values of individual fields.

    Python 2 era code throughout (``has_key``, ``urllib.quote``), matching
    the rest of this file.
    """
    def __init__(self, field_filter=None):
        # If field_filter is given, it should be a callable that takes a
        # Django database Field instance and returns True if that field should
        # be included. If field_filter is None, that all fields will be used.
        self.field_filter = field_filter
    def field_dict(self, model):
        """
        Helper function that returns a dictionary of all fields in the given
        model. If self.field_filter is set, it only includes the fields that
        match the filter.
        """
        if self.field_filter:
            return dict([(f.name, f) for f in model._meta.fields if self.field_filter(f)])
        else:
            # Default: skip relations, primary keys, unique fields and
            # unbounded field types that make poor grouping keys.
            return dict([(f.name, f) for f in model._meta.fields if not f.rel and not f.primary_key and not f.unique and not isinstance(f, (models.AutoField, models.TextField))])
    def model_index_html(self, request, model, site):
        # Render "View by:" links for every browsable field, or nothing.
        fields = self.field_dict(model)
        if not fields:
            return u''
        return mark_safe(u'<p class="filter"><strong>View by:</strong> %s</p>' % \
            u', '.join(['<a href="fields/%s/">%s</a>' % (f.name, force_unicode(capfirst(f.verbose_name))) for f in fields.values()]))
    def urls(self, plugin_name, easy_instance_field):
        # Only emit a URL if this field is one the plugin actually browses.
        if easy_instance_field.field in self.field_dict(easy_instance_field.model.model).values():
            field_value = smart_str(easy_instance_field.raw_value)
            return [mark_safe(u'%s%s/%s/%s/' % (
                easy_instance_field.model.url(),
                plugin_name, easy_instance_field.field.name,
                urllib.quote(field_value, safe='')))]
    def model_view(self, request, model_databrowse, url):
        """Dispatch /fields/<name>/[<value>/] URLs to the views below."""
        self.model, self.site = model_databrowse.model, model_databrowse.site
        self.fields = self.field_dict(self.model)
        # If the model has no fields with choices, there's no point in going
        # further.
        if not self.fields:
            raise http.Http404('The requested model has no fields.')
        if url is None:
            return self.homepage_view(request)
        url_bits = url.split('/', 1)
        if self.fields.has_key(url_bits[0]):
            return self.field_view(request, self.fields[url_bits[0]], *url_bits[1:])
        raise http.Http404('The requested page does not exist.')
    def homepage_view(self, request):
        # List all browsable fields, sorted by verbose name.
        easy_model = EasyModel(self.site, self.model)
        field_list = self.fields.values()
        field_list.sort(key=lambda k: k.verbose_name)
        return render_to_response('databrowse/fieldchoice_homepage.html', {'root_url': self.site.root_url, 'model': easy_model, 'field_list': field_list})
    def field_view(self, request, field, value=None):
        # With a value: list matching objects; without: list distinct values.
        easy_model = EasyModel(self.site, self.model)
        easy_field = easy_model.field(field.name)
        if value is not None:
            obj_list = easy_model.objects(**{field.name: value})
            return render_to_response('databrowse/fieldchoice_detail.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'value': value, 'object_list': obj_list})
        obj_list = [v[field.name] for v in self.model._default_manager.distinct().order_by(field.name).values(field.name)]
        return render_to_response('databrowse/fieldchoice_list.html', {'root_url': self.site.root_url, 'model': easy_model, 'field': easy_field, 'object_list': obj_list})
| bsd-3-clause |
kercon/Image-Aplication-PY | PYLIB/appJar/lib/nanojpeg.py | 5 | 31188 | import array, sys
# NanoJPEG -- KeyJ's Tiny Baseline JPEG Decoder
# version 1.1 (2010-03-05)
# by Martin J. Fiedler <martin.fiedler@gmx.net>
# http://keyj.emphy.de/nanojpeg/
#
# This software is published under the terms of KeyJ's Research License,
# version 0.2. Usage of this software is subject to the following conditions:
# 0. There's no warranty whatsoever. The author(s) of this software can not
# be held liable for any damages that occur when using this software.
# 1. This software may be used freely for both non-commercial and commercial
# purposes.
# 2. This software may be redistributed freely as long as no fees are charged
# for the distribution and this license information is included.
# 3. This software may be modified freely except for this license information,
# which must not be changed in any way.
# 4. If anything other than configuration, indentation or comments have been
# altered in the code, the original author(s) must receive a copy of the
# modified code.
#
# Ported to python by Andras Suller <suller.andras@gmail.com>
###############################################################################
## DOCUMENTATION SECTION ##
## read this if you want to know what this is all about ##
###############################################################################
# INTRODUCTION
# ============
#
# This is a minimal decoder for baseline JPEG images. It accepts memory dumps
# of JPEG files as input and generates either 8-bit grayscale or packed 24-bit
# RGB images as output. It does not parse JFIF or Exif headers; all JPEG files
# are assumed to be either grayscale or YCbCr. CMYK or other color spaces are
# not supported. All YCbCr subsampling schemes with power-of-two ratios are
# supported, as are restart intervals. Progressive or lossless JPEG is not
# supported.
# Summed up, NanoJPEG should be able to decode all images from digital cameras
# and most common forms of other non-progressive JPEG images.
# The decoder is not optimized for speed, it's optimized for simplicity and
# small code. Image quality should be at a reasonable level. A bicubic chroma
# upsampling filter ensures that subsampled YCbCr images are rendered in
# decent quality. The decoder is not meant to deal with broken JPEG files in
# a graceful manner; if anything is wrong with the bitstream, decoding will
# simply fail.
# The code should work with every modern C compiler without problems and
# should not emit any warnings. It uses only (at least) 32-bit integer
# arithmetic and is supposed to be endianness independent and 64-bit clean.
# However, it is not thread-safe.
# COMPILE-TIME CONFIGURATION
# ==========================
#
# The following aspects of NanoJPEG can be controlled with preprocessor
# defines:
#
# _NJ_EXAMPLE_PROGRAM = Compile a main() function with an example
# program.
# _NJ_INCLUDE_HEADER_ONLY = Don't compile anything, just act as a header
# file for NanoJPEG. Example:
# #define _NJ_INCLUDE_HEADER_ONLY
# #include "nanojpeg.c"
# int main(void) {
# njInit();
# // your code here
# njDone();
# }
# NJ_USE_LIBC=1 = Use the malloc(), free(), memset() and memcpy()
# functions from the standard C library (default).
# NJ_USE_LIBC=0 = Don't use the standard C library. In this mode,
# external functions njAlloc(), njFreeMem(),
# njFillMem() and njCopyMem() need to be defined
# and implemented somewhere.
# NJ_USE_WIN32=0 = Normal mode (default).
# NJ_USE_WIN32=1 = If compiling with MSVC for Win32 and
# NJ_USE_LIBC=0, NanoJPEG will use its own
# implementations of the required C library
# functions (default if compiling with MSVC and
# NJ_USE_LIBC=0).
# NJ_CHROMA_FILTER=1 = Use the bicubic chroma upsampling filter
# (default).
# NJ_CHROMA_FILTER=0 = Use simple pixel repetition for chroma upsampling
# (bad quality, but faster and less code).
# API
# ===
#
# For API documentation, read the "header section" below.
# EXAMPLE
# =======
#
# A few pages below, you can find an example program that uses NanoJPEG to
# convert JPEG files into PGM or PPM. To compile it, use something like
# gcc -O3 -D_NJ_EXAMPLE_PROGRAM -o nanojpeg nanojpeg.c
# You may also add -std=c99 -Wall -Wextra -pedantic -Werror, if you want :)
###############################################################################
## HEADER SECTION ##
## copy and pase this into nanojpeg.h if you want ##
###############################################################################
#ifndef _NANOJPEG_H
#define _NANOJPEG_H
# nj_result_t: Result codes for njDecode().
NJ_OK = 0 # no error, decoding successful
NJ_NO_JPEG = 1 # not a JPEG file
NJ_UNSUPPORTED = 2 # unsupported format
NJ_OUT_OF_MEM = 3 # out of memory
NJ_INTERNAL_ERR = 4 # internal error
NJ_SYNTAX_ERROR = 5 # syntax error
__NJ_FINISHED = 6 # used internally, will never be reported
# njInit: Initialize NanoJPEG.
# For safety reasons, this should be called at least one time before using
# using any of the other NanoJPEG functions.
#void njInit(void);
# njDecode: Decode a JPEG image.
# Decodes a memory dump of a JPEG file into internal buffers.
# Parameters:
# jpeg = The pointer to the memory dump.
# size = The size of the JPEG file.
# Return value: The error code in case of failure, or NJ_OK (zero) on success.
#nj_result_t njDecode(const void* jpeg, const int size);
# njGetWidth: Return the width (in pixels) of the most recently decoded
# image. If njDecode() failed, the result of njGetWidth() is undefined.
#int njGetWidth(void);
# njGetHeight: Return the height (in pixels) of the most recently decoded
# image. If njDecode() failed, the result of njGetHeight() is undefined.
#int njGetHeight(void);
# njIsColor: Return 1 if the most recently decoded image is a color image
# (RGB) or 0 if it is a grayscale image. If njDecode() failed, the result
# of njGetWidth() is undefined.
#int njIsColor(void);
# njGetImage: Returns the decoded image data.
# Returns a pointer to the most recently image. The memory layout it byte-
# oriented, top-down, without any padding between lines. Pixels of color
# images will be stored as three consecutive bytes for the red, green and
# blue channels. This data format is thus compatible with the PGM or PPM
# file formats and the OpenGL texture formats GL_LUMINANCE8 or GL_RGB8.
# If njDecode() failed, the result of njGetImage() is undefined.
#unsigned char* njGetImage(void);
# njGetImageSize: Returns the size (in bytes) of the image data returned
# by njGetImage(). If njDecode() failed, the result of njGetImageSize() is
# undefined.
#int njGetImageSize(void);
# njDone: Uninitialize NanoJPEG.
# Resets NanoJPEG's internal state and frees all memory that has been
# allocated at run-time by NanoJPEG. It is still possible to decode another
# image after a njDone() call.
#void njDone(void);
#endif//_NANOJPEG_H
###############################################################################
## CONFIGURATION SECTION ##
## adjust the default settings for the NJ_ defines here ##
###############################################################################
NJ_USE_LIBC = 1
NJ_USE_WIN32 = 0
NJ_CHROMA_FILTER = 1
###############################################################################
## EXAMPLE PROGRAM ##
## just define _NJ_EXAMPLE_PROGRAM to compile this (requires NJ_USE_LIBC) ##
###############################################################################
# #ifdef _NJ_EXAMPLE_PROGRAM
# #include <stdio.h>
# #include <stdlib.h>
# #include <string.h>
# int main(int argc, char* argv[]) {
# int size;
# char *buf;
# FILE *f;
# if (argc < 2) {
# printf("Usage: %s <input.jpg> [<output.ppm>]\n", argv[0]);
# return 2;
# }
# f = fopen(argv[1], "rb");
# if (!f) {
# printf("Error opening the input file.\n");
# return 1;
# }
# fseek(f, 0, SEEK_END);
# size = (int) ftell(f);
# buf = malloc(size);
# fseek(f, 0, SEEK_SET);
# size = (int) fread(buf, 1, size, f);
# fclose(f);
# njInit();
# if (njDecode(buf, size)) {
# printf("Error decoding the input file.\n");
# return 1;
# }
# f = fopen((argc > 2) ? argv[2] : (njIsColor() ? "nanojpeg_out.ppm" : "nanojpeg_out.pgm"), "wb");
# if (!f) {
# printf("Error opening the output file.\n");
# return 1;
# }
# fprintf(f, "P%d\n%d %d\n255\n", njIsColor() ? 6 : 5, njGetWidth(), njGetHeight());
# fwrite(njGetImage(), 1, njGetImageSize(), f);
# fclose(f);
# njDone();
# return 0;
# }
#endif
###############################################################################
## IMPLEMENTATION SECTION ##
## you may stop reading here ##
###############################################################################
#ifndef _NJ_INCLUDE_HEADER_ONLY
# typedef struct _nj_code {
# unsigned char bits, code;
# } nj_vlc_code_t;
class nj_vlc_code_t(object):
    """Huffman (VLC) lookup-table entry: code length in bits plus the
    decoded symbol value."""
    def __init__(self):
        # Both fields stay zero until the table is populated from a DHT
        # segment.
        self.bits = self.code = 0
# typedef struct _nj_cmp {
# int cid;
# int ssx, ssy;
# int width, height;
# int stride;
# int qtsel;
# int actabsel, dctabsel;
# int dcpred;
# unsigned char *pixels;
# } nj_component_t;
class nj_component_t(object):
    """Per-color-component decoding state: identifier, sampling factors,
    geometry, table selectors, DC predictor and the output pixel buffer."""
    def __init__(self):
        self.cid = 0                        # component id from the SOF marker
        self.ssx = self.ssy = 0             # horizontal/vertical sampling factors
        self.width = self.height = 0        # component dimensions in pixels
        self.stride = 0                     # bytes per row in the pixel buffer
        self.qtsel = 0                      # quantization table selector
        self.actabsel = self.dctabsel = 0   # AC/DC Huffman table selectors
        self.dcpred = 0                     # previous block's DC coefficient
        self.pixels = None                  # decoded sample buffer (set later)
# typedef struct _nj_ctx {
# nj_result_t error;
# const unsigned char *pos;
# int size;
# int length;
# int width, height;
# int mbwidth, mbheight;
# int mbsizex, mbsizey;
# int ncomp;
# nj_component_t comp[3];
# int qtused, qtavail;
# unsigned char qtab[4][64];
# nj_vlc_code_t vlctab[4][65536];
# int buf, bufbits;
# int block[64];
# int rstinterval;
# unsigned char *rgb;
# } nj_context_t;
class nj_context_t(object):
    """Whole-decoder state, mirroring the C original's static context.

    Note the reset method is deliberately named ``init`` (not ``__init__``);
    it is invoked explicitly via njInit()/njDone(), not at construction.
    """
    def init(self):
        """(Re)initialize every field to its pristine state."""
        self.error = 0
        self.spos = None                    # input byte buffer, indexed by pos
        self.pos = self.size = self.length = 0
        self.width = self.height = 0
        self.mbwidth = self.mbheight = 0    # image size in macroblocks
        self.mbsizex = self.mbsizey = 0     # macroblock size in pixels
        self.ncomp = 0
        self.comp = [nj_component_t() for _ in range(3)]
        self.qtused = self.qtavail = 0
        self.qtab = [[0] * 64 for _ in range(4)]
        # Four Huffman tables, each addressed by a 16-bit lookahead window.
        self.vlctab = [[nj_vlc_code_t() for _ in range(65536)]
                       for _ in range(4)]
        self.buf = self.bufbits = 0         # bitstream read buffer
        self.block = [0] * 64               # coefficient workspace
        self.rstinterval = 0
        self.rgb = None                     # final interleaved output
# static nj_context_t nj;
# Module-level singleton decoder context (equivalent of the C original's
# static variable). Not thread-safe, as documented above.
nj = nj_context_t()
# Zig-zag scan order: maps the bitstream's coefficient index to its
# row-major position inside the 8x8 block.
njZZ = [ 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18,
11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35,
42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45,
38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63 ]
def njClip(x):
    """Clamp *x* to the unsigned 8-bit sample range [0, 255]."""
    return min(max(x, 0), 0xFF)
# Fixed-point IDCT coefficients: Wk = round(2048 * sqrt(2) * cos(k*pi/16)),
# the classic constants of the integer 8-point IDCT used by NanoJPEG.
W1 = 2841
W2 = 2676
W3 = 2408
W5 = 1609
W6 = 1108
W7 = 565
def njRowIDCT(blk, p):
    """In-place 8-point integer IDCT over one row of the 8x8 block.

    *blk* is the 64-entry coefficient list; *p* is the index of the row's
    first element. Uses the fixed-point W* constants defined above.
    """
    x1 = blk[p + 4] << 11
    x2 = blk[p + 6]
    x3 = blk[p + 2]
    x4 = blk[p + 1]
    x5 = blk[p + 7]
    x6 = blk[p + 5]
    x7 = blk[p + 3]
    # Shortcut: all AC terms zero -> the whole row equals the scaled DC term.
    if (not (x1 | x2 | x3 | x4 | x5 | x6 | x7)):
        v = blk[p + 0] << 3
        blk[p + 0] = v
        blk[p + 1] = v
        blk[p + 2] = v
        blk[p + 3] = v
        blk[p + 4] = v
        blk[p + 5] = v
        blk[p + 6] = v
        blk[p + 7] = v
        return
    # Butterfly stages of the fixed-point IDCT; +128 pre-rounds the final >>8.
    x0 = (blk[p + 0] << 11) + 128
    x8 = W7 * (x4 + x5)
    x4 = x8 + (W1 - W7) * x4
    x5 = x8 - (W1 + W7) * x5
    x8 = W3 * (x6 + x7)
    x6 = x8 - (W3 - W5) * x6
    x7 = x8 - (W3 + W5) * x7
    x8 = x0 + x1
    x0 -= x1
    x1 = W6 * (x3 + x2)
    x2 = x1 - (W2 + W6) * x2
    x3 = x1 + (W2 - W6) * x3
    x1 = x4 + x6
    x4 -= x6
    x6 = x5 + x7
    x5 -= x7
    x7 = x8 + x3
    x8 -= x3
    x3 = x0 + x2
    x0 -= x2
    x2 = (181 * (x4 + x5) + 128) >> 8
    x4 = (181 * (x4 - x5) + 128) >> 8
    # Output stage: descale by 8 bits and store the row back in place.
    blk[p + 0] = (x7 + x1) >> 8
    blk[p + 1] = (x3 + x2) >> 8
    blk[p + 2] = (x0 + x4) >> 8
    blk[p + 3] = (x8 + x6) >> 8
    blk[p + 4] = (x8 - x6) >> 8
    blk[p + 5] = (x0 - x4) >> 8
    blk[p + 6] = (x3 - x2) >> 8
    blk[p + 7] = (x7 - x1) >> 8
#blk was a char *, but we need to use an array and an index instead.
#sout is an extra parameter: it is the string what we need to modify,
#and out is the position inside sout (index)
def njColIDCT(blk, p, sout, out, stride):
    """Column pass of the 8-point integer IDCT, writing clipped samples.

    *blk*/*p* select the column (entries p, p+8, ..., p+56). Results are
    level-shifted by +128, clipped to [0, 255] and written into the output
    array *sout* starting at index *out*, advancing by *stride* per row.
    (In the C original sout/out were a single char pointer.)
    """
    x1 = blk[p + 8*4] << 8
    x2 = blk[p + 8*6]
    x3 = blk[p + 8*2]
    x4 = blk[p + 8*1]
    x5 = blk[p + 8*7]
    x6 = blk[p + 8*5]
    x7 = blk[p + 8*3]
    # Shortcut: all AC terms zero -> constant column from the DC term alone.
    if (not (x1 | x2 | x3 | x4 | x5 | x6 | x7)):
        x1 = njClip(((blk[p + 0] + 32) >> 6) + 128)
        x0 = 8
        while x0:
            sout[out] = x1
            out += stride
            x0 -= 1
        return
    # Butterfly stages; +8192 pre-rounds the final >>14 descaling.
    x0 = (blk[p + 0] << 8) + 8192
    x8 = W7 * (x4 + x5) + 4
    x4 = (x8 + (W1 - W7) * x4) >> 3
    x5 = (x8 - (W1 + W7) * x5) >> 3
    x8 = W3 * (x6 + x7) + 4
    x6 = (x8 - (W3 - W5) * x6) >> 3
    x7 = (x8 - (W3 + W5) * x7) >> 3
    x8 = x0 + x1
    x0 -= x1
    x1 = W6 * (x3 + x2) + 4
    x2 = (x1 - (W2 + W6) * x2) >> 3
    x3 = (x1 + (W2 - W6) * x3) >> 3
    x1 = x4 + x6
    x4 -= x6
    x6 = x5 + x7
    x5 -= x7
    x7 = x8 + x3
    x8 -= x3
    x3 = x0 + x2
    x0 -= x2
    x2 = (181 * (x4 + x5) + 128) >> 8
    x4 = (181 * (x4 - x5) + 128) >> 8
    # Output stage: descale (>>14), shift to unsigned range (+128) and clip.
    sout[out] = njClip(((x7 + x1) >> 14) + 128)
    out += stride
    sout[out] = njClip(((x3 + x2) >> 14) + 128)
    out += stride
    sout[out] = njClip(((x0 + x4) >> 14) + 128)
    out += stride
    sout[out] = njClip(((x8 + x6) >> 14) + 128)
    out += stride
    sout[out] = njClip(((x8 - x6) >> 14) + 128)
    out += stride
    sout[out] = njClip(((x0 - x4) >> 14) + 128)
    out += stride
    sout[out] = njClip(((x3 - x2) >> 14) + 128)
    out += stride
    sout[out] = njClip(((x7 - x1) >> 14) + 128)
def njShowBits(bits):
    """Peek at the next *bits* bits of the entropy-coded stream without
    consuming them.

    Refills the bit buffer from nj.spos as needed, handling JPEG byte
    stuffing (0xFF 0x00), restart markers (0xD0-0xD7, fed back into the
    buffer) and EOI (0xD9, which ends the input). Past the end of the
    data the buffer is padded with 0xFF bytes, like the C original.
    Raises Exception(NJ_SYNTAX_ERROR) on an unexpected marker.
    """
    if (not bits): return 0
    while (nj.bufbits < bits):
        if (nj.size <= 0):
            # Out of data: pad with 0xFF so decoding can run to completion.
            nj.buf = (nj.buf << 8) | 0xFF
            nj.bufbits += 8
            continue
        newbyte = nj.spos[nj.pos]
        nj.pos += 1
        nj.size -= 1
        nj.bufbits += 8
        nj.buf = (nj.buf << 8) | newbyte
        if (newbyte == 0xFF):
            # 0xFF in the entropy stream is followed by a marker byte.
            if (nj.size):
                marker = nj.spos[nj.pos]
                nj.pos += 1
                nj.size -= 1
                if marker == 0xD9:
                    # EOI: stop consuming input.
                    nj.size = 0
                elif marker != 0:
                    # Only restart markers (0xD0-0xD7) are legal here.
                    if ((marker & 0xF8) != 0xD0):
                        raise Exception(NJ_SYNTAX_ERROR)
                    else:
                        nj.buf = (nj.buf << 8) | marker
                        nj.bufbits += 8
                # marker == 0: stuffed byte, the 0xFF above is literal data.
            else:
                raise Exception(NJ_SYNTAX_ERROR)
    nj.buf = nj.buf & ((1 << nj.bufbits) - 1)
    return (nj.buf >> (nj.bufbits - bits)) & ((1 << bits) - 1)
def njSkipBits(bits):
 """Consume *bits* bits from the bit buffer, refilling it first if needed."""
 if nj.bufbits < bits:
  njShowBits(bits)  # refill so the subtraction below cannot go negative
 nj.bufbits = nj.bufbits - bits
def njGetBits(bits):
 """Read and consume *bits* bits from the stream, returning their value."""
 value = njShowBits(bits)
 njSkipBits(bits)
 return value
def njByteAlign():
 """Drop buffered bits so the stream is byte-aligned again (e.g. before RSTn)."""
 nj.bufbits = nj.bufbits & 0xF8
def njSkip(count):
 """Advance the read position by *count* bytes, tracking remaining sizes."""
 nj.pos += count
 nj.size -= count
 nj.length -= count
 if nj.size < 0:
  raise Exception(NJ_SYNTAX_ERROR)
def njDecode16(pos):
 """Return the big-endian 16-bit value stored at *pos* in the input."""
 hi = nj.spos[pos]
 lo = nj.spos[pos + 1]
 return (hi << 8) | lo
def njDecodeLength():
 """Read a marker segment's 16-bit length field into nj.length and skip it."""
 if nj.size < 2:
  raise Exception(NJ_SYNTAX_ERROR)
 segment_length = njDecode16(nj.pos)
 if segment_length > nj.size:
  raise Exception(NJ_SYNTAX_ERROR)
 nj.length = segment_length
 njSkip(2)
def njSkipMarker():
 """Skip an entire marker segment whose contents we do not care about."""
 njDecodeLength()
 njSkip(nj.length)
def njDecodeSOF():
 """Decode the Start-Of-Frame (baseline) segment.

 Reads image dimensions and per-component sampling factors, validates
 them, and allocates the per-component pixel planes sized in whole
 MCUs (minimum coded units).
 """
 ssxmax = 0
 ssymax = 0
 njDecodeLength()
 if (nj.length < 9):
  raise Exception(NJ_SYNTAX_ERROR)
 if (nj.spos[nj.pos] != 8):  # only 8-bit sample precision is supported
  raise Exception(NJ_UNSUPPORTED)
 nj.height = njDecode16(nj.pos + 1)
 nj.width = njDecode16(nj.pos + 3)
 nj.ncomp = nj.spos[nj.pos + 5]
 njSkip(6)
 if nj.ncomp != 1 and nj.ncomp != 3:  # grayscale or YCbCr only
  raise Exception(NJ_UNSUPPORTED)
 if (nj.length < (nj.ncomp * 3)):
  raise Exception(NJ_SYNTAX_ERROR)
 i = 0
 while i < nj.ncomp:
  c = nj.comp[i]
  c.cid = nj.spos[nj.pos]
  c.ssx = nj.spos[nj.pos + 1] >> 4
  if not c.ssx:
   raise Exception(NJ_SYNTAX_ERROR)
  if (c.ssx & (c.ssx - 1)):
   raise Exception(NJ_UNSUPPORTED) # non-power of two
  c.ssy = nj.spos[nj.pos + 1] & 15
  if not c.ssy:
   raise Exception(NJ_SYNTAX_ERROR)
  if (c.ssy & (c.ssy - 1)):
   raise Exception(NJ_UNSUPPORTED) # non-power of two
  c.qtsel = nj.spos[nj.pos + 2]
  if c.qtsel & 0xFC:  # quantization table selector must be 0..3
   raise Exception(NJ_SYNTAX_ERROR)
  njSkip(3)
  nj.qtused |= 1 << c.qtsel
  if (c.ssx > ssxmax): ssxmax = c.ssx
  if (c.ssy > ssymax): ssymax = c.ssy
  i += 1
 # MCU size in pixels, and image size in whole MCUs (rounded up).
 nj.mbsizex = ssxmax << 3
 nj.mbsizey = ssymax << 3
 nj.mbwidth = (nj.width + nj.mbsizex - 1) // nj.mbsizex
 nj.mbheight = (nj.height + nj.mbsizey - 1) // nj.mbsizey
 i = 0
 while i < nj.ncomp:
  c = nj.comp[i]
  c.width = (nj.width * c.ssx + ssxmax - 1) // ssxmax
  # NOTE(review): the next two assignments are dead code -- c.stride is
  # unconditionally recomputed two lines below before it is ever read.
  c.stride = (c.width + 7)
  c.stride = c.stride & 0x7FFFFFF8
  c.height = (nj.height * c.ssy + ssymax - 1) // ssymax
  c.stride = nj.mbwidth * nj.mbsizex * c.ssx // ssxmax
  if (((c.width < 3) and (c.ssx != ssxmax)) or ((c.height < 3) and (c.ssy != ssymax))):
   raise Exception(NJ_UNSUPPORTED)
  c.pixels = [0] * (c.stride * (nj.mbheight * nj.mbsizey * c.ssy // ssymax))
  i += 1
 if (nj.ncomp == 3):
  nj.rgb = [0] * (nj.width * nj.height * nj.ncomp)
 njSkip(nj.length)
def njDecodeDHT():
 """Decode a DHT segment, expanding each Huffman table into a flat
 65536-entry lookup keyed by the next 16 bits of the stream (as peeked
 by njShowBits); each entry stores the code length and decoded symbol.
 """
 counts = [0] * 16
 njDecodeLength()
 while (nj.length >= 17):
  i = nj.spos[nj.pos]
  if (i & 0xEC):  # only the DC/AC class bit and table ids 0/1 are legal
   raise Exception(NJ_SYNTAX_ERROR)
  if (i & 0x02):
   raise Exception(NJ_UNSUPPORTED)
  i = (i | (i >> 3)) & 3 # combined DC/AC + tableid value
  for codelen in range(1, 17): # 1 to 16
   counts[codelen - 1] = nj.spos[nj.pos + codelen]
  njSkip(17)
  vlc = 0  # write index into the flat lookup table
  remain = 65536  # table slots not yet claimed by any code
  spread = 65536  # slots covered by one code of the current length
  for codelen in range(1, 17): # 1 to 16
   spread >>= 1
   currcnt = counts[codelen - 1]
   if not currcnt: continue
   if (nj.length < currcnt):
    raise Exception(NJ_SYNTAX_ERROR)
   remain -= currcnt << (16 - codelen)
   if (remain < 0):  # over-subscribed Huffman table
    raise Exception(NJ_SYNTAX_ERROR)
   for ii in range(currcnt):
    code = nj.spos[nj.pos + ii]
    j = spread
    while j:
     nj.vlctab[i][vlc].bits = codelen
     nj.vlctab[i][vlc].code = code
     vlc += 1
     j -= 1
   njSkip(currcnt)
  while remain:  # mark the unused tail as invalid (bits == 0)
   remain -= 1
   nj.vlctab[i][vlc].bits = 0
   vlc += 1
 if (nj.length):
  raise Exception(NJ_SYNTAX_ERROR)
def njDecodeDQT():
 """Decode a DQT segment, loading one or more 64-entry quantization tables."""
 njDecodeLength()
 while nj.length >= 65:
  table_id = nj.spos[nj.pos]
  if table_id & 0xFC:  # only ids 0..3 with 8-bit precision are allowed
   raise Exception(NJ_SYNTAX_ERROR)
  nj.qtavail |= 1 << table_id
  for j in range(64):
   nj.qtab[table_id][j] = nj.spos[nj.pos + 1 + j]
  njSkip(65)
 if nj.length:
  raise Exception(NJ_SYNTAX_ERROR)
def njDecodeDRI():
 """Decode a DRI segment, storing the restart interval (in MCUs)."""
 njDecodeLength()
 if nj.length < 2:
  raise Exception(NJ_SYNTAX_ERROR)
 nj.rstinterval = njDecode16(nj.pos)
 njSkip(nj.length)
#code is an array with one element, since we need to return the code to the caller
def njGetVLC(vlc, code):
 """Decode one Huffman symbol and its signed coefficient value.

 vlc is a flat 65536-entry table built by njDecodeDHT; code, if not
 None, is a one-element list that receives the raw symbol byte (zero
 run length in the high nibble, magnitude bit count in the low nibble).
 """
 value = njShowBits(16)
 bits = vlc[value].bits
 if not bits:  # no Huffman code matches this bit pattern
  raise Exception(NJ_SYNTAX_ERROR)
 njSkipBits(bits)
 value = vlc[value].code
 if code: code[0] = value
 bits = value & 15  # number of magnitude bits that follow
 if not bits: return 0
 value = njGetBits(bits)
 if (value < (1 << (bits - 1))):
  # JPEG codes negative magnitudes with the top bit clear; sign-extend.
  value += ((-1) << bits) + 1
 return value
#sout is a new parameter, because we need to modify the passed in array, so
#out is now just the index in out
def njDecodeBlock(c, sout, out):
 """Decode one 8x8 block of component c into sout starting at index out.

 Entropy-decodes the DC delta and the AC run/level pairs, dequantizes
 them in zig-zag order into nj.block, then applies the row and column
 IDCT passes, writing clipped 8-bit samples with row pitch c.stride.
 """
 code = [0]
 value = 0
 coef = 0
 for i in range(len(nj.block)):
  nj.block[i] = 0
 # The DC coefficient is coded as a difference from the previous block.
 c.dcpred += njGetVLC(nj.vlctab[c.dctabsel], None)
 nj.block[0] = c.dcpred * nj.qtab[c.qtsel][0]
 while True: # do {
  value = njGetVLC(nj.vlctab[c.actabsel], code);
  if not code[0]: break # EOB
  if (not (code[0] & 0x0F) and (code[0] != 0xF0)):
   raise Exception(NJ_SYNTAX_ERROR)
  coef += (code[0] >> 4) + 1  # skip the coded run of zero coefficients
  if coef > 63:
   raise Exception(NJ_SYNTAX_ERROR)
  nj.block[njZZ[coef]] = value * nj.qtab[c.qtsel][coef]
  # } while (coef < 63);
  if coef >= 63: break
 coef = 0
 while coef < 64:  # row IDCT pass, one call per 8 coefficients
  njRowIDCT(nj.block, coef)
  coef += 8
 for coef in range(8):  # then the column IDCT pass, one call per column
  njColIDCT(nj.block, coef, sout, out + coef, c.stride)
def njDecodeScan():
 """Decode the SOS segment and the entropy-coded scan that follows.

 Validates the scan header, then decodes every MCU in raster order,
 handling restart markers when nj.rstinterval is set.  Sets nj.error
 to __NJ_FINISHED on success (mirroring the C original's control flow).
 """
 rstcount = nj.rstinterval
 nextrst = 0
 # nj_component_t* c;
 njDecodeLength()
 if (nj.length < (4 + 2 * nj.ncomp)):
  raise Exception(NJ_SYNTAX_ERROR)
 if (nj.spos[nj.pos] != nj.ncomp):
  raise Exception(NJ_UNSUPPORTED)
 njSkip(1)
 i = 0
 while (i < nj.ncomp):
  c = nj.comp[i]
  if (nj.spos[nj.pos] != c.cid):
   raise Exception(NJ_SYNTAX_ERROR)
  if (nj.spos[nj.pos + 1] & 0xEE):
   raise Exception(NJ_SYNTAX_ERROR)
  c.dctabsel = nj.spos[nj.pos + 1] >> 4
  c.actabsel = (nj.spos[nj.pos + 1] & 1) | 2
  njSkip(2)
  i += 1
 # Baseline only: spectral selection 0..63, no successive approximation.
 if (nj.spos[nj.pos] or (nj.spos[nj.pos + 1] != 63) or nj.spos[nj.pos + 2]):
  raise Exception(NJ_UNSUPPORTED)
 njSkip(nj.length)
 mbx = 0
 mby = 0
 while True:
  i = 0
  while (i < nj.ncomp):
   c = nj.comp[i]
   sby = 0
   while sby < c.ssy:
    sbx = 0
    while sbx < c.ssx:
     njDecodeBlock(c, c.pixels, ((mby * c.ssy + sby) * c.stride + mbx * c.ssx + sbx) << 3)
     if nj.error:
      return
     sbx += 1
    sby += 1
   i += 1
  mbx += 1
  if mbx >= nj.mbwidth:
   mbx = 0
   mby += 1
   if mby >= nj.mbheight: break
  rstcount -= 1
  if (nj.rstinterval and not rstcount):
   njByteAlign()
   i = njGetBits(16)  # must be the expected RSTn marker
   if (((i & 0xFFF8) != 0xFFD0) or ((i & 7) != nextrst)):
    raise Exception(NJ_SYNTAX_ERROR)
   nextrst = (nextrst + 1) & 7
   rstcount = nj.rstinterval
   # NOTE(review): resets all three DC predictors even when ncomp == 1;
   # presumably nj.comp always holds three entries -- verify.
   for i in range(3):
    nj.comp[i].dcpred = 0
 nj.error = __NJ_FINISHED
#if NJ_CHROMA_FILTER
# Fixed-point coefficients (each kernel sums to 128) for the 4-, 3- and
# 2-tap chroma upsampling filters used by njUpsampleH/njUpsampleV; the
# CF() helper scales the accumulated sum back down.
CF4A = -9
CF4B = 111
CF4C = 29
CF4D = -3
CF3A = 28
CF3B = 109
CF3C = -9
CF3X = 104
CF3Y = 27
CF3Z = -3
CF2A = 139
CF2B = -11
def CF(x):
 """Round a filter accumulator back down (divide by 128) and clamp to 0..255."""
 return njClip((x + 64) >> 7)
def njUpsampleH(c):
 """Double component c's width in place using the chroma filter kernels.

 Interior pixels use the 4-tap CF4* kernel; the first and last three
 outputs of each row use the shorter CF3*/CF2* edge kernels.  Updates
 c.width, c.stride and c.pixels.
 """
 xmax = c.width - 3
 out = [0] * ((c.width * c.height) << 1)
 lin = 0  # read index (start of the current input row)
 lout = 0  # write index (start of the current output row)
 y = c.height
 while y:
  # Left-edge outputs from the short kernels.
  out[lout + 0] = CF(CF2A * c.pixels[lin + 0] + CF2B * c.pixels[lin + 1])
  out[lout + 1] = CF(CF3X * c.pixels[lin + 0] + CF3Y * c.pixels[lin + 1] + CF3Z * c.pixels[lin + 2])
  out[lout + 2] = CF(CF3A * c.pixels[lin + 0] + CF3B * c.pixels[lin + 1] + CF3C * c.pixels[lin + 2])
  for x in range(xmax):
   out[lout + (x << 1) + 3] = CF(CF4A * c.pixels[lin + x] + CF4B * c.pixels[lin + x + 1] + CF4C * c.pixels[lin + x + 2] + CF4D * c.pixels[lin + x + 3])
   out[lout + (x << 1) + 4] = CF(CF4D * c.pixels[lin + x] + CF4C * c.pixels[lin + x + 1] + CF4B * c.pixels[lin + x + 2] + CF4A * c.pixels[lin + x + 3])
  lin += c.stride
  lout += c.width << 1
  # Right-edge outputs: lin already points at the next row, so lin - 1
  # is the pixel just before it (the end of the row just processed).
  out[lout - 3] = CF(CF3A * c.pixels[lin - 1] + CF3B * c.pixels[lin - 2] + CF3C * c.pixels[lin - 3])
  out[lout - 2] = CF(CF3X * c.pixels[lin - 1] + CF3Y * c.pixels[lin - 2] + CF3Z * c.pixels[lin - 3])
  out[lout - 1] = CF(CF2A * c.pixels[lin - 1] + CF2B * c.pixels[lin - 2])
  y -= 1
 c.width <<= 1
 c.stride = c.width  # output rows are tightly packed
 c.pixels = out
def njUpsampleV(c):
 """Double component c's height in place using the chroma filter kernels.

 Processes one column per outer iteration; the first and last three
 output rows use the shorter CF3*/CF2* edge kernels.  Updates c.height,
 c.stride and c.pixels.
 """
 w = c.width
 s1 = c.stride  # offset of the pixel one input row down
 s2 = s1 + s1  # offset of the pixel two input rows down
 out = [0] * ((c.width * c.height) << 1)
 for x in range(w):
  cin = x  # read index, walks down the input column
  cout = x  # write index, walks down the output column
  # Top-edge outputs from the short kernels.
  out[cout] = CF(CF2A * c.pixels[cin] + CF2B * c.pixels[cin + s1])
  cout += w
  out[cout] = CF(CF3X * c.pixels[cin] + CF3Y * c.pixels[cin + s1] + CF3Z * c.pixels[cin + s2])
  cout += w
  out[cout] = CF(CF3A * c.pixels[cin] + CF3B * c.pixels[cin + s1] + CF3C * c.pixels[cin + s2])
  cout += w
  cin += s1
  y = c.height - 3
  while y:
   out[cout] = CF(CF4A * c.pixels[cin - s1] + CF4B * c.pixels[cin] + CF4C * c.pixels[cin + s1] + CF4D * c.pixels[cin + s2])
   cout += w
   out[cout] = CF(CF4D * c.pixels[cin - s1] + CF4C * c.pixels[cin] + CF4B * c.pixels[cin + s1] + CF4A * c.pixels[cin + s2])
   cout += w
   cin += s1
   y -= 1
  cin += s1
  # Bottom-edge outputs, mirroring the kernels upward from the last row.
  out[cout] = CF(CF3A * c.pixels[cin] + CF3B * c.pixels[cin - s1] + CF3C * c.pixels[cin - s2])
  cout += w
  out[cout] = CF(CF3X * c.pixels[cin] + CF3Y * c.pixels[cin - s1] + CF3Z * c.pixels[cin - s2])
  cout += w
  out[cout] = CF(CF2A * c.pixels[cin] + CF2B * c.pixels[cin - s1])
 c.height <<= 1
 c.stride = c.width  # output rows are tightly packed
 c.pixels = out
#else
# NJ_INLINE void njUpsample(nj_component_t* c) {
# int x, y, xshift = 0, yshift = 0;
# unsigned char *out, *lin, *lout;
# while (c->width < nj.width) { c->width <<= 1; ++xshift; }
# while (c->height < nj.height) { c->height <<= 1; ++yshift; }
# out = njAllocMem(c->width * c->height);
# if (!out) njThrow(NJ_OUT_OF_MEM);
# lin = c->pixels;
# lout = out;
# for (y = 0; y < c->height; ++y) {
# lin = &c->pixels[(y >> yshift) * c->stride];
# for (x = 0; x < c->width; ++x)
# lout[x] = lin[x >> xshift];
# lout += c->width;
# }
# c->stride = c->width;
# njFreeMem(c->pixels);
# c->pixels = out;
# }
#endif
def njConvert():
 """Upsample all planes to full resolution and convert to RGB.

 For three-component images the planes are combined into the
 interleaved nj.rgb buffer with integer-scaled BT.601 YCbCr -> RGB
 coefficients (359/256, 88/256, 183/256, 454/256).  Grayscale images
 would only need their row padding removed, which was never ported
 from the C original (see the NOTE below).
 """
 for i in range(nj.ncomp):
  c = nj.comp[i]
  if NJ_CHROMA_FILTER:
   # Repeatedly double the plane until it matches the image size.
   while ((c.width < nj.width) or (c.height < nj.height)):
    if c.width < nj.width: njUpsampleH(c)
    if nj.error: return
    if c.height < nj.height: njUpsampleV(c)
    if nj.error: return
  else:
   if ((c.width < nj.width) or (c.height < nj.height)):
    njUpsample(c)
  if ((c.width < nj.width) or (c.height < nj.height)):
   raise Exception(NJ_INTERNAL_ERR)
   # NOTE(review): unreachable after the raise above; leftover from the
   # C njThrow macro, which set the error code and returned.
   return
 if nj.ncomp == 3:
  # convert to RGB
  prgb = 0
  py = 0
  pcb = 0
  pcr = 0
  yy = nj.height
  #print( 'nj.width: %s, nj.height: %s, strides: %s, %s, %s, lengths: %s, %s, %s, first few bytes: %s, %s, %s' % \
  # (nj.width, nj.height, nj.comp[0].stride, nj.comp[1].stride, nj.comp[2].stride,
  # len(nj.comp[0].pixels), len(nj.comp[1].pixels), len(nj.comp[2].pixels),
  # nj.comp[0].pixels[0], nj.comp[0].pixels[1], nj.comp[0].pixels[2]))
  while yy:
   for x in range(nj.width):
    y = nj.comp[0].pixels[py + x] << 8
    # print 'len(nj.comp): %s, len(nj.comp[1].pixels): %s, pcb + x: %s, yy: %s, x: %s' % \
    # (len(nj.comp), len(nj.comp[1].pixels), pcb + x, yy, x)
    cb = nj.comp[1].pixels[pcb + x] - 128
    cr = nj.comp[2].pixels[pcr + x] - 128
    nj.rgb[prgb] = njClip((y + 359 * cr + 128) >> 8)
    prgb += 1
    nj.rgb[prgb] = njClip((y - 88 * cb - 183 * cr + 128) >> 8)
    prgb += 1
    nj.rgb[prgb] = njClip((y + 454 * cb + 128) >> 8)
    prgb += 1
   py += nj.comp[0].stride
   pcb += nj.comp[1].stride
   pcr += nj.comp[2].stride
   yy -= 1
 elif (nj.comp[0].width != nj.comp[0].stride):
  # grayscale -> only remove stride
  # unsigned char *pin = &nj.comp[0].pixels[nj.comp[0].stride];
  # unsigned char *pout = &nj.comp[0].pixels[nj.comp[0].width];
  # int y;
  # for (y = nj.comp[0].height - 1; y; --y) {
  # njCopyMem(pout, pin, nj.comp[0].width);
  # pin += nj.comp[0].stride;
  # pout += nj.comp[0].width;
  # }
  # nj.comp[0].stride = nj.comp[0].width;
  # NOTE(review): the C de-striding above was never ported, so grayscale
  # output still carries row padding -- confirm callers handle this.
  pass
def njInit():
 """Reset the global decoder context before decoding a new image."""
 # njFillMem(&nj, 0, sizeof(nj_context_t));
 nj.init()
def njDone():
 """Release decoder resources; a no-op here, Python garbage-collects."""
 pass
def njDecode(jpeg, size):
 """Decode a baseline JPEG held in *jpeg* (indexable bytes) of *size* bytes.

 Returns NJ_OK on success (pixels then available via njGetImage) or an
 error code.  Note this function reports errors through return values
 while the helpers it calls raise exceptions.
 """
 njDone()
 nj.spos = jpeg
 nj.pos = 0
 nj.size = size & 0x7FFFFFFF
 if (nj.size < 2): return NJ_NO_JPEG
 # The stream must start with the SOI marker FF D8.
 if ((nj.spos[nj.pos] ^ 0xFF) | (nj.spos[nj.pos + 1] ^ 0xD8)): return NJ_NO_JPEG
 njSkip(2)
 while not nj.error:
  if ((nj.size < 2) or (nj.spos[nj.pos] != 0xFF)):
   return NJ_SYNTAX_ERROR
  njSkip(2)
  m = nj.spos[nj.pos - 1]  # the marker byte that followed the 0xFF
  if m == 0xC0: njDecodeSOF()
  elif m == 0xC4: njDecodeDHT()
  elif m == 0xDB: njDecodeDQT()
  elif m == 0xDD: njDecodeDRI()
  elif m == 0xDA: njDecodeScan()
  elif m == 0xFE: njSkipMarker()
  elif (m & 0xF0) == 0xE0:  # APPn segments are skipped
   njSkipMarker()
  else:
   return NJ_UNSUPPORTED
 # njDecodeScan sets __NJ_FINISHED once the image is fully decoded.
 if (nj.error != __NJ_FINISHED): return nj.error
 nj.error = NJ_OK
 njConvert()
 return nj.error
def njGetWidth():
 """Return the decoded image width in pixels."""
 return nj.width
def njGetHeight():
 """Return the decoded image height in pixels."""
 return nj.height
def njIsColor():
 """Return True if the decoded image has more than one component."""
 return (nj.ncomp != 1)
def njGetImage():
 """Return the decoded pixels: the luma plane for grayscale, interleaved RGB otherwise."""
 return nj.comp[0].pixels if nj.ncomp == 1 else nj.rgb
def njGetImageSize():
 """Return the size of the decoded image buffer (width * height * components)."""
 return nj.width * nj.height * nj.ncomp
#endif // _NJ_INCLUDE_HEADER_ONLY
| mit |
servo/servo | components/script/dom/bindings/codegen/parser/tests/test_treatNonCallableAsNull.py | 170 | 1701 | import WebIDL
def WebIDLTest(parser, harness):
    """Check [TreatNonCallableAsNull] handling in the WebIDL parser.

    The extended attribute is only legal on a callback type definition;
    using it on an attribute or an interface, or combining it with
    [TreatNonObjectAsNull], must make the parser throw.
    """
    parser.parse("""
      [TreatNonCallableAsNull] callback Function = any(any... arguments);
      interface TestTreatNonCallableAsNull1 {
        attribute Function? onfoo;
        attribute Function onbar;
      };
    """)
    results = parser.finish()
    iface = results[1]
    attr = iface.members[0]
    harness.check(attr.type.treatNonCallableAsNull(), True, "Got the expected value")
    attr = iface.members[1]
    harness.check(attr.type.treatNonCallableAsNull(), False, "Got the expected value")
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            callback Function = any(any... arguments);
            interface TestTreatNonCallableAsNull2 {
              [TreatNonCallableAsNull] attribute Function onfoo;
            };
        """)
        results = parser.finish()
    # Fixed: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; only parser failures should count as "threw".
    except Exception:
        threw = True
    harness.ok(threw, "Should have thrown.")
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            callback Function = any(any... arguments);
            [TreatNonCallableAsNull]
            interface TestTreatNonCallableAsNull3 {
              attribute Function onfoo;
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(threw, "Should have thrown.")
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            [TreatNonCallableAsNull, TreatNonObjectAsNull]
            callback Function = any(any... arguments);
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
jayme-github/CouchPotatoServer | libs/pyutil/test/deprecated/test_dictutil.py | 106 | 3612 | #!/usr/bin/env python
# Copyright (c) 2002-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import random, sys, traceback, unittest
from pyutil.assertutil import _assert
from pyutil import dictutil
class EqButNotIs:
    """An object that is *equal to* its payload without *being* it.

    Instances hash to a random per-instance value, so two equal
    instances usually land in different hash buckets -- handy for
    stressing dict/set equality handling in the tests below.
    """
    def __init__(self, x):
        self.x = x
        self.hash = int(random.randrange(0, 2 ** 31))
    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.x,)
    def __hash__(self):
        return self.hash
    # All comparisons simply delegate to the wrapped payload.
    def __eq__(self, other):
        return self.x == other
    def __ne__(self, other):
        return self.x != other
    def __lt__(self, other):
        return self.x < other
    def __le__(self, other):
        return self.x <= other
    def __gt__(self, other):
        return self.x > other
    def __ge__(self, other):
        return self.x >= other
class Testy(unittest.TestCase):
    """Exercise pyutil's dict classes, including equality/hash corner
    cases driven by EqButNotIs.  Python 2 era code: failUnless,
    itervalues/iterkeys, truthy filter() results and 'x is 8'
    small-int-identity checks are all py2-isms.
    """
    def _help_test_empty_dict(self, klass):
        # Empty dicts built with and without an argument must agree.
        d1 = klass()
        d2 = klass({})
        self.failUnless(d1 == d2, "d1: %r, d2: %r" % (d1, d2,))
        self.failUnless(len(d1) == 0)
        self.failUnless(len(d2) == 0)
    def _help_test_nonempty_dict(self, klass):
        d1 = klass({'a': 1, 'b': "eggs", 3: "spam",})
        d2 = klass({'a': 1, 'b': "eggs", 3: "spam",})
        self.failUnless(d1 == d2)
        self.failUnless(len(d1) == 3, "%s, %s" % (len(d1), d1,))
        self.failUnless(len(d2) == 3)
    def _help_test_eq_but_notis(self, klass):
        # Stress __eq__/__hash__ handling with EqButNotIs objects that
        # compare equal to ints but hash to random values.
        d = klass({'a': 3, 'b': EqButNotIs(3), 'c': 3})
        d.pop('b')
        d.clear()
        d['a'] = 3
        d['b'] = EqButNotIs(3)
        d['c'] = 3
        d.pop('b')
        d.clear()
        d['b'] = EqButNotIs(3)
        d['a'] = 3
        d['c'] = 3
        d.pop('b')
        d.clear()
        d['a'] = EqButNotIs(3)
        d['c'] = 3
        d['a'] = 3
        d.clear()
        fake3 = EqButNotIs(3)
        fake7 = EqButNotIs(7)
        d[fake3] = fake7
        d[3] = 7
        d[3] = 8
        # NOTE(review): 'x is 8' relies on CPython small-int interning.
        _assert(filter(lambda x: x is 8, d.itervalues()))
        _assert(filter(lambda x: x is fake7, d.itervalues()))
        _assert(not filter(lambda x: x is 7, d.itervalues())) # The real 7 should have been ejected by the d[3] = 8.
        _assert(filter(lambda x: x is fake3, d.iterkeys()))
        _assert(filter(lambda x: x is 3, d.iterkeys()))
        d[fake3] = 8
        d.clear()
        d[3] = 7
        fake3 = EqButNotIs(3)
        fake7 = EqButNotIs(7)
        d[fake3] = fake7
        d[3] = 8
        _assert(filter(lambda x: x is 8, d.itervalues()))
        _assert(filter(lambda x: x is fake7, d.itervalues()))
        _assert(not filter(lambda x: x is 7, d.itervalues())) # The real 7 should have been ejected by the d[3] = 8.
        _assert(filter(lambda x: x is fake3, d.iterkeys()))
        _assert(filter(lambda x: x is 3, d.iterkeys()))
        d[fake3] = 8
    def test_em(self):
        # Run every helper against every dict flavour under test.
        for klass in (dictutil.UtilDict, dictutil.NumDict, dictutil.ValueOrderedDict,):
            # print "name of class: ", klass
            for helper in (self._help_test_empty_dict, self._help_test_nonempty_dict, self._help_test_eq_but_notis,):
                # print "name of test func: ", helper
                try:
                    helper(klass)
                except:
                    (etype, evalue, realtb) = sys.exc_info()
                    traceback.print_exception(etype, evalue, realtb)
                    self.fail(evalue)
                    del realtb
def suite():
    """Build a TestSuite containing every 'test*' method of Testy."""
    return unittest.makeSuite(Testy, 'test')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
smalls257/VRvisu | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/test/test_symtable.py | 20 | 6284 | """
Test the API of the symtable module.
"""
import symtable
import unittest
from test import test_support
TEST_CODE = """
import sys
glob = 42
class Mine:
instance_var = 24
def a_method(p1, p2):
pass
def spam(a, b, *var, **kw):
global bar
bar = 47
x = 23
glob
def internal():
return x
return internal
def foo():
exec 'm'
from sys import *
def namespace_test(): pass
def namespace_test(): pass
"""
def find_block(block, name):
    """Return the first child symtable block of *block* named *name*, or None."""
    matches = (child for child in block.get_children()
               if child.get_name() == name)
    return next(matches, None)
class SymtableTest(unittest.TestCase):
    """Checks for the symtable module API, driven by the symbol table of
    TEST_CODE (built once, at class-definition time)."""
    with test_support.check_warnings(
            ("import \* only allowed at module level", SyntaxWarning)):
        top = symtable.symtable(TEST_CODE, "?", "exec")
    # These correspond to scopes in TEST_CODE
    Mine = find_block(top, "Mine")
    a_method = find_block(Mine, "a_method")
    spam = find_block(top, "spam")
    internal = find_block(spam, "internal")
    foo = find_block(top, "foo")
    def test_type(self):
        self.assertEqual(self.top.get_type(), "module")
        self.assertEqual(self.Mine.get_type(), "class")
        self.assertEqual(self.a_method.get_type(), "function")
        self.assertEqual(self.spam.get_type(), "function")
        self.assertEqual(self.internal.get_type(), "function")
    def test_optimized(self):
        # Only plain functions without exec/import-star are "optimized".
        self.assertFalse(self.top.is_optimized())
        self.assertFalse(self.top.has_exec())
        self.assertFalse(self.top.has_import_star())
        self.assertTrue(self.spam.is_optimized())
        self.assertFalse(self.foo.is_optimized())
        self.assertTrue(self.foo.has_exec())
        self.assertTrue(self.foo.has_import_star())
    def test_nested(self):
        self.assertFalse(self.top.is_nested())
        self.assertFalse(self.Mine.is_nested())
        self.assertFalse(self.spam.is_nested())
        self.assertTrue(self.internal.is_nested())
    def test_children(self):
        self.assertTrue(self.top.has_children())
        self.assertTrue(self.Mine.has_children())
        self.assertFalse(self.foo.has_children())
    def test_lineno(self):
        self.assertEqual(self.top.get_lineno(), 0)
        self.assertEqual(self.spam.get_lineno(), 11)
    def test_function_info(self):
        func = self.spam
        self.assertEqual(func.get_parameters(), ("a", "b", "kw", "var"))
        self.assertEqual(func.get_locals(),
                         ("a", "b", "internal", "kw", "var", "x"))
        self.assertEqual(func.get_globals(), ("bar", "glob"))
        self.assertEqual(self.internal.get_frees(), ("x",))
    def test_globals(self):
        self.assertTrue(self.spam.lookup("glob").is_global())
        self.assertFalse(self.spam.lookup("glob").is_declared_global())
        self.assertTrue(self.spam.lookup("bar").is_global())
        self.assertTrue(self.spam.lookup("bar").is_declared_global())
        self.assertFalse(self.internal.lookup("x").is_global())
        self.assertFalse(self.Mine.lookup("instance_var").is_global())
    def test_local(self):
        self.assertTrue(self.spam.lookup("x").is_local())
        self.assertFalse(self.internal.lookup("x").is_local())
    def test_referenced(self):
        self.assertTrue(self.internal.lookup("x").is_referenced())
        self.assertTrue(self.spam.lookup("internal").is_referenced())
        self.assertFalse(self.spam.lookup("x").is_referenced())
    def test_parameters(self):
        for sym in ("a", "var", "kw"):
            self.assertTrue(self.spam.lookup(sym).is_parameter())
        self.assertFalse(self.spam.lookup("x").is_parameter())
    def test_symbol_lookup(self):
        self.assertEqual(len(self.top.get_identifiers()),
                         len(self.top.get_symbols()))
        self.assertRaises(KeyError, self.top.lookup, "not_here")
    def test_namespaces(self):
        self.assertTrue(self.top.lookup("Mine").is_namespace())
        self.assertTrue(self.Mine.lookup("a_method").is_namespace())
        self.assertTrue(self.top.lookup("spam").is_namespace())
        self.assertTrue(self.spam.lookup("internal").is_namespace())
        self.assertTrue(self.top.lookup("namespace_test").is_namespace())
        self.assertFalse(self.spam.lookup("x").is_namespace())
        self.assertTrue(self.top.lookup("spam").get_namespace() is self.spam)
        # namespace_test is defined twice, so get_namespace() is ambiguous.
        ns_test = self.top.lookup("namespace_test")
        self.assertEqual(len(ns_test.get_namespaces()), 2)
        self.assertRaises(ValueError, ns_test.get_namespace)
    def test_assigned(self):
        self.assertTrue(self.spam.lookup("x").is_assigned())
        self.assertTrue(self.spam.lookup("bar").is_assigned())
        self.assertTrue(self.top.lookup("spam").is_assigned())
        self.assertTrue(self.Mine.lookup("a_method").is_assigned())
        self.assertFalse(self.internal.lookup("x").is_assigned())
    def test_imported(self):
        self.assertTrue(self.top.lookup("sys").is_imported())
    def test_name(self):
        self.assertEqual(self.top.get_name(), "top")
        self.assertEqual(self.spam.get_name(), "spam")
        self.assertEqual(self.spam.lookup("x").get_name(), "x")
        self.assertEqual(self.Mine.get_name(), "Mine")
    def test_class_info(self):
        self.assertEqual(self.Mine.get_methods(), ('a_method',))
    def test_filename_correct(self):
        ### Bug tickler: SyntaxError file name correct whether error raised
        ### while parsing or building symbol table.
        def checkfilename(brokencode):
            try:
                symtable.symtable(brokencode, "spam", "exec")
            except SyntaxError as e:
                self.assertEqual(e.filename, "spam")
            else:
                self.fail("no SyntaxError for %r" % (brokencode,))
        checkfilename("def f(x): foo)(") # parse-time
        checkfilename("def f(x): global x") # symtable-build-time
    def test_eval(self):
        symbols = symtable.symtable("42", "?", "eval")
    def test_single(self):
        symbols = symtable.symtable("42", "?", "single")
    def test_exec(self):
        symbols = symtable.symtable("def f(x): return x", "?", "exec")
def test_main():
    """Entry point used by Python's regrtest driver."""
    test_support.run_unittest(SymtableTest)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_main()
| gpl-3.0 |
sahiljain/catapult | firefighter/default/common/query_filter.py | 7 | 1776 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import time
_INTEGER_PARAMETERS = (
'build',
'device_shard',
'host_shard',
'status',
)
# TODO(dtu): Pull these from table.
_STRING_PARAMETERS = (
'benchmark',
'builder',
'configuration',
'device_id',
'hostname',
'master',
'os',
'os_version',
'role',
)
def Filters(request):
filters = {}
for parameter_name in _INTEGER_PARAMETERS:
parameter_values = request.get_all(parameter_name)
if parameter_values:
filters[parameter_name] = map(int, parameter_values)
for parameter_name in _STRING_PARAMETERS:
parameter_values = request.get_all(parameter_name)
if parameter_values:
for parameter_value in parameter_values:
if re.search(r'[^A-Za-z0-9\(\)-_. ]', parameter_value):
raise ValueError('invalid %s: "%s"' %
(parameter_name, parameter_value))
filters[parameter_name] = parameter_values
start_time = request.get('start_time')
if start_time:
filters['start_time'] = _ParseTime(start_time)
end_time = request.get('end_time')
if end_time:
filters['end_time'] = _ParseTime(end_time)
return filters
def _ParseTime(time_parameter):
units = {
's': 1,
'm': 60,
'h': 60 * 60,
'd': 60 * 60 * 24,
'w': 60 * 60 * 24 * 7,
}
unit = time_parameter[-1]
if unit in units:
time_delta = -abs(float(time_parameter[:-1]))
time_parameter = time_delta * units[unit]
else:
time_parameter = float(time_parameter)
if time_parameter < 0:
time_parameter = time.time() + time_parameter
return time_parameter
| bsd-3-clause |
abenzbiria/clients_odoo | openerp/addons/base/res/res_bank.py | 41 | 10265 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class Bank(osv.osv):
    """res.bank: a bank, with its address and BIC/SWIFT code."""
    _description='Bank'
    _name = 'res.bank'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True),
        'street': fields.char('Street'),
        'street2': fields.char('Street2'),
        'zip': fields.char('Zip', change_default=True, size=24),
        'city': fields.char('City'),
        'state': fields.many2one("res.country.state", 'Fed. State',
            domain="[('country_id', '=', country)]"),
        'country': fields.many2one('res.country', 'Country'),
        'email': fields.char('Email'),
        'phone': fields.char('Phone'),
        'fax': fields.char('Fax'),
        'active': fields.boolean('Active'),
        'bic': fields.char('Bank Identifier Code', size=64,
            help="Sometimes called BIC or Swift."),
    }
    _defaults = {
        # New banks are active by default.
        'active': lambda *a: 1,
    }
    def name_get(self, cr, uid, ids, context=None):
        """Display each bank as 'BIC - name' when a BIC is set, else just its name."""
        result = []
        for bank in self.browse(cr, uid, ids, context):
            result.append((bank.id, (bank.bic and (bank.bic + ' - ') or '') + bank.name))
        return result
class res_partner_bank_type(osv.osv):
    """res.partner.bank.type: a kind of bank account (e.g. IBAN), with the
    fields it requires and a display-format template."""
    _description='Bank Account Type'
    _name = 'res.partner.bank.type'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'code': fields.char('Code', size=64, required=True),
        'field_ids': fields.one2many('res.partner.bank.type.field', 'bank_type_id', 'Type Fields'),
        'format_layout': fields.text('Format Layout', translate=True)
    }
    _defaults = {
        # %-style template used by res_partner_bank._prepare_name_get().
        'format_layout': lambda *args: "%(bank_name)s: %(acc_number)s"
    }
class res_partner_bank_type_fields(osv.osv):
    """res.partner.bank.type.field: per-type field metadata that drives the
    dynamic required/readonly states built in res_partner_bank.fields_get()."""
    _description='Bank type fields'
    _name = 'res.partner.bank.type.field'
    _order = 'name'
    _columns = {
        'name': fields.char('Field Name', required=True, translate=True),
        'bank_type_id': fields.many2one('res.partner.bank.type', 'Bank Type', required=True, ondelete='cascade'),
        'required': fields.boolean('Required'),
        'readonly': fields.boolean('Readonly'),
        'size': fields.integer('Max. Size'),
    }
class res_partner_bank(osv.osv):
    '''Bank Accounts'''
    _name = "res.partner.bank"
    _rec_name = "acc_number"
    _description = __doc__
    _order = 'sequence'
    def _bank_type_get(self, cr, uid, context=None):
        """Selection helper: all bank account types as (code, name) pairs."""
        bank_type_obj = self.pool.get('res.partner.bank.type')
        result = []
        type_ids = bank_type_obj.search(cr, uid, [])
        bank_types = bank_type_obj.browse(cr, uid, type_ids, context=context)
        for bank_type in bank_types:
            result.append((bank_type.code, bank_type.name))
        return result
    def _default_value(self, cursor, user, field, context=None):
        """Default *field* from the 'default' (or first untyped) address in
        context['address']; relational fields default to False, text to ''."""
        if context is None: context = {}
        if field in ('country_id', 'state_id'):
            value = False
        else:
            value = ''
        if not context.get('address'):
            return value
        for address in self.pool.get('res.partner').resolve_2many_commands(
                cursor, user, 'address', context['address'], ['type', field], context=context):
            if address.get('type') == 'default':
                return address.get(field, value)
            elif not address.get('type'):
                value = address.get(field, value)
        return value
    _columns = {
        'name': fields.char('Bank Account'), # to be removed in v6.2 ?
        'acc_number': fields.char('Account Number', size=64, required=True),
        'bank': fields.many2one('res.bank', 'Bank'),
        'bank_bic': fields.char('Bank Identifier Code', size=16),
        'bank_name': fields.char('Bank Name'),
        'owner_name': fields.char('Account Owner Name'),
        'street': fields.char('Street'),
        'zip': fields.char('Zip', change_default=True, size=24),
        'city': fields.char('City'),
        'country_id': fields.many2one('res.country', 'Country',
            change_default=True),
        'state_id': fields.many2one("res.country.state", 'Fed. State',
            change_default=True, domain="[('country_id','=',country_id)]"),
        'company_id': fields.many2one('res.company', 'Company',
            ondelete='cascade', help="Only if this bank account belong to your company"),
        'partner_id': fields.many2one('res.partner', 'Account Owner', ondelete='cascade', select=True),
        'state': fields.selection(_bank_type_get, 'Bank Account Type', required=True,
            change_default=True),
        'sequence': fields.integer('Sequence'),
        'footer': fields.boolean("Display on Reports", help="Display this bank account on the footer of printed documents like invoices and sales orders.")
    }
    _defaults = {
        # Owner name/address fields default from the partner address in context.
        'owner_name': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'name', context=context),
        'street': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'street', context=context),
        'city': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'city', context=context),
        'zip': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'zip', context=context),
        'country_id': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'country_id', context=context),
        'state_id': lambda obj, cursor, user, context: obj._default_value(
            cursor, user, 'state_id', context=context),
        'name': '/'
    }
    def fields_get(self, cr, uid, allfields=None, context=None):
        """Extend fields_get with per-bank-type 'states' so type fields
        become required/readonly depending on the selected account type."""
        res = super(res_partner_bank, self).fields_get(cr, uid, allfields=allfields, context=context)
        bank_type_obj = self.pool.get('res.partner.bank.type')
        type_ids = bank_type_obj.search(cr, uid, [])
        types = bank_type_obj.browse(cr, uid, type_ids)
        for type in types:
            for field in type.field_ids:
                if field.name in res:
                    res[field.name].setdefault('states', {})
                    res[field.name]['states'][type.code] = [
                        ('readonly', field.readonly),
                        ('required', field.required)]
        return res
    def _prepare_name_get(self, cr, uid, bank_dicts, context=None):
        """ Format the name of a res.partner.bank.
            This function is designed to be inherited to add replacement fields.
            :param bank_dicts: a list of res.partner.bank dicts, as returned by the method read()
            :return: [(id, name), ...], as returned by the method name_get()
        """
        # prepare a mapping {code: format_layout} for all bank types
        bank_type_obj = self.pool.get('res.partner.bank.type')
        bank_types = bank_type_obj.browse(cr, uid, bank_type_obj.search(cr, uid, []), context=context)
        bank_code_format = dict((bt.code, bt.format_layout) for bt in bank_types)
        res = []
        for data in bank_dicts:
            name = data['acc_number']
            if data['state'] and bank_code_format.get(data['state']):
                try:
                    if not data.get('bank_name'):
                        data['bank_name'] = _('BANK')
                    name = bank_code_format[data['state']] % data
                except Exception:
                    raise osv.except_osv(_("Formating Error"), _("Invalid Bank Account Type Name format."))
            res.append((data.get('id', False), name))
        return res
    def name_get(self, cr, uid, ids, context=None):
        """Render account names through the type's format_layout template."""
        if not len(ids):
            return []
        bank_dicts = self.read(cr, uid, ids, self.fields_get_keys(cr, uid, context=context), context=context)
        return self._prepare_name_get(cr, uid, bank_dicts, context=context)
    def onchange_company_id(self, cr, uid, ids, company_id, context=None):
        """Prefill owner fields from the selected company's partner."""
        result = {}
        if company_id:
            c = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
            if c.partner_id:
                r = self.onchange_partner_id(cr, uid, ids, c.partner_id.id, context=context)
                r['value']['partner_id'] = c.partner_id.id
                r['value']['footer'] = 1
                result = r
        return result
    def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
        """Copy name and BIC from the selected bank."""
        result = {}
        if bank_id:
            bank = self.pool.get('res.bank').browse(cr, uid, bank_id, context=context)
            result['bank_name'] = bank.name
            result['bank_bic'] = bank.bic
        return {'value': result}
    def onchange_partner_id(self, cr, uid, id, partner_id, context=None):
        """Copy owner name and address from the selected partner."""
        result = {}
        if partner_id:
            part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
            result['owner_name'] = part.name
            result['street'] = part.street or False
            result['city'] = part.city or False
            result['zip'] = part.zip or False
            result['country_id'] = part.country_id.id
            result['state_id'] = part.state_id.id
        return {'value': result}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
niftynei/zulip | zerver/lib/debug.py | 18 | 1241 | from __future__ import absolute_import
import code
import traceback
import signal
from types import FrameType
from typing import Optional
# Interactive debugging code from
# http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger
# setup, which we might want if we move Tornado to run in a daemon
# rather than via screen).
def interactive_debug(sig, frame):
    # type: (int, Optional[FrameType]) -> None
    """Interrupt running process, and provide a python prompt for
    interactive debugging.

    Installed as a signal handler (see interactive_debug_listen); drops the
    process into an InteractiveConsole whose namespace exposes the
    interrupted frame's globals and locals plus the frame itself as _frame.
    """
    d = {'_frame': frame}  # Allow access to frame object.
    # The signal-handler contract allows frame to be None; only merge the
    # frame's namespaces when one was actually delivered.
    if frame is not None:
        d.update(frame.f_globals)  # Unless shadowed by global
        d.update(frame.f_locals)

    # Fixed typo in the banner: "recieved" -> "received".
    message = "Signal received : entering python shell.\nTraceback:\n"
    message += ''.join(traceback.format_stack(frame))
    i = code.InteractiveConsole(d)
    i.interact(message)
# SIGUSR1 => Just print the stack
# SIGUSR2 => Print stack + open interactive debugging shell
def interactive_debug_listen():
    # type: () -> None
    """Install the debugging signal handlers documented above:
    SIGUSR1 prints the current stack, SIGUSR2 additionally opens an
    interactive shell via interactive_debug."""
    def print_stack_handler(sig, stack):
        # Same behavior as the original lambda, just a named function.
        traceback.print_stack(stack)
    signal.signal(signal.SIGUSR1, print_stack_handler)
    signal.signal(signal.SIGUSR2, interactive_debug)
| apache-2.0 |
amith01994/intellij-community | python/helpers/pycharm/nose_utils.py | 28 | 7224 | import sys
import traceback
import datetime
import unittest
from tcmessages import TeamcityServiceMessages
from tcunittest import strclass
from tcunittest import TeamcityTestResult
try:
    # nose is a third-party dependency of this helper; import its pieces up
    # front so a missing/broken install fails with one clear message below.
    from nose.util import isclass # backwards compat
    from nose.config import Config
    from nose.result import TextTestResult
    from nose import SkipTest
    from nose.plugins.errorclass import ErrorClassPlugin
except (Exception, ):
    # Py2/Py3-compatible way to grab the active exception without using the
    # "except E, e" vs "except E as e" syntax split.
    e = sys.exc_info()[1]
    raise NameError(
        "Something went wrong, do you have nosetest installed? I got this error: %s" % e)
class TeamcityPlugin(ErrorClassPlugin, TextTestResult, TeamcityTestResult):
    """
    TeamcityTest plugin for nose tests
    """
    # Nose plugin registration attributes: plugin name and default activation.
    name = "TeamcityPlugin"
    enabled = True

    def __init__(self, stream=sys.stderr, descriptions=None, verbosity=1,
                 config=None, errorClasses=None):
        # Set up the TeamCity service-message writer and announce that the
        # test matrix has been entered before any test output appears.
        super(TeamcityPlugin, self).__init__()
        if errorClasses is None:
            errorClasses = {}
        self.errorClasses = errorClasses
        if config is None:
            config = Config()
        self.config = config
        self.output = stream
        self.messages = TeamcityServiceMessages(self.output,
                                                prepend_linebreak=True)
        self.messages.testMatrixEntered()
        # Name of the suite currently being reported; used by startTest() to
        # emit testSuiteStarted/testSuiteFinished pairs lazily.
        self.current_suite = None
        # The two result base classes do not cooperate via super(), so both
        # are initialized explicitly.
        TextTestResult.__init__(self, stream, descriptions, verbosity, config,
                                errorClasses)
        TeamcityTestResult.__init__(self, stream)

    def configure(self, options, conf):
        """Standard nose plugin hook: remember the active configuration."""
        if not self.can_configure:
            return
        self.conf = conf

    def addError(self, test, err):
        """Report a test error; nose delivers skips here as SkipTest errors."""
        exctype, value, tb = err
        err = self.formatErr(err)
        if exctype == SkipTest:
            self.messages.testIgnored(self.getTestName(test), message='Skip')
        else:
            self.messages.testError(self.getTestName(test), message='Error', details=err, duration=self.__getDuration(test))

    def formatErr(self, err):
        """Render an (exctype, value, tb) triple as a traceback string."""
        exctype, value, tb = err
        if isinstance(value, str):
            # Older nose versions may pass the exception value as a plain
            # string; try to rebuild an exception instance from it.
            try:
                value = exctype(value)
            except TypeError:
                pass
        return ''.join(traceback.format_exception(exctype, value, tb))

    def is_gen(self, test):
        """Return True if *test* was produced by a nose test generator."""
        if hasattr(test, "test") and hasattr(test.test, "descriptor"):
            if test.test.descriptor is not None:
                return True
        return False

    def getTestName(self, test):
        """Derive the display name of *test* for TeamCity messages."""
        if hasattr(test, "error_context"):
            return test.error_context
        test_name_full = str(test)
        if self.is_gen(test):
            # Generated tests keep their full repr (it includes the args).
            return test_name_full
        ind_1 = test_name_full.rfind('(')
        if ind_1 != -1:
            # Strip the "(suite)" part, e.g. "test_x (module.Class)".
            return test_name_full[:ind_1]
        return test_name_full

    def addFailure(self, test, err):
        """Report an assertion failure to TeamCity."""
        err = self.formatErr(err)
        self.messages.testFailed(self.getTestName(test),
                                 message='Failure', details=err)

    def addSkip(self, test, reason):
        """Report a skipped test with its skip reason."""
        self.messages.testIgnored(self.getTestName(test), message=reason)

    def _getSuite(self, test):
        """Return a (test_location, suite_location) URL pair for *test*.

        Locations are "file://path:line" when they can be resolved, otherwise
        "python_nosetestid://<id>" so the IDE can still navigate by test id.
        """
        if hasattr(test, "suite"):
            suite = strclass(test.suite)
            suite_location = test.suite.location
            location = test.suite.abs_location
            if hasattr(test, "lineno"):
                location = location + ":" + str(test.lineno)
            else:
                location = location + ":" + str(test.test.lineno)
        else:
            suite = strclass(test.__class__)
            suite_location = "python_nosetestid://" + suite
            try:
                # Imported lazily: only needed on this fallback path.
                from nose.util import func_lineno

                if hasattr(test.test, "descriptor") and test.test.descriptor:
                    # Generator test: point at the generator function itself.
                    suite_location = "file://" + self.test_address(
                        test.test.descriptor)
                    location = suite_location + ":" + str(
                        func_lineno(test.test.descriptor))
                else:
                    suite_location = "file://" + self.test_address(
                        test.test.test)
                    location = "file://" + self.test_address(
                        test.test.test) + ":" + str(func_lineno(test.test.test))
            except:
                # NOTE(review): intentionally broad — any resolution failure
                # falls back to nose test-id based locations.
                test_id = test.id()
                suite_id = test_id[:test_id.rfind(".")]
                suite_location = "python_nosetestid://" + str(suite_id)
                location = "python_nosetestid://" + str(test_id)
        return (location, suite_location)

    def test_address(self, test):
        """Return the source file path that defines *test*.

        Mirrors nose's own address resolution: prefer the object's address()
        hook, else locate the defining module's __file__ (mapping .pyc back
        to .py). Raises TypeError for objects it cannot classify.
        """
        if hasattr(test, "address"):
            return test.address()[0]
        t = type(test)
        file = None
        import types, os
        if (t == types.FunctionType or issubclass(t, type) or t == type
            or isclass(test)):
            module = getattr(test, '__module__', None)
            if module is not None:
                m = sys.modules[module]
                file = getattr(m, '__file__', None)
                if file is not None:
                    file = os.path.abspath(file)
                    if file.endswith("pyc"):
                        # Point at the source, not the compiled file.
                        file = file[:-1]
                    return file
        raise TypeError("I don't know what %s is (%s)" % (test, t))

    def getSuiteName(self, test):
        """Derive the suite (class/module) display name for *test*."""
        test_name_full = str(test)
        ind_1 = test_name_full.rfind('(')
        if self.is_gen(test) and ind_1 != -1:
            # Generated test: suite is everything before the last dot of the
            # part preceding "(...)".
            ind = test_name_full[:ind_1].rfind('.')
            if ind != -1:
                return test_name_full[:ind]
        if ind_1 != -1:
            # "test_x (module.Class)" -> "module.Class".
            return test_name_full[ind_1 + 1: -1]
        ind = test_name_full.rfind('.')
        if ind != -1:
            return test_name_full[:test_name_full.rfind(".")]
        return test_name_full

    def startTest(self, test):
        """Open a new suite if needed, then report the test as started."""
        location, suite_location = self._getSuite(test)
        suite = self.getSuiteName(test)
        if suite != self.current_suite:
            # Close the previous suite before opening the next one; suites
            # are emitted lazily, one at a time.
            if self.current_suite:
                self.messages.testSuiteFinished(self.current_suite)
            self.current_suite = suite
            self.messages.testSuiteStarted(self.current_suite,
                                           location=suite_location)
        # Remember the start time so stopTest()/addError() can compute a
        # duration even though nose does not provide one.
        setattr(test, "startTime", datetime.datetime.now())
        self.messages.testStarted(self.getTestName(test), location=location)

    def stopTest(self, test):
        """Report the test as finished with its measured duration."""
        duration = self.__getDuration(test)
        self.messages.testFinished(self.getTestName(test),
                                   duration=int(duration))

    def __getDuration(self, test):
        """Milliseconds elapsed since startTest() stamped this test (0-ish
        if the stamp is missing, since 'now - now' is ~0)."""
        start = getattr(test, "startTime", datetime.datetime.now())
        d = datetime.datetime.now() - start
        # NOTE(review): 86400000 ms/day; the days term is effectively
        # theoretical for test durations.
        duration = d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
        return duration

    def finalize(self, result):
        """Nose plugin hook: close the still-open suite, if any."""
        if self.current_suite:
            self.messages.testSuiteFinished(self.current_suite)
            self.current_suite = None
class TeamcityNoseRunner(unittest.TextTestRunner):
    """Test runner that supports teamcity output
    """

    def __init__(self, stream=sys.stdout, descriptions=1, verbosity=1,
                 config=None):
        # Nose normally supplies its Config; default to an empty one so the
        # runner also works stand-alone.
        if config is None:
            config = Config()
        self.config = config
        unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)

    def _makeResult(self):
        """Use the TeamCity-aware result class instead of unittest's default."""
        return TeamcityPlugin(self.stream,
                              self.descriptions,
                              self.verbosity,
                              self.config)

    def run(self, test):
        """Overrides to provide plugin hooks and defer all output to
        the test result class.
        """
        #for 2.5 compat
        plugins = self.config.plugins
        plugins.configure(self.config.options, self.config)
        plugins.begin()
        wrapper = plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper

        # plugins can decorate or capture the output stream
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        result = self._makeResult()
        test(result)
        result.endLastSuite()
        plugins.finalize(result)
        # Fix: the source line was corrupted by fused dataset residue
        # ("| apache-2.0 |"); restored to a plain return of the result.
        return result
egafford/sahara | sahara/api/v2/jobs.py | 2 | 2884 | # Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.api import acl
from sahara.service.api.v2 import job_executions as j_e_api
from sahara.service.api.v2 import jobs as api
from sahara.service import validation as v
from sahara.service.validations.edp import job as v_j
from sahara.service.validations.edp import job_execution as v_j_e
from sahara.service.validations.edp import job_execution_schema as v_j_e_schema
from sahara.service.validations.edp import job_schema as v_j_schema
import sahara.utils.api as u
# Flask-style REST blueprint under which all v2 job-template endpoints
# below are registered.
rest = u.RestV2('jobs', __name__)
@rest.get('/job-templates')
@acl.enforce("data-processing:jobs:get_all")
@v.check_exists(api.get_job, 'marker')
@v.validate(None, v.validate_pagination_limit,
            v.validate_sorting_jobs)
def job_list():
    """GET /job-templates: list job templates, honoring the pagination and
    sorting query arguments (marker/limit/sort) validated above."""
    result = api.get_jobs(**u.get_request_args().to_dict())
    return u.render(res=result, name='job_templates')
@rest.post('/job-templates')
@acl.enforce("data-processing:jobs:create")
@v.validate(v_j_schema.JOB_SCHEMA, v_j.check_mains_libs, v_j.check_interface)
def job_create(data):
    """POST /job-templates: create a job template from the schema-validated
    request body and return it wrapped for the API response."""
    return u.render(api.create_job(data).to_wrapped_dict())
@rest.get('/job-templates/<job_id>')
@acl.enforce("data-processing:jobs:get")
@v.check_exists(api.get_job, id='job_id')
def job_get(job_id):
    """GET /job-templates/<job_id>: show a single job template (existence
    is checked by the decorator above)."""
    return u.to_wrapped_dict(api.get_job, job_id)
@rest.patch('/job-templates/<job_id>')
@acl.enforce("data-processing:jobs:modify")
@v.check_exists(api.get_job, id='job_id')
@v.validate(v_j_schema.JOB_UPDATE_SCHEMA)
def job_update(job_id, data):
    """PATCH /job-templates/<job_id>: apply a schema-validated partial
    update to an existing job template."""
    return u.to_wrapped_dict(api.update_job, job_id, data)
@rest.delete('/job-templates/<job_id>')
@acl.enforce("data-processing:jobs:delete")
@v.check_exists(api.get_job, id='job_id')
def job_delete(job_id):
    """DELETE /job-templates/<job_id>: remove a job template and return an
    empty (body-less) response."""
    api.delete_job(job_id)
    return u.render()
@rest.post('/job-templates/<job_id>/execute')
@acl.enforce("data-processing:jobs:execute")
@v.check_exists(api.get_job, id='job_id')
@v.validate(v_j_e_schema.JOB_EXEC_SCHEMA, v_j_e.check_job_execution)
def job_execute(job_id, data):
    """POST /job-templates/<job_id>/execute: launch a job execution for the
    template; delegates to the job-executions API."""
    return u.render(job_execution=j_e_api.execute_job(job_id, data).to_dict())
@rest.get('/job-templates/config-hints/<job_type>')
@acl.enforce("data-processing:jobs:get_config_hints")
@v.check_exists(api.get_job_config_hints, job_type='job_type')
def job_config_hints_get(job_type):
    """GET /job-templates/config-hints/<job_type>: return the configuration
    hints applicable to the given EDP job type."""
    return u.render(api.get_job_config_hints(job_type))
| apache-2.0 |
jlegendary/youtube-dl | youtube_dl/extractor/anitube.py | 138 | 1721 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class AnitubeIE(InfoExtractor):
    """Extractor for anitube.se video pages."""
    IE_NAME = 'anitube.se'
    _VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.anitube.se/video/36621',
        'md5': '59d0eeae28ea0bc8c05e7af429998d43',
        'info_dict': {
            'id': '36621',
            'ext': 'mp4',
            'title': 'Recorder to Randoseru 01',
            'duration': 180.19,
        },
        'skip': 'Blocked in the US',
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        # The embed key is required to fetch the player configuration XML.
        key = self._html_search_regex(
            r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key')

        config_xml = self._download_xml(
            'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key)

        video_title = config_xml.find('title').text
        thumbnail = config_xml.find('image').text
        duration = float(config_xml.find('duration').text)

        # SD lives under <file>, HD under <filehd>; either tag may be absent
        # (Element.find returns None for a missing tag). SD first keeps the
        # original format ordering.
        formats = []
        for format_id, tag in (('sd', 'file'), ('hd', 'filehd')):
            node = config_xml.find(tag)
            if node is not None:
                formats.append({
                    'format_id': format_id,
                    'url': node.text,
                })

        return {
            'id': video_id,
            'title': video_title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats
        }
| unlicense |
cjh1/VTK | Examples/Tutorial/Step3/Python/Cone3.py | 24 | 2685 | #!/usr/bin/env python
#
# This example demonstrates how to use multiple renderers within a
# render window. It is a variation of the Cone.py example. Please
# refer to that example for additional documentation.
#
import vtk
import time
#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
cone = vtk.vtkConeSource()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )

#
# In this example we terminate the pipeline with a mapper process object.
# (Intermediate filters such as vtkShrinkPolyData could be inserted in
# between the source and the mapper.) We create an instance of
# vtkPolyDataMapper to map the polygonal data into graphics primitives. We
# connect the output of the cone source to the input of this mapper.
#
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())

#
# Create an actor to represent the cone. The actor orchestrates rendering of
# the mapper's graphics primitives. An actor also refers to properties via a
# vtkProperty instance, and includes an internal transformation matrix. We
# set this actor's mapper to be coneMapper which we created above.
#
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)

#
# Create two renderers and assign actors to them. A renderer renders into a
# viewport within the vtkRenderWindow. It is part or all of a window on the
# screen and it is responsible for drawing the actors it has. We also set
# the background color here. In this example we are adding the same actor
# to two different renderers; it is okay to add different actors to
# different renderers as well.
#
# Viewports are (xmin, ymin, xmax, ymax) in normalized window coordinates:
# ren1 takes the left half of the window, ren2 the right half.
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.SetBackground(0.1, 0.2, 0.4)
ren1.SetViewport(0.0, 0.0, 0.5, 1.0)

ren2 = vtk.vtkRenderer()
ren2.AddActor(coneActor)
ren2.SetBackground(0.1, 0.2, 0.4)
ren2.SetViewport(0.5, 0.0, 1.0, 1.0)

#
# Finally we create the render window which will show up on the screen.
# We add our two renderers into the render window using AddRenderer. We also
# set the size to be 600 pixels by 300.
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.AddRenderer( ren2 )
renWin.SetSize(600, 300)

#
# Make one camera view 90 degrees from other.
#
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(90)

#
# Now we loop over 360 degrees and render the cone each time.
#
for i in range(0,360):
    # ~30 ms pause per frame paces the animation for the viewer.
    time.sleep(0.03)

    renWin.Render()
    ren1.GetActiveCamera().Azimuth( 1 )
    ren2.GetActiveCamera().Azimuth( 1 )
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.