repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
blazek/QGIS | tests/src/python/test_provider_spatialite.py | 10 | 50214 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSpatialiteProvider
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Vincent Mora'
__date__ = '09/07/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
import qgis # NOQA
import os
import re
import sys
import shutil
import tempfile
from qgis.core import (QgsProviderRegistry,
QgsVectorLayer,
QgsVectorDataProvider,
QgsPointXY,
QgsFeature,
QgsGeometry,
QgsProject,
QgsFieldConstraints,
QgsVectorLayerUtils,
QgsSettings,
QgsDefaultValue,
QgsFeatureRequest,
QgsRectangle,
QgsWkbTypes)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
from providertestbase import ProviderTestCase
from qgis.PyQt.QtCore import QObject, QVariant
from qgis.utils import spatialite_connect
# Pass no_exit=True: for some reason this crashes sometimes on exit on Travis
start_app(True)
# Root directory of the shared QGIS unit-test datasets (see utilities.unitTestDataPath)
TEST_DATA_DIR = unitTestDataPath()
def count_opened_filedescriptors(filename_to_test):
    """Return the number of file descriptors of this process that point at
    *filename_to_test* (matched by basename via /proc/<pid>/fd), or -1 on
    platforms without /proc (anything that is not Linux)."""
    if not sys.platform.startswith('linux'):
        return -1
    fd_dir = '/proc/%d/fd' % os.getpid()
    wanted = os.path.basename(filename_to_test)
    total = 0
    for entry in os.listdir(fd_dir):
        fd_path = fd_dir + '/' + entry
        # a descriptor may have been closed between listdir() and now
        if not os.path.exists(fd_path):
            continue
        if os.path.basename(os.readlink(fd_path)) == wanted:
            total += 1
    return total
class TestQgsSpatialiteProvider(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
    """Run before all tests.

    Opens the two reference layers required by the ProviderTestCase base
    class, then builds a scratch SpatiaLite database (cls.dbname, in the
    system temp dir) containing every table the tests below rely on.
    """
    print(' ### Setup Spatialite Provider Test Class')
    # setup provider for base tests
    cls.vl = QgsVectorLayer('dbname=\'{}/provider/spatialite.db\' table="somedata" (geom) sql='.format(TEST_DATA_DIR), 'test', 'spatialite')
    assert(cls.vl.isValid())
    cls.source = cls.vl.dataProvider()
    cls.vl_poly = QgsVectorLayer('dbname=\'{}/provider/spatialite.db\' table="somepolydata" (geom) sql='.format(TEST_DATA_DIR), 'test', 'spatialite')
    assert(cls.vl_poly.isValid())
    cls.poly_provider = cls.vl_poly.dataProvider()
    # create test db (recreated from scratch on every run)
    cls.dbname = os.path.join(tempfile.gettempdir(), "test.sqlite")
    if os.path.exists(cls.dbname):
        os.remove(cls.dbname)
    con = spatialite_connect(cls.dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)
    # simple table with primary key
    sql = "CREATE TABLE test_pg (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # table with Z dimension geometry
    sql = "CREATE TABLE test_z (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_z', 'geometry', 4326, 'POINT', 'XYZ')"
    cur.execute(sql)
    sql = "INSERT INTO test_z (id, name, geometry) "
    sql += "VALUES (1, 'toto 2', GeomFromText('POINT Z (0 0 1)', 4326))"
    cur.execute(sql)
    # table with M value geometry
    sql = "CREATE TABLE test_m (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_m', 'geometry', 4326, 'POINT', 'XYM')"
    cur.execute(sql)
    sql = "INSERT INTO test_m (id, name, geometry) "
    sql += "VALUES (1, 'toto 3', GeomFromText('POINT M (0 0 1)', 4326))"
    cur.execute(sql)
    # table with Z dimension and M value geometry
    sql = "CREATE TABLE test_zm (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_zm', 'geometry', 4326, 'POINT', 'XYZM')"
    cur.execute(sql)
    sql = "INSERT INTO test_zm (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POINT ZM (0 0 1 1)', 4326))"
    cur.execute(sql)
    # table with multiple column primary key
    sql = "CREATE TABLE test_pg_mk (id INTEGER NOT NULL, name TEXT NOT NULL, PRIMARY KEY(id,name))"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg_mk', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg_mk (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # simple table with primary key (used by the query-layer tests)
    sql = "CREATE TABLE test_q (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_q', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_q (id, name, geometry) "
    sql += "VALUES (11, 'toto 11', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_q (id, name, geometry) "
    sql += "VALUES (21, 'toto 12', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # simple table with a geometry column named 'Geometry' (case-sensitivity test)
    sql = "CREATE TABLE test_n (Id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_n', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_n (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_n (id, name, geometry) "
    sql += "VALUES (2, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # table with different array types, stored as JSON
    sql = "CREATE TABLE test_arrays (Id INTEGER NOT NULL PRIMARY KEY, strings JSONSTRINGLIST NOT NULL, ints JSONINTEGERLIST NOT NULL, reals JSONREALLIST NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_arrays', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_arrays (id, strings, ints, reals, geometry) "
    sql += "VALUES (1, '[\"toto\",\"tutu\"]', '[1,-2,724562]', '[1.0, -232567.22]', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # table with different array types, stored as JSON (left empty: the write test fills it)
    sql = "CREATE TABLE test_arrays_write (Id INTEGER NOT NULL PRIMARY KEY, array JSONARRAY NOT NULL, strings JSONSTRINGLIST NOT NULL, ints JSONINTEGERLIST NOT NULL, reals JSONREALLIST NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_arrays_write', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    # 2 tables with relations (FOREIGN KEY discovery test)
    sql = "PRAGMA foreign_keys = ON;"
    cur.execute(sql)
    sql = "CREATE TABLE test_relation_a(artistid INTEGER PRIMARY KEY, artistname TEXT);"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_relation_a', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "CREATE TABLE test_relation_b(trackid INTEGER, trackname TEXT, trackartist INTEGER, FOREIGN KEY(trackartist) REFERENCES test_relation_a(artistid));"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_relation_b', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    # table to test auto increment
    sql = "CREATE TABLE test_autoincrement(id INTEGER PRIMARY KEY AUTOINCREMENT, num INTEGER);"
    cur.execute(sql)
    sql = "INSERT INTO test_autoincrement (num) VALUES (123);"
    cur.execute(sql)
    # tables with constraints
    sql = "CREATE TABLE test_constraints(id INTEGER PRIMARY KEY, num INTEGER NOT NULL, desc TEXT UNIQUE, desc2 TEXT, num2 INTEGER NOT NULL UNIQUE)"
    cur.execute(sql)
    # simple table with defaults (note the doubled quote inside the string default)
    sql = "CREATE TABLE test_defaults (id INTEGER NOT NULL PRIMARY KEY, name TEXT DEFAULT 'qgis ''is good', number INTEGER DEFAULT 5, number2 REAL DEFAULT 5.7, no_default REAL)"
    cur.execute(sql)
    # simple table with categorized points: 4 'ext' corners around 4 'int' points
    sql = "CREATE TABLE test_filter (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_filter', 'geometry', 4326, 'POINT', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (1, 'ext', GeomFromText('POINT(0 0)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (2, 'ext', GeomFromText('POINT(0 3)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (3, 'ext', GeomFromText('POINT(3 3)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (4, 'ext', GeomFromText('POINT(3 0)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (5, 'int', GeomFromText('POINT(1 1)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (6, 'int', GeomFromText('POINT(1 2)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (7, 'int', GeomFromText('POINT(2 2)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (8, 'int', GeomFromText('POINT(2 1)', 4326))"
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()
    # temp dirs created by getSource(), removed in tearDownClass()
    cls.dirs_to_cleanup = []
@classmethod
def tearDownClass(cls):
    """Run after all tests."""
    print(' ### Tear Down Spatialite Provider Test Class')
    # for the time being, keep the file to check with qgis
    # if os.path.exists(cls.dbname) :
    # os.remove(cls.dbname)
    # Remove every temp directory created by getSource(); ignore errors
    # so a half-removed directory cannot break the whole teardown.
    for tmpdir in cls.dirs_to_cleanup:
        shutil.rmtree(tmpdir, True)
def getSource(self):
    """Return a layer over a private copy of the reference database.

    The reference spatialite.db is copied into a fresh temporary
    directory (registered for removal in tearDownClass) so edits made
    by the base-class tests never touch the original dataset.
    """
    workdir = tempfile.mkdtemp()
    self.dirs_to_cleanup.append(workdir)
    db_copy = os.path.join(workdir, 'spatialite.db')
    shutil.copy(os.path.join(TEST_DATA_DIR, 'provider', 'spatialite.db'), db_copy)
    uri = 'dbname=\'{}\' table="somedata" (geom) sql='.format(db_copy)
    return QgsVectorLayer(uri, 'test', 'spatialite')
def getEditableLayer(self):
    """Every spatialite source copy is editable, so just reuse getSource()."""
    return self.getSource()
def setUp(self):
    """Run before each test. Nothing to do: all fixtures are class-level."""
    pass
def tearDown(self):
    """Run after each test. Nothing to do: cleanup happens in tearDownClass."""
    pass
def enableCompiler(self):
    """Turn on expression compilation for the provider; True = supported."""
    settings = QgsSettings()
    settings.setValue('/qgis/compileExpressions', True)
    return True
def disableCompiler(self):
    """Turn off expression compilation for the provider."""
    settings = QgsSettings()
    settings.setValue('/qgis/compileExpressions', False)
def uncompiledFilters(self):
    """Expressions the spatialite provider cannot compile server-side;
    the base-class tests fall back to local evaluation for these."""
    return {'cnt = 10 ^ 2',
            '"name" ~ \'[OP]ra[gne]+\'',
            'sqrt(pk) >= 2',
            'radians(cnt) < 2',
            'degrees(pk) <= 200',
            'cos(pk) < 0',
            'sin(pk) < 0',
            'tan(pk) < 0',
            'acos(-1) < pk',
            'asin(1) < pk',
            'atan(3.14) < pk',
            'atan2(3.14, pk) < 1',
            'exp(pk) < 10',
            'ln(pk) <= 1',
            'log(3, pk) <= 1',
            'log10(pk) < 0.5',
            'floor(3.14) <= pk',
            'ceil(3.14) <= pk',
            'pk < pi()',
            'floor(cnt / 66.67) <= 2',
            'ceil(cnt / 66.67) <= 2',
            'pk < pi() / 2',
            'x($geometry) < -70',
            'y($geometry) > 70',
            'xmin($geometry) < -70',
            'ymin($geometry) > 70',
            'xmax($geometry) < -70',
            'ymax($geometry) > 70',
            'disjoint($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
            'intersects($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
            'contains(geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'),$geometry)',
            'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7',
            'intersects($geometry,geom_from_gml( \'<gml:Polygon srsName="EPSG:4326"><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>-72.2,66.1 -65.2,66.1 -65.2,72.0 -72.2,72.0 -72.2,66.1</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon>\'))',
            'x($geometry) < -70',
            'y($geometry) > 79',
            'xmin($geometry) < -70',
            'ymin($geometry) < 76',
            'xmax($geometry) > -68',
            'ymax($geometry) > 80',
            'area($geometry) > 10',
            'perimeter($geometry) < 12',
            'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\')) = \'FF2FF1212\'',
            'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'), \'****F****\')',
            'crosses($geometry,geom_from_wkt( \'Linestring (-68.2 82.1, -66.95 82.1, -66.95 79.05)\'))',
            'overlaps($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'))',
            'within($geometry,geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
            'overlaps(translate($geometry,-1,-1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
            'overlaps(buffer($geometry,1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
            'intersects(centroid($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
            'intersects(point_on_surface($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
            }
def partiallyCompiledFilters(self):
    """LIKE expressions the provider compiles only partially."""
    return {'"name" NOT LIKE \'Ap%\'',
            'name LIKE \'Apple\'',
            'name LIKE \'aPple\'',
            'name LIKE \'Ap_le\'',
            'name LIKE \'Ap\\_le\'',
            }
def test_SplitFeature(self):
    """Split the stored polygon twice and verify the resulting feature count."""
    vl = QgsVectorLayer("dbname=%s table=test_pg (geometry)" % self.dbname, "test_pg", "spatialite")
    self.assertTrue(vl.isValid())
    self.assertTrue(vl.isSpatial())
    vl.startEditing()
    # one vertical cut, then one horizontal cut: 1 polygon -> 4 pieces
    self.assertEqual(vl.splitFeatures([QgsPointXY(0.75, -0.5), QgsPointXY(0.75, 1.5)], 0), 0)
    self.assertEqual(vl.splitFeatures([QgsPointXY(-0.5, 0.25), QgsPointXY(1.5, 0.25)], 0), 0)
    self.assertTrue(vl.commitChanges())
    self.assertEqual(vl.featureCount(), 4)
def test_SplitFeatureWithMultiKey(self):
    """Splitting must also work on a table with a multi-column primary key."""
    vl = QgsVectorLayer("dbname=%s table=test_pg_mk (geometry)" % self.dbname, "test_pg_mk", "spatialite")
    self.assertTrue(vl.isValid())
    self.assertTrue(vl.isSpatial())
    vl.startEditing()
    # vertical then horizontal cut across the unit square
    self.assertEqual(vl.splitFeatures([QgsPointXY(0.5, -0.5), QgsPointXY(0.5, 1.5)], 0), 0)
    self.assertEqual(vl.splitFeatures([QgsPointXY(-0.5, 0.5), QgsPointXY(1.5, 0.5)], 0), 0)
    self.assertTrue(vl.commitChanges())
def test_queries(self):
    """Test loading of query-based layers"""
    def id_sums(layer):
        # (sum of internal feature ids, sum of the 'id' attribute column)
        return (sum(f.id() for f in layer.getFeatures()),
                sum(f.attributes()[0] for f in layer.getFeatures()))
    # a query with a geometry, but no unique id: the id is autoincremented
    l = QgsVectorLayer("dbname=%s table='(select * from test_q)' (geometry)" % self.dbname, "test_pg_query1", "spatialite")
    self.assertTrue(l.isValid())
    self.assertEqual(id_sums(l), (32, 32))  # 11 + 21
    # and now with an id declared
    l = QgsVectorLayer("dbname=%s table='(select * from test_q)' (geometry) key='id'" % self.dbname, "test_pg_query1", "spatialite")
    self.assertTrue(l.isValid())
    self.assertEqual(id_sums(l), (32, 32))
    # a query, but no geometry
    l = QgsVectorLayer("dbname=%s table='(select id,name from test_q)' key='id'" % self.dbname, "test_pg_query1", "spatialite")
    self.assertTrue(l.isValid())
    self.assertEqual(id_sums(l), (32, 32))
def test_zm(self):
    """Reading geometries carrying a Z dimension and/or an M value."""
    # (table name, expect Z, expect M)
    cases = [('test_z', True, False),
             ('test_m', False, True),
             ('test_zm', True, True)]
    for table, want_z, want_m in cases:
        l = QgsVectorLayer("dbname=%s table='%s' (geometry) key='id'" % (self.dbname, table), table, "spatialite")
        self.assertTrue(l.isValid())
        if want_z:
            self.assertTrue(QgsWkbTypes.hasZ(l.wkbType()))
        if want_m:
            self.assertTrue(QgsWkbTypes.hasM(l.wkbType()))
        geom = l.getFeature(1).geometry().constGet()
        # every fixture stores 1.0 in each extra dimension
        if want_z:
            self.assertEqual(geom.z(), 1.0)
        if want_m:
            self.assertEqual(geom.m(), 1.0)
def test_case(self):
    """Test case sensitivity issues"""
    l = QgsVectorLayer("dbname=%s table='test_n' (geometry) key='id'" % self.dbname, "test_n1", "spatialite")
    self.assertTrue(l.isValid())
    provider_fields = l.dataProvider().fields()
    self.assertEqual(provider_fields.count(), 2)
    # the 'Geometry' column must be exposed as the geometry, not as an attribute
    attribute_names = [f.name() for f in provider_fields]
    self.assertTrue('Geometry' not in attribute_names)
def test_invalid_iterator(self):
    """Requesting an iterator on a corrupted database must not crash."""
    corrupt_dbname = self.dbname + '.corrupt'
    shutil.copy(self.dbname, corrupt_dbname)
    layer = QgsVectorLayer("dbname=%s table=test_pg (geometry)" % corrupt_dbname, "test_pg", "spatialite")
    # Corrupt the database by truncating the file after the layer was opened
    with open(corrupt_dbname, 'wb') as handle:
        handle.write(b'')
    layer.getFeatures()
    layer = None
    os.unlink(corrupt_dbname)
def testNoDanglingFileDescriptorAfterCloseVariant1(self):
    ''' Test that when closing the provider all file handles are released.

    Variant 1: the layer is deleted while an iterator is still open, so the
    shared connection must stay alive until the iterator is deleted too.
    The exact assertion order matters here - do not reorder. '''
    temp_dbname = self.dbname + '.no_dangling_test1'
    shutil.copy(self.dbname, temp_dbname)
    vl = QgsVectorLayer("dbname=%s table=test_n (geometry)" % temp_dbname, "test_n", "spatialite")
    self.assertTrue(vl.isValid())
    # The iterator will take one extra connection
    myiter = vl.getFeatures()
    print((vl.featureCount()))
    # Consume one feature but the iterator is still opened
    f = next(myiter)
    self.assertTrue(f.isValid())
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 2)
    # does NOT release one file descriptor, because shared with the iterator
    del vl
    # Non portable, but Windows testing is done with trying to unlink
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 2)
    # the iterator must remain usable after the layer is gone
    f = next(myiter)
    self.assertTrue(f.isValid())
    # Should release one file descriptor
    del myiter
    # Non portable, but Windows testing is done with trying to unlink
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 0)
    # Check that deletion works well (can only fail on Windows)
    os.unlink(temp_dbname)
    self.assertFalse(os.path.exists(temp_dbname))
def testNoDanglingFileDescriptorAfterCloseVariant2(self):
    ''' Test that when closing the provider all file handles are released.

    Variant 2: the iterator is fully exhausted before the layer is deleted,
    so deleting the layer must release every descriptor. '''
    temp_dbname = self.dbname + '.no_dangling_test2'
    shutil.copy(self.dbname, temp_dbname)
    vl = QgsVectorLayer("dbname=%s table=test_n (geometry)" % temp_dbname, "test_n", "spatialite")
    self.assertTrue(vl.isValid())
    # Consume all features.
    myiter = vl.getFeatures()
    for feature in myiter:
        pass
    # The iterator is closed
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 2)
    # Should release one file descriptor
    del vl
    # Non portable, but Windows testing is done with trying to unlink
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 0)
    # Check that deletion works well (can only fail on Windows)
    os.unlink(temp_dbname)
    self.assertFalse(os.path.exists(temp_dbname))
def test_arrays(self):
    """Test loading of layers with arrays"""
    l = QgsVectorLayer("dbname=%s table=test_arrays (geometry)" % self.dbname, "test_arrays", "spatialite")
    self.assertTrue(l.isValid())
    features = list(l.getFeatures())
    self.assertEqual(len(features), 1)

    def check_array_field(name, type_name, qtype, subtype, attr_idx, expected):
        # One JSON-list column: declared type, Qt type/subtype and stored value.
        field = l.fields().field(name)
        self.assertEqual(field.typeName(), type_name)
        self.assertEqual(field.type(), qtype)
        self.assertEqual(field.subType(), subtype)
        self.assertEqual(features[0].attributes()[attr_idx], expected)

    check_array_field('strings', 'jsonstringlist', QVariant.StringList, QVariant.String, 1, ['toto', 'tutu'])
    check_array_field('ints', 'jsonintegerlist', QVariant.List, QVariant.LongLong, 2, [1, -2, 724562])
    check_array_field('reals', 'jsonreallist', QVariant.List, QVariant.Double, 3, [1.0, -232567.22])

    # round-trip a freshly written feature through the provider
    new_f = QgsFeature(l.fields())
    new_f['id'] = 2
    new_f['strings'] = ['simple', '"doubleQuote"', "'quote'", 'back\\slash']
    new_f['ints'] = [1, 2, 3, 4]
    new_f['reals'] = [1e67, 1e-56]
    added, _ = l.dataProvider().addFeatures([new_f])
    self.assertTrue(added)
    read_back = l.getFeature(new_f['id'])
    for attr_name in ('id', 'strings', 'ints', 'reals'):
        self.assertEqual(read_back[attr_name], new_f[attr_name])
def test_arrays_write(self):
    """Test writing of layers with arrays.

    Adds one feature with a pure-string JSONARRAY and one with a
    mixed-type JSONARRAY, and checks that every attribute round-trips
    unchanged through the provider.
    """
    l = QgsVectorLayer("dbname=%s table=test_arrays_write (geometry)" % self.dbname, "test_arrays", "spatialite")
    self.assertTrue(l.isValid())

    def add_and_verify(fid, array_value):
        # Write one feature, then read it back and compare every attribute.
        new_f = QgsFeature(l.fields())
        new_f['id'] = fid
        new_f['array'] = array_value
        new_f['strings'] = ['simple', '"doubleQuote"', "'quote'", 'back\\slash']
        new_f['ints'] = [1, 2, 3, 4]
        new_f['reals'] = [1e67, 1e-56]
        r, fs = l.dataProvider().addFeatures([new_f])
        self.assertTrue(r)
        read_back = l.getFeature(new_f['id'])
        self.assertEqual(read_back['id'], new_f['id'])
        self.assertEqual(read_back['array'], new_f['array'])
        self.assertEqual(read_back['strings'], new_f['strings'])
        self.assertEqual(read_back['ints'], new_f['ints'])
        self.assertEqual(read_back['reals'], new_f['reals'])

    # a JSON array holding only strings (with quoting edge cases)
    add_and_verify(2, ['simple', '"doubleQuote"', "'quote'", 'back\\slash'])
    # a JSON array mixing ints, floats and strings
    add_and_verify(3, [1, 1.2345, '"doubleQuote"', "'quote'", 'back\\slash'])
def test_discover_relation(self):
    """A FOREIGN KEY between two tables must be discoverable as a relation."""
    artist = QgsVectorLayer("dbname=%s table=test_relation_a (geometry)" % self.dbname, "test_relation_a", "spatialite")
    self.assertTrue(artist.isValid())
    track = QgsVectorLayer("dbname=%s table=test_relation_b (geometry)" % self.dbname, "test_relation_b", "spatialite")
    self.assertTrue(track.isValid())
    QgsProject.instance().addMapLayer(artist)
    QgsProject.instance().addMapLayer(track)
    try:
        discovered = QgsProject.instance().relationManager().discoverRelations([], [artist, track])
        by_name = {rel.name(): rel for rel in discovered}
        self.assertEqual({'fk_test_relation_b_0'}, set(by_name.keys()))
        relation = by_name['fk_test_relation_b_0']
        self.assertTrue(relation.isValid())
        # test_relation_b.trackartist (field 2) references test_relation_a.artistid (field 0)
        self.assertEqual('test_relation_b', relation.referencingLayer().name())
        self.assertEqual('test_relation_a', relation.referencedLayer().name())
        self.assertEqual([2], relation.referencingFields())
        self.assertEqual([0], relation.referencedFields())
    finally:
        # always deregister the layers from the project, even on failure
        QgsProject.instance().removeMapLayer(track.id())
        QgsProject.instance().removeMapLayer(artist.id())
def testNotNullConstraint(self):
    """NOT NULL constraints must be reported both by the provider and the fields."""
    vl = QgsVectorLayer("dbname=%s table=test_constraints key='id'" % self.dbname, "test_constraints",
                        "spatialite")
    self.assertTrue(vl.isValid())
    self.assertEqual(len(vl.fields()), 5)
    provider = vl.dataProvider()
    # out-of-range field indexes report no constraints at all
    self.assertEqual(provider.fieldConstraints(-1), QgsFieldConstraints.Constraints())
    self.assertEqual(provider.fieldConstraints(1001), QgsFieldConstraints.Constraints())
    # per-field expectation: id, num and num2 are NOT NULL
    expected_not_null = (True, True, False, False, True)
    fields = vl.fields()
    for idx, expected in enumerate(expected_not_null):
        provider_flag = bool(provider.fieldConstraints(idx) & QgsFieldConstraints.ConstraintNotNull)
        self.assertEqual(provider_flag, expected)
        # the same constraint must have been propagated to the field itself
        field_flag = bool(fields.at(idx).constraints().constraints() & QgsFieldConstraints.ConstraintNotNull)
        self.assertEqual(field_flag, expected)
        if expected:
            self.assertEqual(fields.at(idx).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                             QgsFieldConstraints.ConstraintOriginProvider)
def testUniqueConstraint(self):
    """UNIQUE constraints must be reported both by the provider and the fields."""
    vl = QgsVectorLayer("dbname=%s table=test_constraints key='id'" % self.dbname, "test_constraints",
                        "spatialite")
    self.assertTrue(vl.isValid())
    self.assertEqual(len(vl.fields()), 5)
    provider = vl.dataProvider()
    # out-of-range field indexes report no constraints at all
    self.assertEqual(provider.fieldConstraints(-1), QgsFieldConstraints.Constraints())
    self.assertEqual(provider.fieldConstraints(1001), QgsFieldConstraints.Constraints())
    # per-field expectation: id (pk), desc and num2 are unique
    expected_unique = (True, False, True, False, True)
    fields = vl.fields()
    for idx, expected in enumerate(expected_unique):
        provider_flag = bool(provider.fieldConstraints(idx) & QgsFieldConstraints.ConstraintUnique)
        self.assertEqual(provider_flag, expected)
        # the same constraint must have been propagated to the field itself
        field_flag = bool(fields.at(idx).constraints().constraints() & QgsFieldConstraints.ConstraintUnique)
        self.assertEqual(field_flag, expected)
        if expected:
            self.assertEqual(fields.at(idx).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
                             QgsFieldConstraints.ConstraintOriginProvider)
def testSkipConstraintCheck(self):
    """Unique-constraint checks must be skipped only for autogenerated values."""
    vl = QgsVectorLayer("dbname=%s table=test_autoincrement" % self.dbname, "test_autoincrement",
                        "spatialite")
    self.assertTrue(vl.isValid())
    # "Autogenerate" is the literal token the provider hands back for
    # AUTOINCREMENT keys, so the check must be skipped for it
    # (the redundant str() wrapper around the literal was removed)
    self.assertTrue(vl.dataProvider().skipConstraintCheck(0, QgsFieldConstraints.ConstraintUnique, "Autogenerate"))
    # ...but a concrete candidate value must still be checked
    self.assertFalse(vl.dataProvider().skipConstraintCheck(0, QgsFieldConstraints.ConstraintUnique, 123))
# This test would fail. It would require turning on WAL
def XXXXXtestLocking(self):
    """Disabled (XXXXX prefix keeps the runner from collecting it): per the
    note above, committing while an iterator still holds a connection would
    fail without WAL journaling enabled."""
    temp_dbname = self.dbname + '.locking'
    shutil.copy(self.dbname, temp_dbname)
    vl = QgsVectorLayer("dbname=%s table=test_n (geometry)" % temp_dbname, "test_n", "spatialite")
    self.assertTrue(vl.isValid())
    self.assertTrue(vl.startEditing())
    self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('POLYGON((0 0,1 0,1 1,0 1,0 0))')))
    # The iterator will take one extra connection
    myiter = vl.getFeatures()
    # Consume one feature but the iterator is still opened
    f = next(myiter)
    self.assertTrue(f.isValid())
    self.assertTrue(vl.commitChanges())
def testDefaultValues(self):
    """Column DEFAULT clauses must surface as provider default values."""
    l = QgsVectorLayer("dbname=%s table='test_defaults' key='id'" % self.dbname, "test_defaults", "spatialite")
    self.assertTrue(l.isValid())
    provider = l.dataProvider()
    # the SQL default is DEFAULT 'qgis ''is good' - doubled quote unescaped here
    self.assertEqual(provider.defaultValue(1), "qgis 'is good")
    self.assertEqual(provider.defaultValue(2), 5)
    self.assertEqual(provider.defaultValue(3), 5.7)
    # no_default has no DEFAULT clause
    self.assertFalse(provider.defaultValue(4))
def testVectorLayerUtilsCreateFeatureWithProviderDefaultLiteral(self):
    """Provider default literals must be honoured by QgsVectorLayerUtils.createFeature()."""
    vl = QgsVectorLayer("dbname=%s table='test_defaults' key='id'" % self.dbname, "test_defaults", "spatialite")
    self.assertEqual(vl.dataProvider().defaultValue(2), 5)
    feat = QgsVectorLayerUtils.createFeature(vl)
    self.assertEqual(feat.attributes(), [None, "qgis 'is good", 5, 5.7, None])
    # check that provider default literals do not take precedence over passed attribute values
    feat = QgsVectorLayerUtils.createFeature(vl, attributes={1: 'qgis is great', 0: 3})
    self.assertEqual(feat.attributes(), [3, "qgis is great", 5, 5.7, None])
    # test that vector layer default value expression overrides provider default literal
    vl.setDefaultValueDefinition(3, QgsDefaultValue("4*3"))
    feat = QgsVectorLayerUtils.createFeature(vl, attributes={1: 'qgis is great', 0: 3})
    self.assertEqual(feat.attributes(), [3, "qgis is great", 5, 12, None])
def testCreateAttributeIndex(self):
    """createAttributeIndex() must create usable single-column indexes.

    Fixes over the original: the sqlite connection is now closed even when
    an assertion fails (try/finally), and the loop no longer rebinds the
    ``res``/``row`` names it is iterating over.
    """
    vl = QgsVectorLayer("dbname=%s table='test_defaults' key='id'" % self.dbname, "test_defaults", "spatialite")
    self.assertTrue(vl.dataProvider().capabilities() & QgsVectorDataProvider.CreateAttributeIndex)
    # invalid field indexes must be rejected
    self.assertFalse(vl.dataProvider().createAttributeIndex(-1))
    self.assertFalse(vl.dataProvider().createAttributeIndex(100))
    self.assertTrue(vl.dataProvider().createAttributeIndex(1))
    con = spatialite_connect(self.dbname, isolation_level=None)
    try:
        cur = con.cursor()

        def indexed_columns():
            # Columns covered by the indexes currently present on test_defaults;
            # each index is asserted to cover exactly one column.
            index_rows = list(cur.execute(
                "SELECT * FROM sqlite_master WHERE type='index' AND tbl_name='test_defaults'"))
            columns = []
            for index_row in index_rows:
                info = list(cur.execute("PRAGMA index_info({})".format(index_row[1])))
                self.assertEqual(len(info), 1)
                columns.append(info[0][2])
            return set(columns)

        self.assertEqual(indexed_columns(), {'name'})
        # second index
        self.assertTrue(vl.dataProvider().createAttributeIndex(2))
        self.assertEqual(indexed_columns(), {'name', 'number'})
    finally:
        # close even if an assertion failed, so the scratch db is not kept locked
        con.close()
def testSubsetStringRegexp(self):
    """Check that the provider supports the REGEXP syntax"""
    uri = "dbname=%s table='test_filter' (geometry) key='id'" % self.dbname
    vl = QgsVectorLayer(uri, 'test', 'spatialite')
    self.assertTrue(vl.isValid())
    vl.setSubsetString('"name" REGEXP \'[txe]\'')
    self.assertEqual(vl.featureCount(), 4)
    del vl
def testSubsetStringExtent_bug17863(self):
    """Check that the extent is correct when applied in the ctor and when
    modified after a subset string is set.

    The sequence of extent()/setSubsetString() calls reproduces bug 17863
    (stale cached extent), so the call order must not be changed."""
    def _lessdigits(s):
        # Truncate every float in the string to 3 decimals so the extent
        # comparisons are immune to rounding noise.
        return re.sub(r'(\d+\.\d{3})\d+', r'\1', s)
    testPath = "dbname=%s table='test_filter' (geometry) key='id'" % self.dbname
    subSetString = '"name" = \'int\''
    subSet = ' sql=%s' % subSetString
    # unfiltered
    vl = QgsVectorLayer(testPath, 'test', 'spatialite')
    self.assertTrue(vl.isValid())
    self.assertEqual(vl.featureCount(), 8)
    unfiltered_extent = _lessdigits(vl.extent().toString())
    self.assertNotEqual('Empty', unfiltered_extent)
    del(vl)
    # filter after construction ...
    subSet_vl2 = QgsVectorLayer(testPath, 'test', 'spatialite')
    self.assertEqual(_lessdigits(subSet_vl2.extent().toString()), unfiltered_extent)
    self.assertEqual(subSet_vl2.featureCount(), 8)
    # ... apply filter now!
    subSet_vl2.setSubsetString(subSetString)
    self.assertEqual(subSet_vl2.featureCount(), 4)
    self.assertEqual(subSet_vl2.subsetString(), subSetString)
    # the cached extent must have been invalidated by the new filter
    self.assertNotEqual(_lessdigits(subSet_vl2.extent().toString()), unfiltered_extent)
    filtered_extent = _lessdigits(subSet_vl2.extent().toString())
    del(subSet_vl2)
    # filtered in constructor
    subSet_vl = QgsVectorLayer(testPath + subSet, 'subset_test', 'spatialite')
    self.assertEqual(subSet_vl.subsetString(), subSetString)
    self.assertTrue(subSet_vl.isValid())
    # This was failing in bug 17863
    self.assertEqual(subSet_vl.featureCount(), 4)
    self.assertEqual(_lessdigits(subSet_vl.extent().toString()), filtered_extent)
    self.assertNotEqual(_lessdigits(subSet_vl.extent().toString()), unfiltered_extent)
    # clearing the filter must restore the full count and extent
    self.assertTrue(subSet_vl.setSubsetString(''))
    self.assertEqual(subSet_vl.featureCount(), 8)
    self.assertEqual(_lessdigits(subSet_vl.extent().toString()), unfiltered_extent)
def testDecodeUri(self):
    """Check that the provider URI decoding returns expected values"""
    db_path = '/home/to/path/test.db'
    layer_uri = 'dbname=\'{}\' table="test" (geometry) sql='.format(db_path)
    # decode through the provider registry and verify the file path survives
    parts = QgsProviderRegistry.instance().decodeUri('spatialite', layer_uri)
    self.assertEqual(parts['path'], db_path)
def testPKNotInt(self):
    """ Check when primary key is not an integer """
    # create a throwaway database in a fresh temp directory
    dbname = os.path.join(tempfile.mkdtemp(), "test_pknotint.sqlite")
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()

    # try the two different types of index creation
    for index_creation_method in ['CreateSpatialIndex', 'CreateMbrCache']:
        table_name = "pk_is_string_{}".format(index_creation_method)

        cur.execute("BEGIN")
        sql = "SELECT InitSpatialMetadata()"
        cur.execute(sql)

        # create table with spatial index and pk is string
        sql = "CREATE TABLE {}(id VARCHAR PRIMARY KEY NOT NULL, name TEXT NOT NULL);"
        cur.execute(sql.format(table_name))
        sql = "SELECT AddGeometryColumn('{}', 'geometry', 4326, 'POINT', 'XY')"
        cur.execute(sql.format(table_name))
        sql = "SELECT {}('{}', 'geometry')"
        cur.execute(sql.format(index_creation_method, table_name))
        sql = "insert into {} ('id', 'name', 'geometry') values( 'test_id', 'test_name', st_geomfromtext('POINT(1 2)', 4326))"
        cur.execute(sql.format(table_name))
        cur.execute("COMMIT")

        testPath = "dbname={} table='{}' (geometry)".format(dbname, table_name)
        vl = QgsVectorLayer(testPath, 'test', 'spatialite')
        self.assertTrue(vl.isValid())
        self.assertEqual(vl.featureCount(), 1)

        # make spatial request to force the index use
        request = QgsFeatureRequest(QgsRectangle(0, 0, 2, 3))
        feature = next(vl.getFeatures(request), None)
        self.assertTrue(feature)
        # with a non-integer pk the feature id is a generated integer
        self.assertEqual(feature.id(), 1)
        point = feature.geometry().asPoint()
        self.assertTrue(point)
        self.assertEqual(point.x(), 1)
        self.assertEqual(point.y(), 2)

    con.close()
    # remove the temporary directory together with the database file
    basepath, filename = os.path.split(dbname)
    shutil.rmtree(basepath)
def testLoadStyle(self):
    """Check that we can store and load a style"""
    # create test db (remove any leftover from a previous run)
    dbname = os.path.join(tempfile.gettempdir(), "test_loadstyle.sqlite")
    if os.path.exists(dbname):
        os.remove(dbname)
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)

    # simple table with primary key and one polygon feature
    sql = "CREATE TABLE test_pg (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES (1, 'toto', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()

    testPath = "dbname=%s table='test_pg' (geometry) key='id'" % dbname
    vl = QgsVectorLayer(testPath, 'test', 'spatialite')
    self.assertTrue(vl.isValid())
    self.assertEqual(vl.featureCount(), 1)

    # no style stored in the database yet: loading must fail
    err, ok = vl.loadDefaultStyle()
    self.assertFalse(ok)
    # store a style flagged as default, then loading must succeed
    vl.saveStyleToDatabase('my_style', 'My description', True, '')
    err, ok = vl.loadDefaultStyle()
    self.assertTrue(ok)
def _aliased_sql_helper(self, dbname):
    """Open a spatialite layer over each of a set of nested/aliased
    SELECT queries and check that every one is valid, spatial, and
    returns more than one feature (regression helper for DB manager
    style queries)."""
    # Query variants: nested selects, self-joins, table names with
    # spaces, views, and trailing-newline aliases.
    queries = (
        '(SELECT * FROM (SELECT * from \\"some view\\"))',
        '(SELECT * FROM \\"some view\\")',
        '(select sd.* from somedata as sd left join somedata as sd2 on ( sd2.name = sd.name ))',
        '(select sd.* from \\"somedata\\" as sd left join \\"somedata\\" as sd2 on ( sd2.name = sd.name ))',
        "(SELECT * FROM somedata as my_alias1\n)",
        "(SELECT * FROM somedata as my_alias2)",
        "(SELECT * FROM somedata AS my_alias3)",
        '(SELECT * FROM \\"somedata\\" as my_alias4\n)',
        '(SELECT * FROM (SELECT * FROM \\"somedata\\"))',
        '(SELECT my_alias5.* FROM (SELECT * FROM \\"somedata\\") AS my_alias5)',
        '(SELECT my_alias6.* FROM (SELECT * FROM \\"somedata\\" as my_alias\n) AS my_alias6)',
        '(SELECT my_alias7.* FROM (SELECT * FROM \\"somedata\\" as my_alias\n) AS my_alias7\n)',
        '(SELECT my_alias8.* FROM (SELECT * FROM \\"some data\\") AS my_alias8)',
        '(SELECT my_alias9.* FROM (SELECT * FROM \\"some data\\" as my_alias\n) AS my_alias9)',
        '(SELECT my_alias10.* FROM (SELECT * FROM \\"some data\\" as my_alias\n) AS my_alias10\n)',
        '(select sd.* from \\"some data\\" as sd left join \\"some data\\" as sd2 on ( sd2.name = sd.name ))',
        '(SELECT * FROM \\"some data\\" as my_alias11\n)',
        '(SELECT * FROM \\"some data\\" as my_alias12)',
        '(SELECT * FROM \\"some data\\" AS my_alias13)',
        '(SELECT * from \\"some data\\" AS my_alias14\n)',
        '(SELECT * FROM (SELECT * from \\"some data\\"))',
    )
    for sql in queries:
        vl = QgsVectorLayer('dbname=\'{}\' table="{}" (geom) sql='.format(dbname, sql), 'test', 'spatialite')
        self.assertTrue(vl.isValid(), 'dbname: {} - sql: {}'.format(dbname, sql))
        self.assertTrue(vl.featureCount() > 1)
        self.assertTrue(vl.isSpatial())
def testPkLessQuery(self):
    """Test if features in queries with/without pk can be retrieved by id"""
    # create test db (remove any leftover from a previous run)
    dbname = os.path.join(tempfile.gettempdir(), "test_pkless.sqlite")
    if os.path.exists(dbname):
        os.remove(dbname)
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)

    # simple table with primary key
    sql = "CREATE TABLE \"test pk\" (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test pk', 'geometry', 4326, 'POINT', 'XY')"
    cur.execute(sql)
    # ids deliberately start at 11 so they differ from 1-based row numbers
    for i in range(11, 21):
        sql = "INSERT INTO \"test pk\" (id, name, geometry) "
        sql += "VALUES ({id}, 'name {id}', GeomFromText('POINT({id} {id})', 4326))".format(id=i)
        cur.execute(sql)

    def _make_table(table_name):
        # simple table without primary key
        sql = "CREATE TABLE \"%s\" (name TEXT NOT NULL)" % table_name
        cur.execute(sql)
        sql = "SELECT AddGeometryColumn('%s', 'geom', 4326, 'POINT', 'XY')" % table_name
        cur.execute(sql)
        for i in range(11, 21):
            sql = "INSERT INTO \"%s\" (name, geom) " % table_name
            sql += "VALUES ('name {id}', GeomFromText('POINT({id} {id})', 4326))".format(id=i)
            cur.execute(sql)

    _make_table("somedata")
    _make_table("some data")

    sql = "CREATE VIEW \"some view\" AS SELECT * FROM \"somedata\""
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()

    def _check_features(vl, offset):
        # `offset` maps the stored value (11..20) to the feature id:
        # 0 when the real pk is used, 10 when sequential ids are generated
        self.assertEqual(vl.featureCount(), 10)
        i = 11
        for f in vl.getFeatures():
            self.assertTrue(f.isValid())
            self.assertTrue(vl.getFeature(i - offset).isValid())
            self.assertEqual(vl.getFeature(i - offset)['name'], 'name {id}'.format(id=i))
            self.assertEqual(f.id(), i - offset)
            self.assertEqual(f['name'], 'name {id}'.format(id=i))
            self.assertEqual(f.geometry().asWkt(), 'Point ({id} {id})'.format(id=i))
            i += 1

    # query over the table with a pk: ids come straight from the pk
    vl_pk = QgsVectorLayer('dbname=\'%s\' table="(select * from \\"test pk\\")" (geometry) sql=' % dbname, 'pk', 'spatialite')
    self.assertTrue(vl_pk.isValid())
    _check_features(vl_pk, 0)

    # queries over pk-less tables: sequential ids are generated
    vl_no_pk = QgsVectorLayer('dbname=\'%s\' table="(select * from somedata)" (geom) sql=' % dbname, 'pk', 'spatialite')
    self.assertTrue(vl_no_pk.isValid())
    _check_features(vl_no_pk, 10)

    vl_no_pk = QgsVectorLayer('dbname=\'%s\' table="(select * from \\"some data\\")" (geom) sql=' % dbname, 'pk', 'spatialite')
    self.assertTrue(vl_no_pk.isValid())
    _check_features(vl_no_pk, 10)

    # Test regression when sending queries with aliased tables from DB manager
    self._aliased_sql_helper(dbname)
def testAliasedQueries(self):
    """Test regression when sending queries with aliased tables from DB manager"""
    # reuse the shared helper against the static fixture database
    db_path = '{}/provider/spatialite.db'.format(TEST_DATA_DIR)
    self._aliased_sql_helper(db_path)
def testTextPks(self):
    """Test regression when retrieving features from tables with text PKs, see #21176"""
    # create test db (remove any leftover from a previous run)
    dbname = os.path.join(tempfile.gettempdir(), "test_text_pks.sqlite")
    if os.path.exists(dbname):
        os.remove(dbname)
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)

    # table whose primary key is TEXT, with two polygon features
    sql = "CREATE TABLE test_pg (id TEXT NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES ('one', 'toto', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES ('two', 'bogo', GeomFromText('POLYGON((0 0,2 0,2 2,0 2,0 0))', 4326))"
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()

    def _test_db(testPath):
        # Features must be retrievable both by iteration and by the
        # (positive integer) feature id, with matching geometry.
        vl = QgsVectorLayer(testPath, 'test', 'spatialite')
        self.assertTrue(vl.isValid())

        f = next(vl.getFeatures())
        self.assertTrue(f.isValid())
        fid = f.id()
        self.assertTrue(fid > 0)
        self.assertTrue(vl.getFeature(fid).isValid())
        f2 = next(vl.getFeatures(QgsFeatureRequest().setFilterFid(fid)))
        self.assertTrue(f2.isValid())
        self.assertEqual(f2.id(), f.id())
        self.assertEqual(f2.geometry().asWkt(), f.geometry().asWkt())

        for f in vl.getFeatures():
            self.assertTrue(f.isValid())
            self.assertTrue(vl.getFeature(f.id()).isValid())
            self.assertEqual(vl.getFeature(f.id()).id(), f.id())

    # exercise every URI variant: with/without geometry column, with/without key
    testPath = "dbname=%s table='test_pg' (geometry) key='id'" % dbname
    _test_db(testPath)
    testPath = "dbname=%s table='test_pg' (geometry)" % dbname
    _test_db(testPath)
    testPath = "dbname=%s table='test_pg' key='id'" % dbname
    _test_db(testPath)
    testPath = "dbname=%s table='test_pg'" % dbname
    _test_db(testPath)
# Run the whole test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
linebp/pandas | pandas/core/sparse/list.py | 9 | 4058 | import warnings
import numpy as np
from pandas.core.base import PandasObject
from pandas.io.formats.printing import pprint_thing
from pandas.core.dtypes.common import is_scalar
from pandas.core.sparse.array import SparseArray
from pandas.util._validators import validate_bool_kwarg
import pandas._libs.sparse as splib
class SparseList(PandasObject):
    """
    Data structure for accumulating data to be converted into a
    SparseArray. Has similar API to the standard Python list

    Parameters
    ----------
    data : scalar or array-like
    fill_value : scalar, default NaN
    """

    def __init__(self, data=None, fill_value=np.nan):

        # see gh-13784
        warnings.warn("SparseList is deprecated and will be removed "
                      "in a future version", FutureWarning, stacklevel=2)

        self.fill_value = fill_value
        # list of SparseArray chunks accumulated by append()
        self._chunks = []

        if data is not None:
            self.append(data)

    def __unicode__(self):
        # repr of the object header followed by each chunk on its own line(s)
        contents = '\n'.join(repr(c) for c in self._chunks)
        return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))

    def __len__(self):
        # total logical length across all chunks
        return sum(len(c) for c in self._chunks)

    def __getitem__(self, i):
        """Return the element at position ``i`` (negative indices allowed)."""
        if i < 0:
            if i + len(self) < 0:  # pragma: no cover
                raise ValueError('%d out of range' % i)
            i += len(self)

        # walk chunks until the one containing position ``i`` is reached
        passed = 0
        j = 0
        while i >= passed + len(self._chunks[j]):
            passed += len(self._chunks[j])
            j += 1
        return self._chunks[j][i - passed]

    def __setitem__(self, i, value):
        # append-only structure: in-place assignment is not supported
        raise NotImplementedError

    @property
    def nchunks(self):
        # number of not-yet-consolidated chunks
        return len(self._chunks)

    @property
    def is_consolidated(self):
        # consolidated means everything lives in a single chunk
        return self.nchunks == 1

    def consolidate(self, inplace=True):
        """
        Internally consolidate chunks of data

        Parameters
        ----------
        inplace : boolean, default True
            Modify the calling object instead of constructing a new one

        Returns
        -------
        splist : SparseList
            If inplace=False, new object, otherwise reference to existing
            object
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if not inplace:
            result = self.copy()
        else:
            result = self

        if result.is_consolidated:
            return result

        result._consolidate_inplace()
        return result

    def _consolidate_inplace(self):
        # merge all chunks into one SparseArray: concatenate the dense
        # values and the (offset-adjusted) sparse indexes
        new_values = np.concatenate([c.sp_values for c in self._chunks])
        new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
        new_arr = SparseArray(new_values, sparse_index=new_index,
                              fill_value=self.fill_value)
        self._chunks = [new_arr]

    def copy(self):
        """
        Return copy of the list

        Returns
        -------
        new_list : SparseList
        """
        # shallow copy: the chunk SparseArrays are shared, the list is not
        new_splist = SparseList(fill_value=self.fill_value)
        new_splist._chunks = list(self._chunks)
        return new_splist

    def to_array(self):
        """
        Return SparseArray from data stored in the SparseList

        Returns
        -------
        sparr : SparseArray
        """
        self.consolidate(inplace=True)
        return self._chunks[0]

    def append(self, value):
        """
        Append element or array-like chunk of data to the SparseList

        Parameters
        ----------
        value: scalar or array-like
        """
        if is_scalar(value):
            value = [value]

        sparr = SparseArray(value, fill_value=self.fill_value)
        self._chunks.append(sparr)
        # NOTE(review): _consolidated is never read anywhere in this class;
        # consolidation state is derived from nchunks -- confirm before removing
        self._consolidated = False
def _concat_sparse_indexes(indexes):
    """Concatenate sparse indexes into a single IntIndex, shifting each
    block's positions past everything that precedes it."""
    offset = 0
    shifted_blocks = []
    for idx in indexes:
        # shift this block's integer positions by the accumulated length
        shifted_blocks.append(idx.to_int_index().indices + offset)
        offset += idx.length
    return splib.IntIndex(offset, np.concatenate(shifted_blocks))
| bsd-3-clause |
vktr/CouchPotatoServer | libs/guessit/transfo/guess_date.py | 150 | 1217 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.date import search_date
import logging
log = logging.getLogger(__name__)
def guess_date(string):
    """Try to find a date in *string*.

    Returns a ({'date': date}, span) pair on success, or (None, None)
    when no date is found.
    """
    found, span = search_date(string)
    if not found:
        return None, None
    return {'date': found}, span
def process(mtree):
    # Run the date guesser over each single node with confidence 1.0.
    SingleNodeGuesser(guess_date, 1.0, log).process(mtree)
| gpl-3.0 |
frewsxcv/keyczar | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/Subversion.py | 19 | 2550 | """SCons.Tool.Subversion.py
Tool-specific initialization for Subversion.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/Subversion.py 4043 2009/02/23 09:06:45 scons"
import os.path
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Add a Builder factory function and construction variables for
    Subversion to an Environment."""

    def SubversionFactory(repos, module='', env=env):
        """Return a Builder that fetches $TARGET from the given
        repository/module via `svn cat` (see $SVNCOM below)."""
        # fail if repos is not an absolute path name?
        if module != '':
            # ensure the module path ends with a separator so it can be
            # prefixed directly onto $TARGET in $SVNCOM
            module = os.path.join(module, '')
        act = SCons.Action.Action('$SVNCOM', '$SVNCOMSTR')
        return SCons.Builder.Builder(action = act,
                                     env = env,
                                     SVNREPOSITORY = repos,
                                     SVNMODULE = module)

    #setattr(env, 'Subversion', SubversionFactory)
    env.Subversion = SubversionFactory

    env['SVN'] = 'svn'
    env['SVNFLAGS'] = SCons.Util.CLVar('')
    # command template: cat the repository file and redirect into $TARGET
    env['SVNCOM'] = '$SVN $SVNFLAGS cat $SVNREPOSITORY/$SVNMODULE$TARGET > $TARGET'
def exists(env):
    # The tool is usable only if an 'svn' executable can be detected.
    return env.Detect('svn')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
xiiicyw/Data-Wrangling-with-MongoDB | Lesson_2_Problem_Set/03-Processing_All/process.py | 1 | 2829 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Let's assume that you combined the code from the previous 2 exercises
# with code from the lesson on how to build requests, and downloaded all the data locally.
# The files are in a directory "data", named after the carrier and airport:
# "{}-{}.html".format(carrier, airport), for example "FL-ATL.html".
# The table with flight info has a table class="dataTDRight".
# There are couple of helper functions to deal with the data files.
# Please do not change them for grading purposes.
# All your changes should be in the 'process_file' function
from bs4 import BeautifulSoup
from zipfile import ZipFile
import os
# Directory holding the downloaded per-carrier/airport HTML files.
datadir = "data"


def open_zip(datadir):
    """Extract the <datadir>.zip archive into the current directory."""
    archive_name = '{0}.zip'.format(datadir)
    with ZipFile(archive_name, 'r') as archive:
        archive.extractall()
def process_all(datadir):
    """Return the names of all entries in *datadir* (order is OS-dependent)."""
    return os.listdir(datadir)
def process_file(f):
    """Parse one downloaded flight-data HTML file into a list of dicts.

    *f* is a filename of the form "{carrier}-{airport}.html" located in the
    module-level ``datadir``. Rows of the table with class "dataTDRight" are
    parsed; yearly "TOTAL" summary rows are skipped. Each returned entry is:

        {"courier": "FL", "airport": "ATL", "year": 2012, "month": 12,
         "flights": {"domestic": 100, "international": 100}}

    year, month and the flight counts are ints.
    """
    data = []
    # filename prefix "XX-YYY" encodes carrier code and airport code
    courier, airport = f[:6].split("-")

    with open("{}/{}".format(datadir, f), "r") as html:
        soup = BeautifulSoup(html, "html.parser")
        rows = soup.find_all("tr", attrs={"class": "dataTDRight"})
        for row in rows:
            cells = row.find_all("td")
            # skip the per-year TOTAL summary rows
            if cells[1].get_text() == "TOTAL":
                continue
            # Bug fix: build a brand-new dict for every row. The previous
            # code appended one shared `info` dict each iteration, so every
            # element of the result aliased the *last* row's values.
            data.append({
                "courier": courier,
                "airport": airport,
                "year": int(cells[0].get_text()),
                "month": int(cells[1].get_text()),
                "flights": {
                    "domestic": int(cells[2].get_text().replace(",", "")),
                    "international": int(cells[3].get_text().replace(",", "")),
                },
            })

    return data
def test():
    # Smoke test: unpack the bundled data.zip fixture and parse every file.
    # (Python 2 print statements -- this module targets Python 2.)
    print "Running a simple test..."
    open_zip(datadir)
    files = process_all(datadir)
    data = []
    for f in files:
        data += process_file(f)

    # the fixture is expected to yield exactly 3 parsed rows
    assert len(data) == 3
    for entry in data[:3]:
        assert type(entry["year"]) == int
        assert type(entry["flights"]["domestic"]) == int
        assert len(entry["airport"]) == 3
        assert len(entry["courier"]) == 2
    print "... success!"
# Run the smoke test when executed as a script.
if __name__ == "__main__":
    test()
nzlosh/st2 | st2actions/tests/unit/test_actions_registrar.py | 3 | 7947 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import jsonschema
import mock
import yaml
import st2common.bootstrap.actionsregistrar as actions_registrar
from st2common.persistence.action import Action
import st2common.validators.api.action as action_validator
from st2common.models.db.runner import RunnerTypeDB
import st2tests.base as tests_base
import st2tests.fixturesloader as fixtures_loader
from st2tests.fixturesloader import get_fixtures_base_path
# Shared fake runner returned by the patched get_runner_model() below.
MOCK_RUNNER_TYPE_DB = RunnerTypeDB(name="run-local", runner_module="st2.runners.local")


# NOTE: We need to perform this patching because test fixtures are located outside of the packs
# base paths directory. This will never happen outside the context of test fixtures.
@mock.patch(
    "st2common.content.utils.get_pack_base_path",
    mock.Mock(return_value=os.path.join(get_fixtures_base_path(), "generic")),
)
class ActionsRegistrarTest(tests_base.DbTestCase):
    """DB-backed tests for the action registrar: registering action
    metadata files from fixture packs and validating their contents."""

    @mock.patch.object(
        action_validator, "_is_valid_pack", mock.MagicMock(return_value=True)
    )
    @mock.patch.object(
        action_validator,
        "get_runner_model",
        mock.MagicMock(return_value=MOCK_RUNNER_TYPE_DB),
    )
    def test_register_all_actions(self):
        # Registering every fixture pack must succeed and persist actions.
        try:
            packs_base_path = fixtures_loader.get_fixtures_base_path()
            all_actions_in_db = Action.get_all()
            actions_registrar.register_actions(packs_base_paths=[packs_base_path])
        except Exception as e:
            print(six.text_type(e))
            self.fail("All actions must be registered without exceptions.")
        else:
            all_actions_in_db = Action.get_all()
            self.assertTrue(len(all_actions_in_db) > 0)

        # Assert metadata_file field is populated
        expected_path = "actions/action-with-no-parameters.yaml"
        self.assertEqual(all_actions_in_db[0].metadata_file, expected_path)

    def test_register_actions_from_bad_pack(self):
        # A malformed pack layout must make registration raise.
        packs_base_path = tests_base.get_fixtures_path()
        try:
            actions_registrar.register_actions(packs_base_paths=[packs_base_path])
            self.fail("Should have thrown.")
        except:
            pass

    @mock.patch.object(
        action_validator, "_is_valid_pack", mock.MagicMock(return_value=True)
    )
    @mock.patch.object(
        action_validator,
        "get_runner_model",
        mock.MagicMock(return_value=MOCK_RUNNER_TYPE_DB),
    )
    def test_pack_name_missing(self):
        # When the metadata file omits "pack", the registrar must fill in
        # the pack name it was given.
        registrar = actions_registrar.ActionsRegistrar()
        loader = fixtures_loader.FixturesLoader()
        action_file = loader.get_fixture_file_path_abs(
            "generic", "actions", "action_3_pack_missing.yaml"
        )
        registrar._register_action("dummy", action_file)
        action_name = None
        with open(action_file, "r") as fd:
            content = yaml.safe_load(fd)
            action_name = str(content["name"])
            action_db = Action.get_by_name(action_name)
            expected_msg = "Content pack must be set to dummy"
            self.assertEqual(action_db.pack, "dummy", expected_msg)
            Action.delete(action_db)

    @mock.patch.object(
        action_validator, "_is_valid_pack", mock.MagicMock(return_value=True)
    )
    @mock.patch.object(
        action_validator,
        "get_runner_model",
        mock.MagicMock(return_value=MOCK_RUNNER_TYPE_DB),
    )
    def test_register_action_with_no_params(self):
        # An action without a parameters section is valid and registers cleanly.
        registrar = actions_registrar.ActionsRegistrar()
        loader = fixtures_loader.FixturesLoader()
        action_file = loader.get_fixture_file_path_abs(
            "generic", "actions", "action-with-no-parameters.yaml"
        )

        self.assertEqual(registrar._register_action("dummy", action_file), None)

    @mock.patch.object(
        action_validator, "_is_valid_pack", mock.MagicMock(return_value=True)
    )
    @mock.patch.object(
        action_validator,
        "get_runner_model",
        mock.MagicMock(return_value=MOCK_RUNNER_TYPE_DB),
    )
    def test_register_action_invalid_parameter_type_attribute(self):
        # A parameter whose "type" is not a valid JSON-schema type must be
        # rejected with a schema validation error.
        registrar = actions_registrar.ActionsRegistrar()
        loader = fixtures_loader.FixturesLoader()
        action_file = loader.get_fixture_file_path_abs(
            "generic", "actions", "action_invalid_param_type.yaml"
        )

        expected_msg = "'list' is not valid under any of the given schema"
        self.assertRaisesRegexp(
            jsonschema.ValidationError,
            expected_msg,
            registrar._register_action,
            "dummy",
            action_file,
        )

    @mock.patch.object(
        action_validator, "_is_valid_pack", mock.MagicMock(return_value=True)
    )
    @mock.patch.object(
        action_validator,
        "get_runner_model",
        mock.MagicMock(return_value=MOCK_RUNNER_TYPE_DB),
    )
    def test_register_action_invalid_parameter_name(self):
        # Parameter names with invalid characters (e.g. dashes) are rejected.
        registrar = actions_registrar.ActionsRegistrar()
        loader = fixtures_loader.FixturesLoader()
        action_file = loader.get_fixture_file_path_abs(
            "generic", "actions", "action_invalid_parameter_name.yaml"
        )

        expected_msg = (
            'Parameter name "action-name" is invalid. Valid characters for '
            "parameter name are"
        )
        self.assertRaisesRegexp(
            jsonschema.ValidationError,
            expected_msg,
            registrar._register_action,
            "generic",
            action_file,
        )

    @mock.patch.object(
        action_validator, "_is_valid_pack", mock.MagicMock(return_value=True)
    )
    @mock.patch.object(
        action_validator,
        "get_runner_model",
        mock.MagicMock(return_value=MOCK_RUNNER_TYPE_DB),
    )
    def test_invalid_params_schema(self):
        # A structurally invalid parameters schema must raise ValidationError.
        registrar = actions_registrar.ActionsRegistrar()
        loader = fixtures_loader.FixturesLoader()
        action_file = loader.get_fixture_file_path_abs(
            "generic", "actions", "action-invalid-schema-params.yaml"
        )
        try:
            registrar._register_action("generic", action_file)
            self.fail("Invalid action schema. Should have failed.")
        except jsonschema.ValidationError:
            pass

    @mock.patch.object(
        action_validator, "_is_valid_pack", mock.MagicMock(return_value=True)
    )
    @mock.patch.object(
        action_validator,
        "get_runner_model",
        mock.MagicMock(return_value=MOCK_RUNNER_TYPE_DB),
    )
    def test_action_update(self):
        # Re-registering the same action file must be idempotent.
        registrar = actions_registrar.ActionsRegistrar()
        loader = fixtures_loader.FixturesLoader()
        action_file = loader.get_fixture_file_path_abs(
            "generic", "actions", "action1.yaml"
        )
        registrar._register_action("wolfpack", action_file)
        # try registering again. this should not throw errors.
        registrar._register_action("wolfpack", action_file)
        action_name = None
        with open(action_file, "r") as fd:
            content = yaml.safe_load(fd)
            action_name = str(content["name"])
            action_db = Action.get_by_name(action_name)
            expected_msg = "Content pack must be set to wolfpack"
            self.assertEqual(action_db.pack, "wolfpack", expected_msg)
            Action.delete(action_db)
| apache-2.0 |
ldgabbay/foostache-python | setup.py | 1 | 2313 | import io
import os
import re
from setuptools import setup
import sys
# Base package list and install requirements; extended per interpreter below.
PACKAGES = [
    "foostache",
]

INSTALL_REQUIRES = [
    "future~=0.17.1",
    "ujson~=1.35",
]

# The generated parser package and the matching ANTLR runtime differ
# between Python 2 and Python 3.
if sys.version_info[0] == 2:
    PACKAGES.append("foostache.py2parser")
    INSTALL_REQUIRES.append("antlr4-python2-runtime~=4.7.2")
elif sys.version_info[0] == 3:
    PACKAGES.append("foostache.py3parser")
    INSTALL_REQUIRES.append("antlr4-python3-runtime~=4.7.2")
else:
    raise RuntimeError("Unhandled Python version.")
def read(*paths):
    """Return the UTF-8 decoded contents of the file at os.path.join(*paths)."""
    full_path = os.path.join(*paths)
    with io.open(full_path, encoding="utf_8") as handle:
        return handle.read()
def find_version(*file_paths):
    """Extract the __version__ string from the file at *file_paths*.

    Raises RuntimeError when no __version__ assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Directory containing this setup.py, used to resolve the README path.
HERE = os.path.abspath(os.path.dirname(__file__))

setup(
    name="foostache",
    # single-source the version from the package's __init__.py
    version=find_version("src", "foostache", "__init__.py"),
    description="The foostache template engine.",
    long_description=read(HERE, "README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/ldgabbay/foostache-python/",
    author="Lynn Gabbay",
    author_email="gabbay@gmail.com",
    license="MIT",
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing",
    ],
    packages=PACKAGES,
    package_dir={"": "src"},
    entry_points={
        # installs the `foostache` command-line entry point
        "console_scripts": [
            "foostache = foostache.__main__:main",
        ],
    },
    test_suite="tests",
    install_requires=INSTALL_REQUIRES,
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
)
| mit |
shreyans800755/coala | tests/results/result_actions/DoNothingActionTest.py | 14 | 1350 | import unittest
from coala_utils.ContextManagers import retrieve_stdout
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.result_actions.DoNothingAction import DoNothingAction
from coalib.settings.Section import Section, Setting
class DoNothingActionTest(unittest.TestCase):
    """Tests for DoNothingAction: always applicable, never changes anything."""

    def setUp(self):
        # Fixture: two files plus diffs touching both (insert + delete in
        # 'a', insert at the top of 'b').
        self.uut = DoNothingAction()
        self.file_dict = {'a': ['a\n', 'b\n', 'c\n'], 'b': ['old_first\n']}
        self.diff_dict = {'a': Diff(self.file_dict['a']),
                          'b': Diff(self.file_dict['b'])}
        self.diff_dict['a'].add_lines(1, ['test\n'])
        self.diff_dict['a'].delete_line(3)
        self.diff_dict['b'].add_lines(0, ['first\n'])
        self.test_result = Result('origin', 'message', diffs=self.diff_dict)
        self.section = Section('name')
        self.section.append(Setting('colored', 'false'))

    def test_is_applicable(self):
        # applicable even when the diff is a pure rename
        diff = Diff([], rename='new_name')
        result = Result('', '', diffs={'f': diff})
        self.assertTrue(self.uut.is_applicable(result, {}, {'f': diff}))

    def test_apply(self):
        # applying the action must return None (i.e. do nothing)
        with retrieve_stdout() as stdout:
            self.assertEqual(self.uut.apply(self.test_result,
                                            self.file_dict,
                                            {}), None)
| agpl-3.0 |
joakim-hove/django | tests/forms_tests/tests/test_input_formats.py | 313 | 38501 | from datetime import date, datetime, time
from django import forms
from django.test import SimpleTestCase, override_settings
from django.utils.translation import activate, deactivate
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"], USE_L10N=True)
class LocalizedTimeTests(SimpleTestCase):
def setUp(self):
# nl/formats.py has customized TIME_INPUT_FORMATS:
# ['%H:%M:%S', '%H.%M:%S', '%H.%M', '%H:%M']
activate('nl')
def tearDown(self):
deactivate()
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
# ISO formats are accepted, even if not specified in formats.py
result = f.clean('13:30:05.000155')
self.assertEqual(result, time(13, 30, 5, 155))
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
    "Localized TimeFields with manually specified input formats can accept those formats"
    field = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
    # Explicit input_formats win: neither 12-hour nor colon input is accepted.
    with self.assertRaises(forms.ValidationError):
        field.clean('1:30:05 PM')
    with self.assertRaises(forms.ValidationError):
        field.clean('13:30:05')
    # Dotted H.M.S input parses; rendering uses the default colon format.
    parsed = field.clean('13.30.05')
    self.assertEqual(parsed, time(13, 30, 5))
    self.assertEqual(field.widget._format_value(parsed), "13:30:05")
    # Dotted H.M input parses; rendering again uses the default format.
    parsed = field.clean('13.30')
    self.assertEqual(parsed, time(13, 30, 0))
    self.assertEqual(field.widget._format_value(parsed), "13:30:00")
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"])
class CustomTimeInputFormatsTests(SimpleTestCase):
    """Time parsing/rendering when TIME_INPUT_FORMATS is overridden to 12-hour formats."""

    def _assert_time(self, field, raw, expected, rendered):
        # Clean ``raw`` through ``field``, then round-trip via the widget.
        parsed = field.clean(raw)
        self.assertEqual(parsed, expected)
        self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_timeField(self):
        "TimeFields can parse dates in the default format"
        f = forms.TimeField()
        # A 24-hour string is not among the overridden input formats.
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')
        # The first (default) format parses and round-trips as-is.
        self._assert_time(f, '1:30:05 PM', time(13, 30, 5), '01:30:05 PM')
        # The secondary format parses; rendering uses the default format.
        self._assert_time(f, '1:30 PM', time(13, 30, 0), "01:30:00 PM")

    def test_localized_timeField(self):
        "Localized TimeFields act as unlocalized widgets"
        f = forms.TimeField(localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')
        self._assert_time(f, '1:30:05 PM', time(13, 30, 5), '01:30:05 PM')
        self._assert_time(f, '01:30 PM', time(13, 30, 0), "01:30:00 PM")

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
        # Explicit input_formats replace the setting's 12-hour formats.
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')
        # Dotted input parses; rendering still uses the 12-hour setting format.
        self._assert_time(f, '13.30.05', time(13, 30, 5), "01:30:05 PM")
        self._assert_time(f, '13.30', time(13, 30, 0), "01:30:00 PM")

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05')
        self._assert_time(f, '13.30.05', time(13, 30, 5), "01:30:05 PM")
        self._assert_time(f, '13.30', time(13, 30, 0), "01:30:00 PM")
class SimpleTimeFormatTests(SimpleTestCase):
    """Time parsing/rendering with the stock (unlocalized) default TIME_INPUT_FORMATS."""

    def test_timeField(self):
        "TimeFields can parse dates in the default format"
        f = forms.TimeField()
        # Parse a time in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30:05')
        self.assertEqual(result, time(13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid, but non-default format, get a parsed result
        result = f.clean('13:30')
        self.assertEqual(result, time(13, 30, 0))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_localized_timeField(self):
        "Localized TimeFields in a non-localized environment act as unlocalized widgets"
        # FIX: the field must actually be localized for this test to cover what
        # its docstring claims; mirrors test_localized_timeField_with_inputformat
        # below, which already passes localize=True.
        f = forms.TimeField(localize=True)
        # Parse a time in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30:05')
        self.assertEqual(result, time(13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean('13:30')
        self.assertEqual(result, time(13, 30, 0))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"])
        # Parse a time in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30:05 PM')
        self.assertEqual(result, time(13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30 PM')
        self.assertEqual(result, time(13, 30, 0))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:00")

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"], localize=True)
        # Parse a time in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30:05 PM')
        self.assertEqual(result, time(13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:05")
        # Parse a time in a valid format, get a parsed result
        result = f.clean('1:30 PM')
        self.assertEqual(result, time(13, 30, 0))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "13:30:00")
@override_settings(DATE_INPUT_FORMATS=["%d/%m/%Y", "%d-%m-%Y"], USE_L10N=True)
class LocalizedDateTests(SimpleTestCase):
    """Date parsing/rendering under the 'de' locale with USE_L10N enabled.

    The assertions show the locale's dotted formats being accepted while the
    setting's slash format is rejected.
    """

    def setUp(self):
        activate('de')

    def tearDown(self):
        deactivate()

    def _assert_date(self, field, raw, rendered):
        # Every accepted input in this class parses to 21 Dec 2010; check the
        # parse and then the round trip through the widget.
        parsed = field.clean(raw)
        self.assertEqual(parsed, date(2010, 12, 21))
        self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateField(self):
        "DateFields can parse dates in the default format"
        f = forms.DateField()
        # The setting's slash format is not accepted under the active locale.
        with self.assertRaises(forms.ValidationError):
            f.clean('21/12/2010')
        # ISO formats are accepted, even if not specified in formats.py
        self.assertEqual(f.clean('2010-12-21'), date(2010, 12, 21))
        self._assert_date(f, '21.12.2010', '21.12.2010')
        self._assert_date(f, '21.12.10', "21.12.2010")

    def test_localized_dateField(self):
        "Localized DateFields act as unlocalized widgets"
        f = forms.DateField(localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('21/12/2010')
        self._assert_date(f, '21.12.2010', '21.12.2010')
        self._assert_date(f, '21.12.10', "21.12.2010")

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
        # Explicit input_formats reject ISO, setting, and locale formats alike.
        for bad in ('2010-12-21', '21/12/2010', '21.12.2010'):
            with self.assertRaises(forms.ValidationError):
                f.clean(bad)
        self._assert_date(f, '12.21.2010', "21.12.2010")
        self._assert_date(f, '12-21-2010', "21.12.2010")

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
        for bad in ('2010-12-21', '21/12/2010', '21.12.2010'):
            with self.assertRaises(forms.ValidationError):
                f.clean(bad)
        self._assert_date(f, '12.21.2010', "21.12.2010")
        self._assert_date(f, '12-21-2010', "21.12.2010")
@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y", "%d-%m-%Y"])
class CustomDateInputFormatsTests(SimpleTestCase):
    """Date parsing/rendering when DATE_INPUT_FORMATS is overridden (no L10N)."""

    def _assert_date(self, field, raw, rendered):
        # Every accepted input in this class parses to 21 Dec 2010; check the
        # parse and then the round trip through the widget.
        parsed = field.clean(raw)
        self.assertEqual(parsed, date(2010, 12, 21))
        self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateField(self):
        "DateFields can parse dates in the default format"
        f = forms.DateField()
        # ISO input is not among the overridden formats.
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')
        # First format parses and round-trips as-is.
        self._assert_date(f, '21.12.2010', '21.12.2010')
        # Secondary format parses; rendering uses the first format.
        self._assert_date(f, '21-12-2010', "21.12.2010")

    def test_localized_dateField(self):
        "Localized DateFields act as unlocalized widgets"
        f = forms.DateField(localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')
        self._assert_date(f, '21.12.2010', '21.12.2010')
        self._assert_date(f, '21-12-2010', "21.12.2010")

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
        # Explicit input_formats replace the setting formats entirely.
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')
        self._assert_date(f, '12.21.2010', "21.12.2010")
        self._assert_date(f, '12-21-2010', "21.12.2010")

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21')
        self._assert_date(f, '12.21.2010', "21.12.2010")
        self._assert_date(f, '12-21-2010', "21.12.2010")
class SimpleDateFormatTests(SimpleTestCase):
    """Date parsing/rendering with the stock (unlocalized) default DATE_INPUT_FORMATS."""

    def test_dateField(self):
        "DateFields can parse dates in the default format"
        f = forms.DateField()
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")
        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('12/21/2010')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")

    def test_localized_dateField(self):
        "Localized DateFields in a non-localized environment act as unlocalized widgets"
        # FIX: the field must actually be localized for this test to cover what
        # its docstring claims; mirrors test_localized_dateField_with_inputformat
        # below, which already passes localize=True.
        f = forms.DateField(localize=True)
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('12/21/2010')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"])
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('21-12-2010')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('21-12-2010')
        self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"], USE_L10N=True)
class LocalizedDateTimeTests(SimpleTestCase):
    """Datetime parsing/rendering under the 'de' locale with USE_L10N enabled."""

    def setUp(self):
        activate('de')

    def tearDown(self):
        deactivate()

    def _assert_dt(self, field, raw, expected, rendered):
        # Clean ``raw``, compare against ``expected``, then round-trip via the widget.
        parsed = field.clean(raw)
        self.assertEqual(parsed, expected)
        self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateTimeField(self):
        "DateTimeFields can parse dates in the default format"
        f = forms.DateTimeField()
        # The setting's 12-hour format is not accepted under the active locale.
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM 21/12/2010')
        # ISO formats are accepted, even if not specified in formats.py
        self.assertEqual(f.clean('2010-12-21 13:30:05'), datetime(2010, 12, 21, 13, 30, 5))
        self._assert_dt(f, '21.12.2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5), '21.12.2010 13:30:05')
        self._assert_dt(f, '21.12.2010 13:30', datetime(2010, 12, 21, 13, 30), "21.12.2010 13:30:00")

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields act as unlocalized widgets"
        f = forms.DateTimeField(localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM 21/12/2010')
        self._assert_dt(f, '21.12.2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5), '21.12.2010 13:30:05')
        self._assert_dt(f, '21.12.2010 13:30', datetime(2010, 12, 21, 13, 30), "21.12.2010 13:30:00")

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"])
        # NOTE(review): the doubled time in the first rejected input looks odd;
        # possibly intended to be '2010-12-21 13:30:05' — kept as-is.
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21 13:30:05 13:30:05')
        with self.assertRaises(forms.ValidationError):
            f.clean('1:30:05 PM 21/12/2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')
        self._assert_dt(f, '13.30.05 12.21.2010', datetime(2010, 12, 21, 13, 30, 5), "21.12.2010 13:30:05")
        self._assert_dt(f, '13.30 12-21-2010', datetime(2010, 12, 21, 13, 30), "21.12.2010 13:30:00")

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"], localize=True)
        for bad in ('2010-12-21 13:30:05', '1:30:05 PM 21/12/2010', '13:30:05 21.12.2010'):
            with self.assertRaises(forms.ValidationError):
                f.clean(bad)
        self._assert_dt(f, '13.30.05 12.21.2010', datetime(2010, 12, 21, 13, 30, 5), "21.12.2010 13:30:05")
        self._assert_dt(f, '13.30 12-21-2010', datetime(2010, 12, 21, 13, 30), "21.12.2010 13:30:00")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"])
class CustomDateTimeInputFormatsTests(SimpleTestCase):
    """Datetime parsing/rendering with DATETIME_INPUT_FORMATS overridden (no L10N)."""

    def _assert_dt(self, field, raw, expected, rendered):
        # Clean ``raw``, compare against ``expected``, then round-trip via the widget.
        parsed = field.clean(raw)
        self.assertEqual(parsed, expected)
        self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateTimeField(self):
        "DateTimeFields can parse dates in the default format"
        f = forms.DateTimeField()
        # ISO input is not among the overridden formats.
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21 13:30:05')
        # First format parses and round-trips as-is.
        self._assert_dt(f, '1:30:05 PM 21/12/2010', datetime(2010, 12, 21, 13, 30, 5), '01:30:05 PM 21/12/2010')
        # Secondary format parses; rendering uses the first format.
        self._assert_dt(f, '1:30 PM 21-12-2010', datetime(2010, 12, 21, 13, 30), "01:30:00 PM 21/12/2010")

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields act as unlocalized widgets"
        f = forms.DateTimeField(localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21 13:30:05')
        self._assert_dt(f, '1:30:05 PM 21/12/2010', datetime(2010, 12, 21, 13, 30, 5), '01:30:05 PM 21/12/2010')
        self._assert_dt(f, '1:30 PM 21-12-2010', datetime(2010, 12, 21, 13, 30), "01:30:00 PM 21/12/2010")

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"])
        # Explicit input_formats replace the setting formats entirely.
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21 13:30:05')
        self._assert_dt(f, '12.21.2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5), "01:30:05 PM 21/12/2010")
        self._assert_dt(f, '12-21-2010 13:30', datetime(2010, 12, 21, 13, 30), "01:30:00 PM 21/12/2010")

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"], localize=True)
        with self.assertRaises(forms.ValidationError):
            f.clean('13:30:05 21.12.2010')
        with self.assertRaises(forms.ValidationError):
            f.clean('2010-12-21 13:30:05')
        self._assert_dt(f, '12.21.2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5), "01:30:05 PM 21/12/2010")
        self._assert_dt(f, '12-21-2010 13:30', datetime(2010, 12, 21, 13, 30), "01:30:00 PM 21/12/2010")
class SimpleDateTimeFormatTests(SimpleTestCase):
    """Datetime parsing/rendering with the stock (unlocalized) default DATETIME_INPUT_FORMATS."""

    def test_dateTimeField(self):
        "DateTimeFields can parse dates in the default format"
        f = forms.DateTimeField()
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")
        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('12/21/2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields in a non-localized environment act as unlocalized widgets"
        # FIX: the field must actually be localized for this test to cover what
        # its docstring claims; mirrors test_localized_dateTimeField_with_inputformat
        # below, which already passes localize=True.
        f = forms.DateTimeField(localize=True)
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('2010-12-21 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('12/21/2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"])
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30:05 PM 21.12.2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30 PM 21-12-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:00")

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30:05 PM 21.12.2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:05")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('1:30 PM 21-12-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "2010-12-21 13:30:00")
| bsd-3-clause |
kmaehashi/sensorbee-python | example/websocket_example.py | 1 | 1249 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
WebSocket Example
``pip install websocket-client`` to use websocket client API.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from pysensorbee import SensorBeeAPI
import time
#import websocket
#websocket.enableTrace(True)
class WebSocketExample(object):
    """Demonstrates the SensorBee WebSocket query API against a throwaway topology."""

    def main(self, topology='ws_test'):
        api = SensorBeeAPI()
        api.create_topology(topology)
        try:
            client = api.wsquery(topology)
            client.start()

            def on_result(client, rid, reply_type, msg):
                # Dump every asynchronous reply received over the socket.
                print("-------------------------------")
                print("RID: {0}".format(rid))
                print("Type: {0}".format(reply_type))
                print("Message: {0}".format(msg))

            client.send("eval 1+2+3;", on_result, 1)
            client.send("invalid_syntax", on_result, 2)
            # Create the source via the synchronous HTTP API so it exists
            # before the streaming SELECT below is issued.
            api.query(topology, "CREATE SOURCE node_stats TYPE node_statuses")  # sync
            client.send("SELECT RSTREAM * FROM node_stats [RANGE 1 TUPLES];", on_result, 3)
            # Give the server a few seconds to deliver streaming results.
            time.sleep(5)
        finally:
            # Always tear down the topology, even if a request above failed.
            api.delete_topology(topology)
            time.sleep(3)
time.sleep(3)
# Script entry point: run the example against a local SensorBee server.
if __name__ == '__main__':
    WebSocketExample().main()
| mit |
js850/PyGMIN | pygmin/systems/basesystem.py | 1 | 16801 | import tempfile
from pygmin.landscape import DoubleEndedConnect, DoubleEndedConnectPar
from pygmin import basinhopping
from pygmin.storage import Database
from pygmin.takestep import RandomDisplacement, AdaptiveStepsizeTemperature
from pygmin.utils.xyz import write_xyz
from pygmin.optimize import mylbfgs
from pygmin.transition_states._nebdriver import NEBDriver
from pygmin.transition_states import FindTransitionState
from pygmin.thermodynamics import logproduct_freq2, normalmodes
__all__ = ["BaseParameters", "Parameters", "dict_copy_update", "BaseSystem"]
#class NotImplemented(BaseException):
# """
# The exception to return if there is a feature
# in the System is not implemented yet
# """
# pass
class BaseParameters(dict):
    """A dict whose string-keyed entries can also be accessed as attributes.

    For a BaseParameters instance ``base_params`` the statement::

        base_params["key"] = value

    is equivalent to::

        base_params.key = value

    This only works for keys that are strings.
    """
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to the
        # dict entry of the same name.
        if name in self:
            return self[name]
        raise AttributeError(name)
    def __setattr__(self, name, value):
        # Attribute assignment stores a dict entry, never an instance attribute.
        dict.__setitem__(self, name, value)
    def __delattr__(self, name):
        # Attribute deletion removes the dict entry (KeyError if missing).
        dict.__delitem__(self, name)
class Parameters(BaseParameters):
    """Define the parameter tree for use with BaseSystem class."""
    def __init__(self):
        # On BaseParameters, attribute assignment is dict item assignment,
        # so each of these creates a sub-dictionary entry.
        self.database = BaseParameters()
        self.basinhopping = BaseParameters()
        self.takestep = BaseParameters()
        self.structural_quench_params = BaseParameters()
        self.gui = BaseParameters()
        # double-ended-connect parameters form a nested tree
        dec = BaseParameters()
        self.double_ended_connect = dec
        lcp = BaseParameters()
        dec.local_connect_params = lcp
        lcp.pushoff_params = BaseParameters()
        # seed the search/NEB parameter dicts with the library defaults
        lcp.tsSearchParams = BaseParameters(FindTransitionState.params())
        lcp.NEBparams = BaseParameters(NEBDriver.params())
def dict_copy_update(dict1, dict2):
    """Return a new dictionary holding the union of *dict1* and *dict2*.

    On conflicting keys the value from *dict2* wins.  Neither input is
    modified.
    """
    merged = dict(dict1)
    merged.update(dict2)
    return merged
class BaseSystem(object):
    """
    this class defines a base class for a System object

    Notes
    -------------
    The following functions need to be overloaded for running
    the given routines

    Global Optimization::

    1. get_potential : required
    #. get_takestep : optional
    #. get_random_configuration : optional
    #. get_compare_exact : optional

    Connecting Minima and Transition State Searches::

    1. get_potential : required
    #. get_mindist : required
    #. get_orthogonalize_to_zero_eigenvectors : required
    #. get_compare_exact : optional, recommended

    thermodynamics::

    1. get_metric_tensor

    GUI::

    1. all of the above functions are required, plus
    #. draw : required
    #. smooth_path : required
    #. load_coords_pymol : recommended

    additionally, it's a very good idea to specify the accuracy in the
    database using self.params.database.accuracy

    See the method documentation for more information and relevant links
    """
    def __init__(self, *args, **kwargs):
        """set up the parameter tree that configures every routine"""
        self.params = Parameters()

    def __call__(self):
        """calling a system returns itself

        this exists solely for the gui. this should be rewritten
        """
        return self

    def get_potential(self):
        """return the potential object

        See Also
        --------
        pygmin.potentials
        """
        raise NotImplementedError

    def get_random_configuration(self):
        """a starting point for basinhopping, etc."""
        raise NotImplementedError

    def get_random_minimized_configuration(self):
        """return a random configuration quenched to the nearest minimum"""
        coords = self.get_random_configuration()
        quencher = self.get_minimizer()
        return quencher(coords)

    def get_minimizer(self, **kwargs):
        """return a function to minimize the structure

        keyword arguments override self.params.structural_quench_params
        """
        pot = self.get_potential()
        kwargs = dict_copy_update(self.params["structural_quench_params"], kwargs)
        return lambda coords: mylbfgs(coords, pot, **kwargs)

    def get_compare_exact(self):
        """object that returns True if two structures are exact.

            true_false = compare_exact(min1, min2)

        See Also
        --------
        pygmin.mindist
        """
        raise NotImplementedError

    def get_compare_minima(self):
        """a wrapper for compare exact so the input can be in Minimum form"""
        compare = self.get_compare_exact()
        if compare is None:
            return None
        return lambda m1, m2: compare(m1.coords, m2.coords)

    def create_database(self, *args, **kwargs):
        """return a new database object

        Parameters
        ----------
        args :
            the database file name may be passed as the single positional
            argument (it is equivalent to the ``db`` keyword)
        kwargs :
            merged over self.params.database and passed on to Database

        See Also
        --------
        pygmin.storage
        """
        kwargs = dict_copy_update(self.params["database"], kwargs)
        # allow the database file name to be passed as the first positional
        # argument rather than as the keyword "db"
        if len(args) > 1:
            raise ValueError("create_database can only take one non-keyword argument")
        if len(args) == 1:
            if "db" not in kwargs:
                kwargs["db"] = args[0]
        # attach a routine to compare minima exactly, if the system defines one.
        # (the membership test itself cannot raise NotImplementedError, so the
        # previously nested try blocks are collapsed into one)
        if "compareMinima" not in kwargs:
            try:
                kwargs["compareMinima"] = self.get_compare_minima()
            except NotImplementedError:
                # compareMinima is optional
                pass
        return Database(**kwargs)

    def get_takestep(self, stepsize=0.6, **kwargs):
        """return the takestep object for use in basinhopping, etc.

        default is random displacement with adaptive step size and
        adaptive temperature

        See Also
        --------
        pygmin.takestep
        """
        kwargs = dict_copy_update(self.params["takestep"], kwargs)
        takeStep = RandomDisplacement(stepsize=stepsize)
        tsAdaptive = AdaptiveStepsizeTemperature(takeStep, **kwargs)
        return tsAdaptive

    def get_basinhopping(self, database=None, takestep=None, coords=None, add_minimum=None,
                         **kwargs):
        """return the basinhopping object with takestep
        and accept step already implemented

        See Also
        --------
        pygmin.basinhopping
        """
        kwargs = dict_copy_update(self.params["basinhopping"], kwargs)
        pot = self.get_potential()
        if coords is None:
            coords = self.get_random_configuration()
        if takestep is None:
            takestep = self.get_takestep()
        if add_minimum is None:
            # by default store every minimum found in a database
            if database is None:
                database = self.create_database()
            add_minimum = database.minimum_adder()
        bh = basinhopping.BasinHopping(coords, pot, takestep, quench=self.get_minimizer(),
                                       storage=add_minimum,
                                       **kwargs)
        return bh

    def get_mindist(self):
        """return a mindist object that is callable with the form

            dist, X1new, X2new = mindist(X1, X2)

        Notes
        -----
        the mindist object returns the best alignment between two
        configurations, taking into account all global symmetries

        See Also
        --------
        pygmin.mindist
        """
        raise NotImplementedError

    def get_orthogonalize_to_zero_eigenvectors(self):
        """return a function which makes a vector orthogonal to the known zero
        eigenvectors (the eigenvectors with zero eigenvalues).  It should
        be callable with the form::

            vec = orthogVec(vec, coords)

        See Also
        --------
        pygmin.transition_states
        """
        raise NotImplementedError

    def get_double_ended_connect(self, min1, min2, database, parallel=False, **kwargs):
        """return a DoubleEndedConnect object

        See Also
        --------
        pygmin.landscape
        """
        kwargs = dict_copy_update(self.params["double_ended_connect"], kwargs)
        pot = self.get_potential()
        mindist = self.get_mindist()
        # attach the function which orthogonalizes to known zero eigenvectors,
        # unless the caller supplied one already.
        # vr: yea, we should polish this parameters stuff and create policies instead!
        try:
            kwargs["local_connect_params"]["tsSearchParams"]["orthogZeroEigs"]
        except KeyError:
            # some level of the nested parameter dicts is missing: create the
            # intermediate dictionaries and fill in the default
            lcp = kwargs.setdefault("local_connect_params", BaseParameters())
            tssp = lcp.setdefault("tsSearchParams", BaseParameters())
            if "orthogZeroEigs" not in tssp:
                tssp["orthogZeroEigs"] = self.get_orthogonalize_to_zero_eigenvectors()
        # similarly attach the quench routine used after stepping off a
        # transition state.  note: only KeyError is expected here -- the
        # original bare ``except:`` would have hidden unrelated bugs.
        try:
            kwargs["local_connect_params"]["pushoff_params"]["quench"]
        except KeyError:
            pushoff_params = kwargs["local_connect_params"].setdefault(
                "pushoff_params", BaseParameters())
            pushoff_params["quench"] = self.get_minimizer()
        if parallel:
            return DoubleEndedConnectPar(min1, min2, pot, mindist, database, **kwargs)
        else:
            return DoubleEndedConnect(min1, min2, pot, mindist, database, **kwargs)

    #
    # the following functions are used for getting thermodynamic information about the minima
    #

    def get_pgorder(self, coords):
        """return the point group order of the configuration

        Notes
        -----
        This is a measure of the symmetry of a configuration.  It is used in
        calculating the thermodynamic weight of a minimum.  Most configurations
        will have pgorder 1, but some highly symmetric minima will have higher orders.
        Routines to compute the point group order are in module `mindist`.

        See Also
        --------
        pygmin.mindist
        """
        raise NotImplementedError

    def get_metric_tensor(self, coords):
        """return (mass-weighted) metric tensor for given coordinates

        Notes
        -----
        The metric tensor is needed for normal mode analysis. In the simplest case it is just the identity.
        For atomic systems (cartesian coordinates) with masses different to 1.0, the metric tensor
        is a diagonal matrix with 1/m_i on the diagonal.
        For curvilinear coordinates like angle axis coordinates it is more complicated.

        See Also
        --------
        pygmin.landscape.smoothPath
        """
        raise NotImplementedError

    def get_nzero_modes(self):
        """return the number of vibration modes with zero frequency

        Notes
        -----
        Zero modes can come from a number of different sources. You will have one
        zero mode for every symmetry in the Hamiltonian. e.g. 3 zero modes for
        translational invariance and 3 zero modes for rotational invariance. If
        you have extra degrees of freedom, from say frozen particles they will
        contribute zero modes.

        Harmonic modes are necessary to calculate the free energy of a minimum in
        the harmonic approximation. The density of states is inversely proportional
        to the product of the frequencies. If the zero modes are not accounted for
        correctly then the product will be trivially zero and the free energy will
        be completely wrong.
        """
        raise NotImplementedError

    def get_normalmodes(self, coords):
        """return the squared normal mode frequencies and eigenvectors"""
        mt = self.get_metric_tensor(coords)
        pot = self.get_potential()
        hess = pot.getHessian(coords)
        freqs, vecs = normalmodes(hess, mt)
        return freqs, vecs

    def get_log_product_normalmode_freq(self, coords, nnegative=0):
        """return the log product of the squared normal mode frequencies

        Parameters
        ----------
        coords : array
        nnegative : int, optional
            number of expected negative eigenvalues

        Notes
        -----
        this is necessary to calculate the free energy contribution of a minimum
        """
        nzero = self.get_nzero_modes()
        freqs, vecs = self.get_normalmodes(coords)
        n, lprod = logproduct_freq2(freqs, nzero, nnegative=nnegative)
        return lprod

    #
    # the following functions are used only for the GUI
    #

    def draw(self, coords, index):
        """
        tell the gui how to represent your system using openGL objects

        Parameters
        ----------
        coords : array
        index : int
            we can have more than one molecule on the screen at one time.  index tells
            which one to draw.  They are viewed at the same time, so they should be
            visually distinct, e.g. different colors.  accepted values are 1 or 2
        """
        raise NotImplementedError

    def smooth_path(self, images):
        """return a smoothed path between two configurations.

        used for movies

        See Also
        --------
        pygmin.landscape.smoothPath
        """
        raise NotImplementedError

    def createNEB(self):
        """return an NEB object; not implemented in the base class"""
        raise NotImplementedError

    def load_coords_pymol(self, coordslist, oname, index=1):
        """load the coords into pymol

        the new object must be named oname so we can manipulate it later

        Parameters
        ----------
        coordslist : list of arrays
        oname : str
            the new pymol object must be named oname so it can be manipulated
            later
        index : int
            we can have more than one molecule on the screen at one time.  index tells
            which one to draw.  They are viewed at the same time, so they should be
            visually distinct, e.g. different colors.  accepted values are 1 or 2

        Notes
        -----
        the implementation here is a bit hacky.  we create a temporary xyz file
        from coords and load the molecule in pymol from this file.
        """
        # pymol is imported here so you can do, e.g. basinhopping without installing pymol
        import pymol

        # create the temporary file (.xyz or .pdb, or whatever else pymol can read)
        # note: this is the part that will be really system dependent.
        # the context manager closes (and removes) the file handle, which the
        # original code leaked until garbage collection.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".xyz") as f:
            # write the coords into the file
            for coords in coordslist:
                write_xyz(f, coords, title=oname)
            f.flush()

            # load the molecule from the temporary file while it still exists
            pymol.cmd.load(f.name)

        # get name of the object just created and change it to oname
        objects = pymol.cmd.get_object_list()
        objectname = objects[-1]
        pymol.cmd.set_name(objectname, oname)

        # here you might want to change the representation of the molecule, e.g.
        # >>> pymol.cmd.hide("everything", oname)
        # >>> pymol.cmd.show("spheres", oname)

        # set the color according to index
        if index == 1:
            pymol.cmd.color("red", oname)
        else:
            pymol.cmd.color("gray", oname)
if __name__ == "__main__":
    # smoke run: the abstract base class raises NotImplementedError here
    system = BaseSystem()
    system.get_potential()
    system.get_basinhopping()
| gpl-3.0 |
b1-systems/kiwi | test/unit/package_manager_zypper_test.py | 1 | 6581 | from mock import patch
import mock
from .test_helper import raises
from kiwi.package_manager.zypper import PackageManagerZypper
from kiwi.exceptions import KiwiRequestError
class TestPackageManagerZypper(object):
    """Unit tests for PackageManagerZypper.

    ``setup`` builds a mocked repository object; ``root_bind.move_to_root``
    is canned to return ``['root-moved-arguments']``, so the expected chroot
    command lines asserted below are derived from that fixed value.
    """
    def setup(self):
        """Create the manager under test on top of a fully mocked repository."""
        repository = mock.Mock()
        repository.root_dir = 'root-dir'
        root_bind = mock.Mock()
        root_bind.move_to_root = mock.Mock(
            return_value=['root-moved-arguments']
        )
        repository.root_bind = root_bind
        self.command_env = {
            'HOME': '/home/ms', 'ZYPP_CONF': 'root-dir/my/zypp.conf'
        }
        repository.runtime_config = mock.MagicMock(
            return_value={
                'zypper_args': ['--reposd-dir', 'root-dir/my/repos'],
                'command_env': self.command_env
            }
        )
        self.manager = PackageManagerZypper(repository)
        # pre-compute the argument list and environment the manager is
        # expected to use for chroot'ed zypper invocations
        self.chroot_zypper_args = self.manager.root_bind.move_to_root(
            self.manager.zypper_args
        )
        self.chroot_command_env = self.manager.command_env
        zypp_conf = self.manager.command_env['ZYPP_CONF']
        self.chroot_command_env['ZYPP_CONF'] = \
            self.manager.root_bind.move_to_root(zypp_conf)[0]
    def test_request_package(self):
        """Package requests are queued verbatim."""
        self.manager.request_package('name')
        assert self.manager.package_requests == ['name']
    def test_request_collection(self):
        """Collection requests are queued with a 'pattern:' prefix."""
        self.manager.request_collection('name')
        assert self.manager.collection_requests == ['pattern:name']
    def test_request_product(self):
        """Product requests are queued with a 'product:' prefix."""
        self.manager.request_product('name')
        assert self.manager.product_requests == ['product:name']
    def test_request_package_exclusion(self):
        """Exclusion requests are queued verbatim."""
        self.manager.request_package_exclusion('name')
        assert self.manager.exclude_requests == ['name']
    @patch('kiwi.command.Command.call')
    def test_process_install_requests_bootstrap(self, mock_call):
        """Bootstrap installs run zypper with --root, outside any chroot."""
        self.manager.request_package('vim')
        self.manager.process_install_requests_bootstrap()
        mock_call.assert_called_once_with(
            [
                'zypper', '--reposd-dir', 'root-dir/my/repos',
                '--root', 'root-dir',
                'install', '--auto-agree-with-licenses'
            ] + self.manager.custom_args + ['vim'], self.command_env
        )
    @patch('kiwi.command.Command.call')
    @patch('kiwi.command.Command.run')
    @patch('os.path.exists')
    @patch('kiwi.package_manager.zypper.Path.create')
    def test_process_install_requests(
        self, mock_path, mock_exists, mock_run, mock_call
    ):
        """Regular installs add locks ('al') for exclusions, then install in the chroot."""
        mock_exists.return_value = False
        self.manager.request_package('vim')
        self.manager.request_package_exclusion('skipme')
        self.manager.process_install_requests()
        mock_path.assert_called_once_with('root-dir/etc/zypp')
        mock_run.assert_called_once_with(
            ['chroot', 'root-dir', 'zypper'] + self.chroot_zypper_args + [
                'al'
            ] + self.manager.custom_args + ['skipme'], self.chroot_command_env
        )
        mock_call.assert_called_once_with(
            ['chroot', 'root-dir', 'zypper'] + self.chroot_zypper_args + [
                'install', '--auto-agree-with-licenses'
            ] + self.manager.custom_args + ['vim'], self.chroot_command_env
        )
    @patch('kiwi.command.Command.call')
    @patch('kiwi.command.Command.run')
    def test_process_delete_requests_all_installed(self, mock_run, mock_call):
        """Deleting installed packages uses zypper remove with forced resolution."""
        self.manager.request_package('vim')
        self.manager.process_delete_requests()
        mock_call.assert_called_once_with(
            ['chroot', 'root-dir', 'zypper'] + self.chroot_zypper_args + [
                'remove', '-u', '--force-resolution'
            ] + self.manager.custom_args + ['vim'], self.chroot_command_env
        )
    @patch('kiwi.command.Command.call')
    @patch('kiwi.command.Command.run')
    def test_process_delete_requests_force(self, mock_run, mock_call):
        """Forced deletion bypasses zypper and calls rpm -e directly."""
        self.manager.request_package('vim')
        self.manager.process_delete_requests(True)
        mock_call.assert_called_once_with(
            [
                'chroot', 'root-dir', 'rpm', '-e',
                '--nodeps', '--allmatches', '--noscripts', 'vim'
            ], self.command_env
        )
    @patch('kiwi.command.Command.run')
    @patch('kiwi.command.Command.call')
    @raises(KiwiRequestError)
    def test_process_delete_requests_package_missing(self, mock_call, mock_run):
        """Deleting a package that is not installed raises KiwiRequestError."""
        mock_run.side_effect = Exception
        self.manager.request_package('vim')
        self.manager.process_delete_requests()
        # NOTE(review): process_delete_requests is expected to raise on the
        # line above (per @raises), so this assertion is never reached --
        # TODO confirm whether it was meant to verify the rpm -q probe.
        mock_run.assert_called_once_with(
            ['chroot', 'root-dir', 'rpm', '-q', 'vim']
        )
    @patch('kiwi.command.Command.call')
    def test_update(self, mock_call):
        """Update runs zypper update in the chroot."""
        self.manager.update()
        mock_call.assert_called_once_with(
            ['chroot', 'root-dir', 'zypper'] + self.chroot_zypper_args + [
                'update', '--auto-agree-with-licenses'
            ] + self.manager.custom_args, self.chroot_command_env
        )
    def test_process_only_required(self):
        """Only-required mode adds --no-recommends to the custom args."""
        self.manager.process_only_required()
        assert self.manager.custom_args == ['--no-recommends']
    def test_process_plus_recommended(self):
        """Plus-recommended mode removes a previously set --no-recommends."""
        self.manager.process_only_required()
        assert self.manager.custom_args == ['--no-recommends']
        self.manager.process_plus_recommended()
        assert '--no-recommends' not in self.manager.custom_args
    def test_match_package_installed(self):
        """Install progress lines are matched against the package name."""
        assert self.manager.match_package_installed('foo', 'Installing: foo')
    def test_match_package_deleted(self):
        """Removal progress lines are matched against the package name."""
        assert self.manager.match_package_deleted('foo', 'Removing: foo')
    @patch('kiwi.package_manager.zypper.RpmDataBase')
    def test_post_process_install_requests_bootstrap(self, mock_RpmDataBase):
        """Bootstrap post-processing relocates the rpm database into the image."""
        rpmdb = mock.Mock()
        mock_RpmDataBase.return_value = rpmdb
        self.manager.post_process_install_requests_bootstrap()
        rpmdb.set_database_to_image_path.assert_called_once_with()
    def test_has_failed(self):
        """Zypper exit codes 0, 100 and 102 are success; everything else fails."""
        assert self.manager.has_failed(0) is False
        assert self.manager.has_failed(102) is False
        assert self.manager.has_failed(100) is False
        assert self.manager.has_failed(104) is True
        assert self.manager.has_failed(105) is True
        assert self.manager.has_failed(106) is True
        assert self.manager.has_failed(1) is True
        assert self.manager.has_failed(4) is True
        assert self.manager.has_failed(-42) is True
| gpl-3.0 |
Richard-Mathie/cassandra_benchmark | vendor/github.com/datastax/python-driver/tests/integration/cqlengine/statements/test_where_clause.py | 6 | 1642 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import six
from cassandra.cqlengine.operators import EqualsOperator
from cassandra.cqlengine.statements import StatementException, WhereClause
class TestWhereClause(unittest.TestCase):
    """Tests for WhereClause construction, rendering and equality."""
    def test_operator_check(self):
        """Creating a where clause with a non-BaseWhereOperator object fails."""
        self.assertRaises(StatementException, WhereClause, 'a', 'b', 'c')
    def test_where_clause_rendering(self):
        """A clause renders its quoted field and context placeholder."""
        clause = WhereClause('a', EqualsOperator(), 'c')
        clause.set_context_id(5)
        expected = '"a" = %(5)s'
        self.assertEqual(expected, six.text_type(clause), six.text_type(clause))
        self.assertEqual(expected, str(clause), type(clause))
    def test_equality_method(self):
        """Two clauses built from identical parts compare equal."""
        first = WhereClause('a', EqualsOperator(), 'c')
        second = WhereClause('a', EqualsOperator(), 'c')
        self.assertTrue(first == second)
| apache-2.0 |
lduarte1991/edx-platform | common/lib/sandbox-packages/verifiers/tests_draganddrop.py | 24 | 32789 | import json
import unittest
import draganddrop
from .draganddrop import PositionsCompare
class Test_PositionsCompare(unittest.TestCase):
    """Equality semantics of PositionsCompare across input shapes."""
    def test_nested_list_and_list1(self):
        spec = PositionsCompare([[1, 2], 40])
        point = PositionsCompare([1, 3])
        self.assertEqual(spec, point)
    def test_nested_list_and_list2(self):
        left = PositionsCompare([1, 12])
        right = PositionsCompare([1, 1])
        self.assertNotEqual(left, right)
    def test_list_and_list1(self):
        spec = PositionsCompare([[1, 2], 12])
        point = PositionsCompare([1, 15])
        self.assertNotEqual(spec, point)
    def test_list_and_list2(self):
        left = PositionsCompare([1, 11])
        right = PositionsCompare([1, 1])
        self.assertEqual(left, right)
    def test_numerical_list_and_string_list(self):
        numeric = PositionsCompare([1, 2])
        textual = PositionsCompare(["1"])
        self.assertNotEqual(numeric, textual)
    def test_string_and_string_list1(self):
        self.assertEqual(PositionsCompare("1"), PositionsCompare(["1"]))
    def test_string_and_string_list2(self):
        self.assertEqual(PositionsCompare("abc"), PositionsCompare("abc"))
    def test_string_and_string_list3(self):
        self.assertNotEqual(PositionsCompare("abd"), PositionsCompare("abe"))
    def test_float_and_string(self):
        floats = PositionsCompare([3.5, 5.7])
        text = PositionsCompare(["1"])
        self.assertNotEqual(floats, text)
    def test_floats_and_ints(self):
        floats = PositionsCompare([3.5, 4.5])
        ints = PositionsCompare([5, 7])
        self.assertEqual(floats, ints)
class Test_DragAndDrop_Grade(unittest.TestCase):
def test_targets_are_draggable_1(self):
user_input = json.dumps([
{'p': 'p_l'},
{'up': {'first': {'p': 'p_l'}}}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'anyof'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][first]'
],
'rule': 'anyof'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_2(self):
user_input = json.dumps([
{'p': 'p_l'},
{'p': 'p_r'},
{'s': 's_l'},
{'s': 's_r'},
{'up': {'1': {'p': 'p_l'}}},
{'up': {'3': {'p': 'p_l'}}},
{'up': {'1': {'p': 'p_r'}}},
{'up': {'3': {'p': 'p_r'}}},
{'up_and_down': {'1': {'s': 's_l'}}},
{'up_and_down': {'1': {'s': 's_r'}}}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': ['s_l', 's_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': ['s_l[s][1]', 's_r[s][1]'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][1]',
'p_l[p][3]',
'p_r[p][1]',
'p_r[p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_2_manual_parsing(self):
user_input = json.dumps([
{'up': 'p_l[p][1]'},
{'p': 'p_l'},
{'up': 'p_l[p][3]'},
{'up': 'p_r[p][1]'},
{'p': 'p_r'},
{'up': 'p_r[p][3]'},
{'up_and_down': 's_l[s][1]'},
{'s': 's_l'},
{'up_and_down': 's_r[s][1]'},
{'s': 's_r'}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': ['s_l', 's_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': ['s_l[s][1]', 's_r[s][1]'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][1]',
'p_l[p][3]',
'p_r[p][1]',
'p_r[p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_3_nested(self):
user_input = json.dumps([
{'molecule': 'left_side_tagret'},
{'molecule': 'right_side_tagret'},
{'p': {'p_target': {'molecule': 'left_side_tagret'}}},
{'p': {'p_target': {'molecule': 'right_side_tagret'}}},
{'s': {'s_target': {'molecule': 'left_side_tagret'}}},
{'s': {'s_target': {'molecule': 'right_side_tagret'}}},
{'up': {'1': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}},
{'up': {'3': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}},
{'up': {'1': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}},
{'up': {'3': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}},
{'up_and_down': {'1': {'s': {'s_target': {'molecule': 'left_side_tagret'}}}}},
{'up_and_down': {'1': {'s': {'s_target': {'molecule': 'right_side_tagret'}}}}}
])
correct_answer = [
{
'draggables': ['molecule'],
'targets': ['left_side_tagret', 'right_side_tagret'],
'rule': 'unordered_equal'
},
{
'draggables': ['p'],
'targets': [
'left_side_tagret[molecule][p_target]',
'right_side_tagret[molecule][p_target]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': [
'left_side_tagret[molecule][s_target]',
'right_side_tagret[molecule][s_target]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': [
'left_side_tagret[molecule][s_target][s][1]',
'right_side_tagret[molecule][s_target][s][1]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'left_side_tagret[molecule][p_target][p][1]',
'left_side_tagret[molecule][p_target][p][3]',
'right_side_tagret[molecule][p_target][p][1]',
'right_side_tagret[molecule][p_target][p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_4_real_example(self):
user_input = json.dumps([
{'single_draggable': 's_l'},
{'single_draggable': 's_r'},
{'single_draggable': 'p_sigma'},
{'single_draggable': 'p_sigma*'},
{'single_draggable': 's_sigma'},
{'single_draggable': 's_sigma*'},
{'double_draggable': 'p_pi*'},
{'double_draggable': 'p_pi'},
{'triple_draggable': 'p_l'},
{'triple_draggable': 'p_r'},
{'up': {'1': {'triple_draggable': 'p_l'}}},
{'up': {'2': {'triple_draggable': 'p_l'}}},
{'up': {'2': {'triple_draggable': 'p_r'}}},
{'up': {'3': {'triple_draggable': 'p_r'}}},
{'up_and_down': {'1': {'single_draggable': 's_l'}}},
{'up_and_down': {'1': {'single_draggable': 's_r'}}},
{'up_and_down': {'1': {'single_draggable': 's_sigma'}}},
{'up_and_down': {'1': {'single_draggable': 's_sigma*'}}},
{'up_and_down': {'1': {'double_draggable': 'p_pi'}}},
{'up_and_down': {'2': {'double_draggable': 'p_pi'}}}
])
# 10 targets:
# s_l, s_r, p_l, p_r, s_sigma, s_sigma*, p_pi, p_sigma, p_pi*, p_sigma*
#
# 3 draggable objects, which have targets (internal target ids - 1, 2, 3):
# single_draggable, double_draggable, triple_draggable
#
# 2 draggable objects:
# up, up_and_down
correct_answer = [
{
'draggables': ['triple_draggable'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['double_draggable'],
'targets': ['p_pi', 'p_pi*'],
'rule': 'unordered_equal'
},
{
'draggables': ['single_draggable'],
'targets': ['s_l', 's_r', 's_sigma', 's_sigma*', 'p_sigma', 'p_sigma*'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[triple_draggable][1]',
'p_l[triple_draggable][2]',
'p_r[triple_draggable][2]',
'p_r[triple_draggable][3]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': [
's_l[single_draggable][1]',
's_r[single_draggable][1]',
's_sigma[single_draggable][1]',
's_sigma*[single_draggable][1]',
'p_pi[double_draggable][1]',
'p_pi[double_draggable][2]',
],
'rule': 'unordered_equal'
},
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_true(self):
user_input = '[{"1": "t1"}, \
{"name_with_icon": "t2"}]'
correct_answer = {'1': 't1', 'name_with_icon': 't2'}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_expect_no_actions_wrong(self):
user_input = '[{"1": "t1"}, \
{"name_with_icon": "t2"}]'
correct_answer = []
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_expect_no_actions_right(self):
user_input = '[]'
correct_answer = []
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_false(self):
user_input = '[{"1": "t1"}, \
{"name_with_icon": "t2"}]'
correct_answer = {'1': 't3', 'name_with_icon': 't2'}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_multiple_images_per_target_true(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \
{"2": "t1"}]'
correct_answer = {'1': 't1', 'name_with_icon': 't2', '2': 't1'}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_multiple_images_per_target_false(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \
{"2": "t1"}]'
correct_answer = {'1': 't2', 'name_with_icon': 't2', '2': 't1'}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_targets_and_positions(self):
user_input = '[{"1": [10,10]}, \
{"name_with_icon": [[10,10],4]}]'
correct_answer = {'1': [10, 10], 'name_with_icon': [[10, 10], 4]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_position_and_targets(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]'
correct_answer = {'1': 't1', 'name_with_icon': 't2'}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_exact(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [10, 10], 'name_with_icon': [20, 20]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_false(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [25, 25], 'name_with_icon': [20, 20]}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_positions_true_in_radius(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [14, 14], 'name_with_icon': [20, 20]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_true_in_manual_radius(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [[40, 10], 30], 'name_with_icon': [20, 20]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_false_in_manual_radius(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_correct_answer_not_has_key_from_user_answer(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]'
correct_answer = {'3': 't3', 'name_with_icon': 't2'}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_anywhere(self):
"""Draggables can be places anywhere on base image.
Place grass in the middle of the image and ant in the
right upper corner."""
user_input = '[{"ant":[610.5,57.449951171875]},\
{"grass":[322.5,199.449951171875]}]'
correct_answer = {'grass': [[300, 200], 200], 'ant': [[500, 0], 200]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_lcao_correct(self):
"""Describe carbon molecule in LCAO-MO"""
user_input = '[{"1":"s_left"}, \
{"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \
{"8":"p_left_2"},{"10":"p_right_1"},{"9":"p_right_2"}, \
{"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \
{"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \
{"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]'
correct_answer = [{
'draggables': ['1', '2', '3', '4', '5', '6'],
'targets': [
's_left', 's_right', 's_sigma', 's_sigma_star', 'p_pi_1', 'p_pi_2'
],
'rule': 'anyof'
}, {
'draggables': ['7', '8', '9', '10'],
'targets': ['p_left_1', 'p_left_2', 'p_right_1', 'p_right_2'],
'rule': 'anyof'
}, {
'draggables': ['11', '12'],
'targets': ['s_sigma_name', 'p_sigma_name'],
'rule': 'anyof'
}, {
'draggables': ['13', '14'],
'targets': ['s_sigma_star_name', 'p_sigma_star_name'],
'rule': 'anyof'
}, {
'draggables': ['15'],
'targets': ['p_pi_name'],
'rule': 'anyof'
}, {
'draggables': ['16'],
'targets': ['p_pi_star_name'],
'rule': 'anyof'
}]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_lcao_extra_element_incorrect(self):
    """An extra placement ("17" on p_left_3) makes the LCAO answer wrong."""
    submission = '[{"1":"s_left"}, \
{"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \
{"8":"p_left_2"},{"17":"p_left_3"},{"10":"p_right_1"},{"9":"p_right_2"}, \
{"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \
{"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \
{"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]'
    # Same answer key as test_lcao_correct.
    groups = [
        (['1', '2', '3', '4', '5', '6'],
         ['s_left', 's_right', 's_sigma', 's_sigma_star', 'p_pi_1',
          'p_pi_2']),
        (['7', '8', '9', '10'],
         ['p_left_1', 'p_left_2', 'p_right_1', 'p_right_2']),
        (['11', '12'], ['s_sigma_name', 'p_sigma_name']),
        (['13', '14'], ['s_sigma_star_name', 'p_sigma_star_name']),
        (['15'], ['p_pi_name']),
        (['16'], ['p_pi_star_name']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'anyof'}
                for d, t in groups]
    self.assertFalse(draganddrop.grade(submission, expected))
def test_reuse_draggable_no_mupliples(self):
    """Reusable draggables with at most one draggable per target."""
    submission = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target3"},{"2":"target4"},{"2":"target5"}, \
{"3":"target6"}]'
    groups = [
        (['1'], ['target1', 'target3']),
        (['2'], ['target2', 'target4', 'target5']),
        (['3'], ['target6']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'anyof'}
                for d, t in groups]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_reuse_draggable_with_mupliples(self):
    """Reusable draggables where a target may hold several draggables."""
    submission = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \
{"3":"target6"}]'
    groups = [
        (['1'], ['target1', 'target3']),
        (['2'], ['target2', 'target4']),
        (['3'], ['target6']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'anyof'}
                for d, t in groups]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_reuse_many_draggable_with_mupliples(self):
    """Several reusable draggables, multiple draggables per target."""
    submission = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \
{"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \
{"5": "target5"}, {"6": "target2"}]'
    groups = [
        (['1', '4'], ['target1', 'target3']),
        (['2', '6'], ['target2', 'target4']),
        (['5'], ['target4', 'target5']),
        (['3'], ['target6']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'anyof'}
                for d, t in groups]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_reuse_many_draggable_with_mupliples_wrong(self):
    """Placing "2" on target3 (not in its group) fails the check."""
    submission = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target1"}, \
{"2":"target3"}, \
{"2":"target4"}, \
{"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \
{"5": "target5"}, {"6": "target2"}]'
    groups = [
        (['1', '4'], ['target1', 'target3']),
        (['2', '6'], ['target2', 'target4']),
        (['5'], ['target4', 'target5']),
        (['3'], ['target6']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'anyof'}
                for d, t in groups]
    self.assertFalse(draganddrop.grade(submission, expected))
def test_label_10_targets_with_a_b_c_false(self):
    """Labelling ten targets with a/b/c fails: target10 is never covered."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target1"}]'
    groups = [
        (['a'], ['target1', 'target4', 'target7', 'target10']),
        (['b'], ['target2', 'target5', 'target8']),
        (['c'], ['target3', 'target6', 'target9']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'unordered_equal'}
                for d, t in groups]
    self.assertFalse(draganddrop.grade(submission, expected))
def test_label_10_targets_with_a_b_c_(self):
    """Labelling ten targets with a/b/c, every target covered correctly."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target10"}]'
    groups = [
        (['a'], ['target1', 'target4', 'target7', 'target10']),
        (['b'], ['target2', 'target5', 'target8']),
        (['c'], ['target3', 'target6', 'target9']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'unordered_equal'}
                for d, t in groups]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_label_10_targets_with_a_b_c_multiple(self):
    """anyof+number: three copies of each label, counts satisfied."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target1"}]'
    groups = [
        (['a'] * 3, ['target1', 'target4', 'target7', 'target10']),
        (['b'] * 3, ['target2', 'target5', 'target8']),
        (['c'] * 3, ['target3', 'target6', 'target9']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'anyof+number'}
                for d, t in groups]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_label_10_targets_with_a_b_c_multiple_false(self):
    """anyof+number: four placements of "a" exceed its allowed count."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target1"}]'
    groups = [
        (['a'] * 3, ['target1', 'target4', 'target7', 'target10']),
        (['b'] * 3, ['target2', 'target5', 'target8']),
        (['c'] * 3, ['target3', 'target6', 'target9']),
    ]
    expected = [{'draggables': d, 'targets': t, 'rule': 'anyof+number'}
                for d, t in groups]
    self.assertFalse(draganddrop.grade(submission, expected))
def test_label_10_targets_with_a_b_c_reused(self):
    """unordered_equal+number: each label used exactly its listed count."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"b":"target5"}, \
{"c":"target6"}, {"b":"target8"},{"c":"target9"}, \
{"a":"target10"}]'
    groups = [
        (['a'] * 2, ['target1', 'target10']),
        (['b'] * 3, ['target2', 'target5', 'target8']),
        (['c'] * 3, ['target3', 'target6', 'target9']),
    ]
    expected = [{'draggables': d, 'targets': t,
                 'rule': 'unordered_equal+number'}
                for d, t in groups]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_label_10_targets_with_a_b_c_reused_false(self):
    """unordered_equal+number: an extra "a" on target8 breaks the count."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"b":"target5"}, {"a":"target8"},\
{"c":"target6"}, {"b":"target8"},{"c":"target9"}, \
{"a":"target10"}]'
    groups = [
        (['a'] * 2, ['target1', 'target10']),
        (['b'] * 3, ['target2', 'target5', 'target8']),
        (['c'] * 3, ['target3', 'target6', 'target9']),
    ]
    expected = [{'draggables': d, 'targets': t,
                 'rule': 'unordered_equal+number'}
                for d, t in groups]
    self.assertFalse(draganddrop.grade(submission, expected))
def test_mixed_reuse_and_not_reuse(self):
    """Mix a reusable 'anyof' group with a strict 'exact' group."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"}, {"a":"target4"},\
{"a":"target5"}]'
    expected = [
        {'draggables': ['a', 'b'],
         'targets': ['target1', 'target2', 'target4', 'target5'],
         'rule': 'anyof'},
        {'draggables': ['c'], 'targets': ['target3'], 'rule': 'exact'},
    ]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_mixed_reuse_and_not_reuse_number(self):
    """Mix an 'anyof+number' group with an 'exact' group."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"}, {"a":"target4"}]'
    expected = [
        {'draggables': ['a', 'a', 'b'],
         'targets': ['target1', 'target2', 'target4'],
         'rule': 'anyof+number'},
        {'draggables': ['c'], 'targets': ['target3'], 'rule': 'exact'},
    ]
    self.assertTrue(draganddrop.grade(submission, expected))
def test_mixed_reuse_and_not_reuse_number_false(self):
    """Three "a" placements exceed the two allowed; grading fails."""
    submission = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"}, {"a":"target4"}, {"a":"target10"}]'
    # NOTE: the rule string here is 'anyof_number' (underscore), unlike
    # the 'anyof+number' used elsewhere -- kept exactly as authored.
    expected = [
        {'draggables': ['a', 'a', 'b'],
         'targets': ['target1', 'target2', 'target4', 'target10'],
         'rule': 'anyof_number'},
        {'draggables': ['c'], 'targets': ['target3'], 'rule': 'exact'},
    ]
    self.assertFalse(draganddrop.grade(submission, expected))
def test_alternative_correct_answer(self):
    """Two 'exact' groups sharing the same repeated target name."""
    submission = '[{"name_with_icon":"t1"},\
{"name_with_icon":"t1"},{"name_with_icon":"t1"},{"name4":"t1"}, \
{"name4":"t1"}]'
    expected = [
        {'draggables': ['name4'], 'targets': ['t1', 't1'],
         'rule': 'exact'},
        {'draggables': ['name_with_icon'],
         'targets': ['t1', 't1', 't1'], 'rule': 'exact'},
    ]
    self.assertTrue(draganddrop.grade(submission, expected))
class Test_DragAndDrop_Populate(unittest.TestCase):
    """Check how DragAndDrop splits an answer spec into groups/positions."""

    def test_1(self):
        """Mixed radius/exact spec populates all four internal lists."""
        spec = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]}
        submission = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
        dnd = draganddrop.DragAndDrop(spec, submission)
        # One group per draggable, in spec order.
        self.assertEqual(dnd.correct_groups, [['1'], ['name_with_icon']])
        self.assertEqual(dnd.correct_positions,
                         [{'exact': [[[40, 10], 29]]},
                          {'exact': [[20, 20]]}])
        self.assertEqual(dnd.user_groups, [['1'], ['name_with_icon']])
        self.assertEqual(dnd.user_positions,
                         [{'user': [[10, 10]]}, {'user': [[20, 20]]}])
class Test_DraAndDrop_Compare_Positions(unittest.TestCase):
    """Exercise DragAndDrop.compare_positions with 'exact'/'anyof' flags."""

    @staticmethod
    def _dnd():
        # Minimal instance; the constructor arguments are irrelevant to
        # compare_positions itself.
        return draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')

    def test_1(self):
        """anyof: coordinate lists match regardless of order."""
        self.assertTrue(self._dnd().compare_positions(
            correct=[[1, 1], [2, 3]], user=[[2, 3], [1, 1]], flag='anyof'))

    def test_2a(self):
        """exact: these coordinate lists are accepted."""
        self.assertTrue(self._dnd().compare_positions(
            correct=[[1, 1], [2, 3]], user=[[2, 3], [1, 1]], flag='exact'))

    def test_2b(self):
        """exact: a coordinate off by 10 is rejected."""
        self.assertFalse(self._dnd().compare_positions(
            correct=[[1, 1], [2, 3]], user=[[2, 13], [1, 1]], flag='exact'))

    def test_3(self):
        """anyof: an extra user element is rejected."""
        self.assertFalse(self._dnd().compare_positions(
            correct=["a", "b"], user=["a", "b", "c"], flag='anyof'))

    def test_4(self):
        """anyof: a user subset of the correct values is accepted."""
        self.assertTrue(self._dnd().compare_positions(
            correct=["a", "b", "c"], user=["a", "b"], flag='anyof'))

    def test_5(self):
        """exact: order matters, so a permutation is rejected."""
        self.assertFalse(self._dnd().compare_positions(
            correct=["a", "b", "c"], user=["a", "c", "b"], flag='exact'))

    def test_6(self):
        """anyof: order does not matter, the permutation is accepted."""
        self.assertTrue(self._dnd().compare_positions(
            correct=["a", "b", "c"], user=["a", "c", "b"], flag='anyof'))

    def test_7(self):
        """anyof: a value outside the correct set is rejected."""
        self.assertFalse(self._dnd().compare_positions(
            correct=["a", "b", "b"], user=["a", "c", "b"], flag='anyof'))
def suite():
    """Build a TestSuite containing every test case in this module.

    Uses one shared TestLoader instead of constructing a fresh loader
    for every test case, and builds the suite list with a generator.
    """
    testcases = [Test_PositionsCompare,
                 Test_DragAndDrop_Populate,
                 Test_DragAndDrop_Grade,
                 Test_DraAndDrop_Compare_Positions]
    loader = unittest.TestLoader()
    return unittest.TestSuite(loader.loadTestsFromTestCase(testcase)
                              for testcase in testcases)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite())
| agpl-3.0 |
daxxi13/CouchPotatoServer | couchpotato/core/downloaders/putio/main.py | 7 | 5908 | from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEventAsync
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from pio import api as pio
import datetime
log = CPLog(__name__)
autoload = 'Putiodownload'
class PutIO(DownloaderBase):
    """Downloader that sends torrents to the put.io web service.

    Torrents/magnets are added to put.io through its API.  When the
    'download' option is enabled, put.io calls back on the registered
    API endpoint once a transfer finishes and the file is then fetched
    to the local download directory.
    """

    protocol = ['torrent', 'torrent_magnet']
    # file_ids currently being fetched from put.io to the local disk.
    downloading_list = []
    oauth_authenticate = 'https://api.couchpota.to/authorize/putio/'

    def __init__(self):
        addApiView('downloader.putio.getfrom', self.getFromPutio, docs = {
            'desc': 'Allows you to download files from Put.io',
        })
        addApiView('downloader.putio.auth_url', self.getAuthorizationUrl)
        addApiView('downloader.putio.credentials', self.getCredentials)
        addEvent('putio.download', self.putioDownloader)

        return super(PutIO, self).__init__()

    def download(self, data = None, media = None, filedata = None):
        """Add the torrent at data['url'] to put.io.

        Returns a download-id wrapper so the transfer can be tracked by
        getAllDownloadStatus later.
        """
        if not media: media = {}
        if not data: data = {}

        log.info('Sending "%s" to put.io', data.get('name'))
        url = data.get('url')
        client = pio.Client(self.conf('oauth_token'))

        # It might be possible to call getFromPutio from the renamer; if we
        # can, then we don't need to do this.
        # Note callback_host is NOT our address, it's the internet host that
        # putio can call too.
        callbackurl = None
        if self.conf('download'):
            # BUG FIX: previously strip('/') was applied to the literal
            # string 'api_base' (a no-op) instead of to the value of
            # Env.get('api_base'), so the leading slash was never removed
            # (compare getAuthorizationUrl below).
            callbackurl = 'http://' + self.conf('callback_host') + '/' + '%sdownloader.putio.getfrom/' % Env.get('api_base').lstrip('/')
        resp = client.Transfer.add_url(url, callback_url = callbackurl)
        log.debug('resp is %s', resp.id)
        return self.downloadReturnId(resp.id)

    def test(self):
        """Return True when the OAuth token can list files on put.io."""
        try:
            client = pio.Client(self.conf('oauth_token'))
            if client.File.list():
                return True
        except:
            log.info('Failed to get file listing, check OAUTH_TOKEN')
        return False

    def getAuthorizationUrl(self, host = None, **kwargs):
        """Build the put.io OAuth URL that redirects back to our API."""
        callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/'))
        log.debug('callback_url is %s', callback_url)
        target_url = self.oauth_authenticate + "?target=" + callback_url
        log.debug('target_url is %s', target_url)
        return {
            'success': True,
            'url': target_url,
        }

    def getCredentials(self, **kwargs):
        """Store the OAuth token handed back by put.io, then redirect."""
        try:
            oauth_token = kwargs.get('oauth')
        except:
            return 'redirect', Env.get('web_base') + 'settings/downloaders/'
        log.debug('oauth_token is: %s', oauth_token)
        self.conf('oauth_token', value = oauth_token)
        return 'redirect', Env.get('web_base') + 'settings/downloaders/'

    def getAllDownloadStatus(self, ids):
        """Map put.io transfer states onto CouchPotato release statuses."""
        log.debug('Checking putio download status.')
        client = pio.Client(self.conf('oauth_token'))
        transfers = client.Transfer.list()
        log.debug(transfers)
        release_downloads = ReleaseDownloadList(self)
        for t in transfers:
            if t.id in ids:
                log.debug('downloading list is %s', self.downloading_list)
                if t.status == "COMPLETED" and self.conf('download') == False :
                    status = 'completed'
                # So check if we are trying to download something
                elif t.status == "COMPLETED" and self.conf('download') == True:
                    # Assume we are done
                    status = 'completed'
                    if not self.downloading_list:
                        now = datetime.datetime.utcnow()
                        date_time = datetime.datetime.strptime(t.finished_at,"%Y-%m-%dT%H:%M:%S")
                        # We need to make sure a race condition didn't happen
                        if (now - date_time) < datetime.timedelta(minutes=5):
                            # 5 minutes haven't passed so we wait
                            status = 'busy'
                    else:
                        # If we have the file_id in the downloading_list mark it as busy
                        if str(t.file_id) in self.downloading_list:
                            status = 'busy'
                else:
                    status = 'busy'
                release_downloads.append({
                    'id' : t.id,
                    'name': t.name,
                    'status': status,
                    'timeleft': t.estimated_time,
                })
        return release_downloads

    def putioDownloader(self, fid):
        """Fetch the finished put.io file with id *fid* to the local disk."""
        log.info('Put.io Real downloader called with file_id: %s',fid)
        client = pio.Client(self.conf('oauth_token'))
        log.debug('About to get file List')
        files = client.File.list()
        downloaddir = self.conf('download_dir')
        for f in files:
            if str(f.id) == str(fid):
                client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file'))
                # Once the download is complete we need to remove it from the
                # running list.
                self.downloading_list.remove(fid)
        return True

    def getFromPutio(self, **kwargs):
        """API endpoint put.io calls back on; queues the local download."""
        try:
            file_id = str(kwargs.get('file_id'))
        except:
            return {
                'success' : False,
            }
        log.info('Put.io Download has been called file_id is %s', file_id)
        if file_id not in self.downloading_list:
            self.downloading_list.append(file_id)
            fireEventAsync('putio.download',fid = file_id)
            return {
                'success': True,
            }
        return {
            'success': False,
        }
| gpl-3.0 |
citationfinder/scholarly_citation_finder | scholarly_citation_finder/lib/tests.py | 1 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.test import TestCase
from file import download_file, unzip_file
from process import ProcessException
class FileTest(TestCase):
    """Tests for the download_file helper."""

    def test_download_file_success(self):
        """Downloading an existing remote file returns its file name."""
        filename = 'Conferences.zip'
        result = download_file(
            path='https://academicgraph.blob.core.windows.net/graph-2015-11-06/',
            file=filename,
            cwd=None)
        self.assertEqual(result, filename)

    def test_download_file_wrong_path(self):
        """A non-existent remote file raises ProcessException."""
        self.assertRaises(ProcessException, download_file,
                          'https://example.org/', 'non.zip')
"""
class UtilTest(TestCase):
URL_PDF = 'http://www.ronpub.com/publications/OJWT_2014v1i2n02_Kusserow.pdf'
URL_PDF_2 = 'http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.11.6264&rep=rep1&type=pdf'
EXAMPLE_GZ_FILE = os.path.join(config.TEST_DIR, 'downloads', 'example.pdf.gz')
def test_url_not_well_formed(self):
first = url_exits('http://example./paper.pdf')
self.assertEqual(first, False)
#def test_url_does_not_exits(self):
# first = url_exits('http://example.org/paper.pdf')
# self.assertEqual(first, False)
#def test_pdf_exits(self):
# first = url_exits('http://www.informatik.uni-bremen.de/agra/doc/work/evohot04.pdf')
# self.assertEqual(first, True)
def test_download_file(self):
filename = download_file(self.URL_PDF, config.TEST_DIR)
first = os.path.isfile(filename);
self.assertEqual(first, True)
os.remove(filename)
def test_download_file2(self):
filename = download_file(self.URL_PDF_2, config.TEST_DIR, 'test.pdf')
first = os.path.isfile(filename);
self.assertEqual(first, True)
os.remove(filename)
#def test_unzip_file(self):
# filename = unzip_file(self.EXAMPLE_GZ_FILE)
# first = os.path.isfile(filename);
# if first:
# os.remove(filename)
# self.assertEqual(first, True)
""" | mit |
cogmission/nupic | examples/opf/experiments/multistep/simple_0_f2/description.py | 38 | 1618 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os

from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription

# the sub-experiment configuration: overrides merged on top of the base
# experiment description below.
config = \
{
  # CSV dataset used as input for this experiment.
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_0.csv'),
  # Empty dicts keep the base description's defaults for each component.
  'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tpParams': { }},
  'predictedField': 'field2'}

# Import the base description with `config` applied, then copy its
# namespace into this module so the OPF framework can read the result.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 |
mrquim/repository.mrquim | plugin.video.live.ike/websocket/_ssl_compat.py | 69 | 1551 | """
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
__all__ = ["HAVE_SSL", "ssl", "SSLError"]

try:
    import ssl
    from ssl import SSLError

    # SSLContext-based hostname checking is only available on
    # sufficiently recent Python versions.
    if hasattr(ssl, 'SSLContext') and hasattr(ssl.SSLContext, 'check_hostname'):
        HAVE_CONTEXT_CHECK_HOSTNAME = True
    else:
        HAVE_CONTEXT_CHECK_HOSTNAME = False

    # Prefer the stdlib match_hostname; fall back to the backports
    # package on older Pythons.
    if hasattr(ssl, "match_hostname"):
        from ssl import match_hostname
    else:
        from backports.ssl_match_hostname import match_hostname
    __all__.append("match_hostname")
    __all__.append("HAVE_CONTEXT_CHECK_HOSTNAME")

    HAVE_SSL = True
except ImportError:
    # dummy class of SSLError for ssl none-support environment.
    class SSLError(Exception):
        pass

    HAVE_SSL = False
| gpl-2.0 |
willusher/ansible-modules-core | cloud/openstack/os_auth.py | 67 | 2036 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_auth
short_description: Retrieve an auth token
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Retrieve an auth token from an OpenStack Cloud
requirements:
- "python >= 2.6"
- "shade"
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Authenticate to the cloud and retrieve the service catalog
- os_auth:
cloud: rax-dfw
- debug: var=service_catalog
'''
def main():
    """Authenticate to the cloud and expose token/catalog as Ansible facts."""
    argument_spec = openstack_full_argument_spec()
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        facts = {
            'auth_token': cloud.auth_token,
            'service_catalog': cloud.service_catalog,
        }
        module.exit_json(changed=False, ansible_facts=facts)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
muff1nman/password-store | contrib/importers/keepass2pass.py | 26 | 4832 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Stefan Simroth <stefan.simroth@gmail.com>. All Rights Reserved.
# Based on the script for KeepassX by Juhamatti Niemelä <iiska@iki.fi>.
# This file is licensed under the GPLv2+. Please see COPYING for more information.
#
# Usage:
# ./keepass2pass.py -f export.xml
# By default, takes the name of the root element and puts all passwords in there, but you can disable this:
# ./keepass2pass.py -f export.xml -r ""
# Or you can use another root folder:
# ./keepass2pass.py -f export.xml -r foo
#
# Features:
# * This script can handle duplicates and will merge them.
# * Besides the password also the fields 'UserName', 'URL' and 'Notes' (comment) will be inserted.
# * You get a warning if an entry has no password, but it will still insert it.
import getopt, sys
from subprocess import Popen, PIPE
from xml.etree import ElementTree
def pass_import_entry(path, data):
    """Insert a multiline entry into password-store via ``pass insert``."""
    process = Popen(['pass', 'insert', '--multiline', path],
                    stdin=PIPE, stdout=PIPE)
    process.communicate(data.encode('utf8'))
    process.wait()
def get_value(elements, node_text):
    """Return the <Value> text of the sibling whose <Key> matches.

    Scans *elements* for one containing a <Key> whose text equals
    *node_text* and returns that element's <Value> text; returns ''
    when no such key is found.
    """
    for element in elements:
        for key_node in element.findall('Key'):
            if key_node.text == node_text:
                return element.find('Value').text
    return ''
def path_for(element, path=''):
    """Build the password-store path for *element* under prefix *path*.

    Entries are named by their Title string, Groups by their <Name>
    child; any other element contributes an empty title.
    """
    if element.tag == 'Entry':
        title = get_value(element.findall("String"), "Title")
    elif element.tag == 'Group':
        title = element.find('Name').text
    else:
        title = ''
    return title if path == '' else '/'.join([path, title])
def password_data(element, path=''):
    """ Return password data and additional info if available from password entry element. """
    data = ""
    # NOTE(review): get_value returns '' (not None) when the key is
    # absent, so this warning only fires when a <Value> element exists
    # but is empty -- confirm that is the intended trigger.
    password = get_value(element.findall('String'), 'Password')
    if password is not None: data = password + "\n"
    else:
        print "[WARN] No password: %s" % path_for(element, path)
    for field in ['UserName', 'URL', 'Notes']:
        # Passing the Entry element itself (rather than
        # element.findall('String')) works because iterating an Element
        # yields its children, but it is inconsistent with the call above.
        value = get_value(element, field)
        if value is not None and not len(value) == 0:
            data = "%s%s: %s\n" % (data, field, value)
    return data
def import_entry(entries, element, path=''):
    """Add *element*'s password data to *entries*, merging duplicates."""
    element_path = path_for(element, path)
    if entries.has_key(element_path):  # Python 2 only
        print "[INFO] Duplicate needs merging: %s" % element_path
        existing_data = entries[element_path]
        # Append the duplicate's data below a separator so no entry is
        # silently lost.
        data = "%s---------\nPassword: %s" % (existing_data, password_data(element))
    else:
        data = password_data(element, path)
    entries[element_path] = data
def import_group(entries, element, path=''):
    """Recursively import every entry and sub-group of *element*."""
    group_path = path_for(element, path)
    for subgroup in element.findall('Group'):
        import_group(entries, subgroup, group_path)
    for entry in element.findall('Entry'):
        import_entry(entries, entry, group_path)
def import_passwords(xml_file, root_path=None):
    """ Parse given Keepass2 XML file and import password groups from it """
    print "[>>>>] Importing passwords from file %s" % xml_file
    print "[INFO] Root path: %s" % root_path
    entries = dict()
    with open(xml_file) as xml:
        text = xml.read()
        xml_tree = ElementTree.XML(text)
        root = xml_tree.find('Root')
        root_group = root.find('Group')
        # Entries directly under the KeePass root group get no prefix.
        import_group(entries,root_group,'')
        # Default the destination folder to the root group's own name
        # unless the caller overrode it.
        if root_path is None: root_path = root_group.find('Name').text
        groups = root_group.findall('Group')
        for group in groups:
            import_group(entries, group, root_path)
    password_count = 0
    for path, data in sorted(entries.iteritems()):
        sys.stdout.write("[>>>>] Importing %s ... " % path.encode("utf-8"))
        pass_import_entry(path, data)
        sys.stdout.write("OK\n")
        password_count += 1
    print "[ OK ] Done. Imported %i passwords." % password_count
def usage():
    """ Print usage """
    # Keep these lines in sync with the getopt option string in main().
    print "Usage: %s -f XML_FILE" % (sys.argv[0])
    print "Optional:"
    print "  -r ROOT_PATH  Different root path to use than the one in xml file, use \"\" for none"
def main(argv):
    """Parse command-line options and run the KeePass2 import."""
    try:
        opts, args = getopt.gnu_getopt(argv, "f:r:")
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    xml_file = None
    root_path = None
    for opt, arg in opts:
        # NOTE(review): `opt in "-f"` is a substring test, not equality;
        # it happens to work here because getopt only yields '-f'/'-r',
        # but `opt == "-f"` would be the safer comparison.
        if opt in "-f":
            xml_file = arg
        if opt in "-r":
            root_path = arg
    if xml_file is not None:
        import_passwords(xml_file, root_path)
    else:
        usage()
        sys.exit(2)

if __name__ == '__main__':
    main(sys.argv[1:])
| gpl-2.0 |
cgstudiomap/cgstudiomap | main/eggs/Pillow-3.0.0-py2.7-linux-x86_64.egg/PIL/Hdf5StubImagePlugin.py | 77 | 1549 | #
# The Python Imaging Library
# $Id$
#
# HDF5 stub adapter
#
# Copyright (c) 2000-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile
_handler = None
##
# Install application-specific HDF5 image handler.
#
# @param handler Handler object.
def register_handler(handler):
    """Install an application-specific HDF5 image handler.

    :param handler: Handler object; its ``open`` method is used by the
        stub loader and its ``save`` method by :func:`_save`.
    """
    global _handler
    _handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[:8] == b"\x89HDF\r\n\x1a\n"
class HDF5StubImageFile(ImageFile.StubImageFile):
    """Stub image file for HDF5; actual loading is delegated to the
    handler installed via :func:`register_handler`."""

    format = "HDF5"
    format_description = "HDF5"

    def _open(self):
        # Remember the position so the signature check does not consume
        # the stream before the handler reads it.
        offset = self.fp.tell()
        if not _accept(self.fp.read(8)):
            raise SyntaxError("Not an HDF file")

        self.fp.seek(offset)

        # make something up: placeholder mode/size until the registered
        # handler opens the file for real.
        self.mode = "F"
        self.size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        # Stub protocol: return the registered handler (or None).
        return _handler
def _save(im, fp, filename):
    """Save *im* through the registered HDF5 handler.

    Raises IOError when no handler with a ``save`` method has been
    installed via :func:`register_handler`.
    """
    # BUG FIX: the guard previously read hasattr("_handler", "save"),
    # testing the *string* "_handler" instead of the handler object, so
    # a handler lacking a save method slipped through the check.
    if _handler is None or not hasattr(_handler, "save"):
        raise IOError("HDF5 save handler not installed")
    _handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry

# Register the stub with PIL so HDF5 files are recognised both by their
# magic signature (_accept) and by the .h5/.hdf file extensions.
Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept)
Image.register_save(HDF5StubImageFile.format, _save)

Image.register_extension(HDF5StubImageFile.format, ".h5")
Image.register_extension(HDF5StubImageFile.format, ".hdf")
| agpl-3.0 |
PetePriority/home-assistant | tests/components/media_player/test_monoprice.py | 3 | 17182 | """The tests for Monoprice Media player platform."""
import unittest
from unittest import mock
import voluptuous as vol
from collections import defaultdict
from homeassistant.components.media_player.const import (
DOMAIN, SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, SUPPORT_SELECT_SOURCE)
from homeassistant.const import STATE_ON, STATE_OFF
import tests.common
from homeassistant.components.media_player.monoprice import (
DATA_MONOPRICE, PLATFORM_SCHEMA, SERVICE_SNAPSHOT,
SERVICE_RESTORE, setup_platform)
import pytest
class AttrDict(dict):
    """Dict whose items can also be read and written as attributes."""

    def __setattr__(self, name, value):
        """Store attribute assignments as dictionary items."""
        self[name] = value

    def __getattr__(self, item):
        """Resolve attribute reads from the dictionary items."""
        return self[item]
class MockMonoprice:
    """In-memory stand-in for the pymonoprice controller object."""

    def __init__(self):
        """Start every zone powered on, muted, at volume 0, on source 1."""
        self.zones = defaultdict(
            lambda: AttrDict(power=True, volume=0, mute=True, source=1))

    def zone_status(self, zone_id):
        """Return a copy of the zone's state, tagged with its zone id."""
        state = self.zones[zone_id]
        state.zone = zone_id
        return AttrDict(state)

    def set_source(self, zone_id, source_idx):
        """Select input *source_idx* for the zone."""
        self.zones[zone_id].source = source_idx

    def set_power(self, zone_id, power):
        """Switch the zone on or off."""
        self.zones[zone_id].power = power

    def set_mute(self, zone_id, mute):
        """Mute or unmute the zone."""
        self.zones[zone_id].mute = mute

    def set_volume(self, zone_id, volume):
        """Set the zone's volume level."""
        self.zones[zone_id].volume = volume

    def restore_zone(self, zone):
        """Overwrite a zone's state from a snapshot object."""
        self.zones[zone.zone] = AttrDict(zone)
class TestMonopriceSchema(unittest.TestCase):
    """Test Monoprice schema."""

    def test_valid_schema(self):
        """Test valid schema."""
        # Zone ids are two digits XY: X = unit 1-3, Y = zone 1-6 on that
        # unit; source ids run 1-6.  This covers every legal id once.
        valid_schema = {
            'platform': 'monoprice',
            'port': '/dev/ttyUSB0',
            'zones': {11: {'name': 'a'},
                      12: {'name': 'a'},
                      13: {'name': 'a'},
                      14: {'name': 'a'},
                      15: {'name': 'a'},
                      16: {'name': 'a'},
                      21: {'name': 'a'},
                      22: {'name': 'a'},
                      23: {'name': 'a'},
                      24: {'name': 'a'},
                      25: {'name': 'a'},
                      26: {'name': 'a'},
                      31: {'name': 'a'},
                      32: {'name': 'a'},
                      33: {'name': 'a'},
                      34: {'name': 'a'},
                      35: {'name': 'a'},
                      36: {'name': 'a'},
                      },
            'sources': {
                1: {'name': 'a'},
                2: {'name': 'a'},
                3: {'name': 'a'},
                4: {'name': 'a'},
                5: {'name': 'a'},
                6: {'name': 'a'}
            }
        }
        # Should not raise.
        PLATFORM_SCHEMA(valid_schema)

    def test_invalid_schemas(self):
        """Test invalid schemas."""
        # Each entry below violates exactly one schema rule (see the
        # inline comments) and must be rejected.
        schemas = (
            {},  # Empty
            None,  # None
            # Missing port
            {
                'platform': 'monoprice',
                'name': 'Name',
                'zones': {11: {'name': 'a'}},
                'sources': {1: {'name': 'b'}},
            },
            # Invalid zone number
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {10: {'name': 'a'}},
                'sources': {1: {'name': 'b'}},
            },
            # Invalid source number
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {11: {'name': 'a'}},
                'sources': {0: {'name': 'b'}},
            },
            # Zone missing name
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {11: {}},
                'sources': {1: {'name': 'b'}},
            },
            # Source missing name
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {11: {'name': 'a'}},
                'sources': {1: {}},
            },
        )
        for value in schemas:
            with pytest.raises(vol.MultipleInvalid):
                PLATFORM_SCHEMA(value)
class TestMonopriceMediaPlayer(unittest.TestCase):
    """Test the media_player module.

    BUGFIX note: the volume asserts previously read
    ``assert 0.0 == x, 0.0001`` — the trailing ``0.0001`` was the assert
    *message*, not a tolerance, so floats were compared exactly.  They now
    use ``pytest.approx`` with an absolute tolerance.
    """

    def setUp(self):
        """Set up the test case: one mocked amp, one zone, three sources."""
        self.monoprice = MockMonoprice()
        self.hass = tests.common.get_test_home_assistant()
        self.hass.start()
        # Note, source dictionary is unsorted!
        with mock.patch('pymonoprice.get_monoprice',
                        new=lambda *a: self.monoprice):
            setup_platform(self.hass, {
                'platform': 'monoprice',
                'port': '/dev/ttyS0',
                'name': 'Name',
                'zones': {12: {'name': 'Zone name'}},
                'sources': {1: {'name': 'one'},
                            3: {'name': 'three'},
                            2: {'name': 'two'}},
            }, lambda *args, **kwargs: None, {})
            self.hass.block_till_done()
        self.media_player = self.hass.data[DATA_MONOPRICE][0]
        self.media_player.hass = self.hass
        self.media_player.entity_id = 'media_player.zone_1'

    def tearDown(self):
        """Tear down the test case."""
        self.hass.stop()

    def test_setup_platform(self, *args):
        """Test setting up platform."""
        # Two services must be registered
        assert self.hass.services.has_service(DOMAIN, SERVICE_RESTORE)
        assert self.hass.services.has_service(DOMAIN, SERVICE_SNAPSHOT)
        assert len(self.hass.data[DATA_MONOPRICE]) == 1
        assert self.hass.data[DATA_MONOPRICE][0].name == 'Zone name'

    def test_service_calls_with_entity_id(self):
        """Test snapshot save/restore service calls."""
        self.media_player.update()
        assert 'Zone name' == self.media_player.name
        assert STATE_ON == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(0.0, abs=1e-4)
        assert self.media_player.is_volume_muted
        assert 'one' == self.media_player.source
        # Saving default values
        self.hass.services.call(DOMAIN, SERVICE_SNAPSHOT,
                                {'entity_id': 'media_player.zone_1'},
                                blocking=True)
        # Changing media player to new state
        self.media_player.set_volume_level(1)
        self.media_player.select_source('two')
        self.media_player.mute_volume(False)
        self.media_player.turn_off()
        # Checking that values were indeed changed
        self.media_player.update()
        assert 'Zone name' == self.media_player.name
        assert STATE_OFF == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(1.0, abs=1e-4)
        assert not self.media_player.is_volume_muted
        assert 'two' == self.media_player.source
        # Restoring wrong media player to its previous state
        # Nothing should be done
        self.hass.services.call(DOMAIN, SERVICE_RESTORE,
                                {'entity_id': 'media.not_existing'},
                                blocking=True)
        # Checking that values were not (!) restored
        self.media_player.update()
        assert 'Zone name' == self.media_player.name
        assert STATE_OFF == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(1.0, abs=1e-4)
        assert not self.media_player.is_volume_muted
        assert 'two' == self.media_player.source
        # Restoring media player to its previous state
        self.hass.services.call(DOMAIN, SERVICE_RESTORE,
                                {'entity_id': 'media_player.zone_1'},
                                blocking=True)
        self.hass.block_till_done()
        # Checking that values were restored
        assert 'Zone name' == self.media_player.name
        assert STATE_ON == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(0.0, abs=1e-4)
        assert self.media_player.is_volume_muted
        assert 'one' == self.media_player.source

    def test_service_calls_without_entity_id(self):
        """Test snapshot save/restore service calls."""
        self.media_player.update()
        assert 'Zone name' == self.media_player.name
        assert STATE_ON == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(0.0, abs=1e-4)
        assert self.media_player.is_volume_muted
        assert 'one' == self.media_player.source
        # Restoring media player
        # since there is no snapshot, nothing should be done
        self.hass.services.call(DOMAIN, SERVICE_RESTORE, blocking=True)
        self.hass.block_till_done()
        self.media_player.update()
        assert 'Zone name' == self.media_player.name
        assert STATE_ON == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(0.0, abs=1e-4)
        assert self.media_player.is_volume_muted
        assert 'one' == self.media_player.source
        # Saving default values
        self.hass.services.call(DOMAIN, SERVICE_SNAPSHOT, blocking=True)
        self.hass.block_till_done()
        # Changing media player to new state
        self.media_player.set_volume_level(1)
        self.media_player.select_source('two')
        self.media_player.mute_volume(False)
        self.media_player.turn_off()
        # Checking that values were indeed changed
        self.media_player.update()
        assert 'Zone name' == self.media_player.name
        assert STATE_OFF == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(1.0, abs=1e-4)
        assert not self.media_player.is_volume_muted
        assert 'two' == self.media_player.source
        # Restoring media player to its previous state
        self.hass.services.call(DOMAIN, SERVICE_RESTORE, blocking=True)
        self.hass.block_till_done()
        # Checking that values were restored
        assert 'Zone name' == self.media_player.name
        assert STATE_ON == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(0.0, abs=1e-4)
        assert self.media_player.is_volume_muted
        assert 'one' == self.media_player.source

    def test_update(self):
        """Test updating values from monoprice."""
        # Before the first update() every property is unknown.
        assert self.media_player.state is None
        assert self.media_player.volume_level is None
        assert self.media_player.is_volume_muted is None
        assert self.media_player.source is None
        self.media_player.update()
        assert STATE_ON == self.media_player.state
        assert self.media_player.volume_level == pytest.approx(0.0, abs=1e-4)
        assert self.media_player.is_volume_muted
        assert 'one' == self.media_player.source

    def test_name(self):
        """Test name property."""
        assert 'Zone name' == self.media_player.name

    def test_state(self):
        """Test state property."""
        assert self.media_player.state is None
        self.media_player.update()
        assert STATE_ON == self.media_player.state
        self.monoprice.zones[12].power = False
        self.media_player.update()
        assert STATE_OFF == self.media_player.state

    def test_volume_level(self):
        """Test volume level property."""
        assert self.media_player.volume_level is None
        self.media_player.update()
        assert self.media_player.volume_level == pytest.approx(0.0, abs=1e-4)
        # assumes 38 is the amplifier's maximum raw volume step — see
        # test_set_volume_level; TODO confirm against pymonoprice.
        self.monoprice.zones[12].volume = 38
        self.media_player.update()
        assert self.media_player.volume_level == pytest.approx(1.0, abs=1e-4)
        self.monoprice.zones[12].volume = 19
        self.media_player.update()
        assert self.media_player.volume_level == pytest.approx(.5, abs=1e-4)

    def test_is_volume_muted(self):
        """Test volume muted property."""
        assert self.media_player.is_volume_muted is None
        self.media_player.update()
        assert self.media_player.is_volume_muted
        self.monoprice.zones[12].mute = False
        self.media_player.update()
        assert not self.media_player.is_volume_muted

    def test_supported_features(self):
        """Test supported features property."""
        assert SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | \
            SUPPORT_VOLUME_STEP | SUPPORT_TURN_ON | \
            SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE == \
            self.media_player.supported_features

    def test_source(self):
        """Test source property."""
        assert self.media_player.source is None
        self.media_player.update()
        assert 'one' == self.media_player.source

    def test_media_title(self):
        """Test media title property."""
        assert self.media_player.media_title is None
        self.media_player.update()
        assert 'one' == self.media_player.media_title

    def test_source_list(self):
        """Test source list property."""
        # Note, the list is sorted!
        assert ['one', 'two', 'three'] == \
            self.media_player.source_list

    def test_select_source(self):
        """Test source selection methods."""
        self.media_player.update()
        assert 'one' == self.media_player.source
        self.media_player.select_source('two')
        assert 2 == self.monoprice.zones[12].source
        self.media_player.update()
        assert 'two' == self.media_player.source
        # Trying to set unknown source
        self.media_player.select_source('no name')
        assert 2 == self.monoprice.zones[12].source
        self.media_player.update()
        assert 'two' == self.media_player.source

    def test_turn_on(self):
        """Test turning on the zone."""
        self.monoprice.zones[12].power = False
        self.media_player.update()
        assert STATE_OFF == self.media_player.state
        self.media_player.turn_on()
        assert self.monoprice.zones[12].power
        self.media_player.update()
        assert STATE_ON == self.media_player.state

    def test_turn_off(self):
        """Test turning off the zone."""
        self.monoprice.zones[12].power = True
        self.media_player.update()
        assert STATE_ON == self.media_player.state
        self.media_player.turn_off()
        assert not self.monoprice.zones[12].power
        self.media_player.update()
        assert STATE_OFF == self.media_player.state

    def test_mute_volume(self):
        """Test mute functionality."""
        self.monoprice.zones[12].mute = True
        self.media_player.update()
        assert self.media_player.is_volume_muted
        self.media_player.mute_volume(False)
        assert not self.monoprice.zones[12].mute
        self.media_player.update()
        assert not self.media_player.is_volume_muted
        self.media_player.mute_volume(True)
        assert self.monoprice.zones[12].mute
        self.media_player.update()
        assert self.media_player.is_volume_muted

    def test_set_volume_level(self):
        """Test set volume level."""
        # Level 0.0-1.0 maps onto raw volume 0-38; the raw value sent to
        # the amplifier must always be an int.
        self.media_player.set_volume_level(1.0)
        assert 38 == self.monoprice.zones[12].volume
        assert isinstance(self.monoprice.zones[12].volume, int)
        self.media_player.set_volume_level(0.0)
        assert 0 == self.monoprice.zones[12].volume
        assert isinstance(self.monoprice.zones[12].volume, int)
        self.media_player.set_volume_level(0.5)
        assert 19 == self.monoprice.zones[12].volume
        assert isinstance(self.monoprice.zones[12].volume, int)

    def test_volume_up(self):
        """Test increasing volume by one."""
        self.monoprice.zones[12].volume = 37
        self.media_player.update()
        self.media_player.volume_up()
        assert 38 == self.monoprice.zones[12].volume
        assert isinstance(self.monoprice.zones[12].volume, int)
        # Try to raise value beyond max
        self.media_player.update()
        self.media_player.volume_up()
        assert 38 == self.monoprice.zones[12].volume
        assert isinstance(self.monoprice.zones[12].volume, int)

    def test_volume_down(self):
        """Test decreasing volume by one."""
        self.monoprice.zones[12].volume = 1
        self.media_player.update()
        self.media_player.volume_down()
        assert 0 == self.monoprice.zones[12].volume
        assert isinstance(self.monoprice.zones[12].volume, int)
        # Try to lower value beyond minimum
        self.media_player.update()
        self.media_player.volume_down()
        assert 0 == self.monoprice.zones[12].volume
        assert isinstance(self.monoprice.zones[12].volume, int)
| apache-2.0 |
mario23285/ProyectoElectrico | src/Arm.py | 1 | 4020 | """
UNIVERSIDAD DE COSTA RICA Escuela de Ingeniería Eléctrica
IE0499 | Proyecto Eléctrico
Mario Alberto Castresana Avendaño
A41267
Programa: BVH_TuneUp
-------------------------------------------------------------------------------
archivo: Arm.py
descripción:
Este archivo contiene la clase Arm, la cual se utiliza para implementar el
brazo izquierdo y el derecho. Los estudios de goniometría para este hueso
se basan en los siguientes límites de los ángulos de Euler:
Z aducción(+) y abducción(-) plano frontal
X aducción(+) y abducción(-) plano transversal
Y rotación interna(+) y rotación externa(-)
"""
from Bone import Bone
class Arm(Bone):
    """Goniometry checks for the arms ("Arm" bones in the BVH hierarchy).

    Euler angle conventions for this bone (see the module docstring):
    Z: adduction(+) / abduction(-) in the frontal plane,
    X: adduction(+) / abduction(-) in the transverse plane,
    Y: internal(+) / external(-) rotation.
    """

    def __init__(self, ID=' ', Zp=0, Xp=0, Yp=0):
        """Initialize the bone.

        ID: bone identifier, e.g. left/right.
        Zp, Xp, Yp: indices into the MOTION array where this bone's
        Euler angles (Z, X, Y) are stored for a given frame.
        """
        self.ID = ID
        self.Zp = Zp
        self.Xp = Xp
        self.Yp = Yp
        # Delegate to the superclass constructor for the goniometric
        # limits (degrees) shared by all goniometry attributes.
        Bone.__init__(self,
                      Name='Brazo',
                      Zmin=-90.000000,
                      Zmax=90.000000,
                      Xmin=-45.000000,
                      Xmax=135.000000,
                      Ymin=-90.000000,
                      Ymax=90.000000)

    def Goniometry_check(self, MOTION, frame):
        """Clamp this bone's Euler angles in MOTION to human joint limits.

        Compares the bone's Euler angles in the given frame against its
        goniometric limits; any angle outside the human range is reported
        as a glitch and clamped in place in MOTION.

        MOTION: array with all Euler angles of the frame (one slot per
        channel, ordered per the HIERARCHY section of the BVH).
        frame: index of the MoCap frame being analyzed.

        Fixes over the previous version: the misspelled local ``Xeluer``
        is gone, and every glitch message is consistently separated by
        ' | ' (the X-axis "above max" message used to miss a space).
        """
        glitch = False
        ErrorMsg = ' existen glitches de '
        # (MOTION index, lower limit, upper limit, message when below,
        #  message when above) for the Z, X and Y axes, checked in order.
        limits = (
            (self.Zp, self.Zmin, self.Zmax,
             'abduccion horizontal', 'aduccion horizontal'),
            (self.Xp, self.Xmin, self.Xmax,
             'abduccion frontal', 'aduccion frontal'),
            (self.Yp, self.Ymin, self.Ymax,
             'rotacion externa', 'rotacion interna'),
        )
        for index, low, high, below_msg, above_msg in limits:
            euler = MOTION[index]
            if euler < low:
                MOTION[index] = low
                glitch = True
                ErrorMsg += below_msg + ' | '
            if euler > high:
                MOTION[index] = high
                glitch = True
                ErrorMsg += above_msg + ' | '
        if glitch:
            self.Report_glitch(ErrorMsg, frame)
| gpl-2.0 |
pravsripad/mne-python | mne/preprocessing/tests/test_artifact_detection.py | 11 | 3299 | # Author: Adonay Nunes <adonay.s.nunes@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import pytest
from numpy.testing import assert_allclose
from mne.chpi import read_head_pos
from mne.datasets import testing
from mne.io import read_raw_fif
from mne.preprocessing import (annotate_movement, compute_average_dev_head_t,
annotate_muscle_zscore)
from mne import Annotations
# Paths into the optional mne testing dataset; when it is not installed,
# data_path is empty and the @testing.requires_testing_data decorators
# below skip the tests.
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pre = op.join(sss_path, 'test_move_anon_')
raw_fname = pre + 'raw.fif'  # maxshield raw recording with head movement
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')  # cHPI head positions
@testing.requires_testing_data
def test_movement_annotation_head_correction():
    """Test correct detection movement artifact and dev_head_t."""
    raw = read_raw_fif(raw_fname, allow_maxshield='yes').load_data()
    pos = read_head_pos(pos_fname)
    # Check 5 rotation segments are detected
    # (the "[]" assignment target unpacks the second return value and
    # raises ValueError if it is not empty)
    annot_rot, [] = annotate_movement(raw, pos, rotation_velocity_limit=5)
    assert(annot_rot.duration.size == 5)
    # Check 2 translation vel. segments are detected
    annot_tra, [] = annotate_movement(raw, pos, translation_velocity_limit=.05)
    assert(annot_tra.duration.size == 2)
    # Check 1 movement distance segment is detected
    annot_dis, disp = annotate_movement(raw, pos, mean_distance_limit=.02)
    assert(annot_dis.duration.size == 1)
    # Check correct trans mat
    # All three annotation kinds are combined so the bad segments are
    # excluded from the averaged transform.
    raw.set_annotations(annot_rot + annot_tra + annot_dis)
    dev_head_t = compute_average_dev_head_t(raw, pos)
    # Reference 4x4 device-to-head transform; values presumably come from
    # a previously verified run of this computation -- confirm before
    # changing them.
    dev_head_t_ori = np.array([
        [0.9957292, -0.08688804, 0.03120615, 0.00698271],
        [0.09020767, 0.9875856, -0.12859731, -0.0159098],
        [-0.01964518, 0.1308631, 0.99120578, 0.07258289],
        [0., 0., 0., 1.]])
    assert_allclose(dev_head_t_ori, dev_head_t['trans'], rtol=1e-5, atol=0)
    # Smoke test skipping time due to previous annotations.
    raw.set_annotations(Annotations([raw.times[0]], 0.1, 'bad'))
    annot_dis, disp = annotate_movement(raw, pos, mean_distance_limit=.02)
    assert(annot_dis.duration.size == 1)
@testing.requires_testing_data
def test_muscle_annotation():
    """Test correct detection muscle artifacts."""
    raw = read_raw_fif(raw_fname, allow_maxshield='yes').load_data()
    raw.notch_filter([50, 110, 150])
    # Two muscle segments should be flagged at this z-score threshold.
    annot_muscle, scores = annotate_muscle_zscore(raw, ch_type='mag',
                                                  threshold=10)
    # Convert annotation onsets (seconds) to sample indices.
    onset_samples = (annot_muscle.onset * raw.info['sfreq']).astype(int)
    np.testing.assert_array_equal(scores[onset_samples].astype(int),
                                  np.array([23, 10]))
    assert annot_muscle.duration.size == 2
@testing.requires_testing_data
def test_muscle_annotation_without_meeg_data():
    """Call annotate_muscle_zscore with data without meg or eeg."""
    # Keep only stimulus channels so no M/EEG data remains.
    raw = read_raw_fif(raw_fname, allow_maxshield='yes').crop(0, .1)
    raw.load_data()
    raw.pick_types(meg=False, stim=True)
    with pytest.raises(ValueError, match="No M/EEG channel types found"):
        annotate_muscle_zscore(raw, threshold=10)
| bsd-3-clause |
stellaf/sales_rental | account_invoice_start_end_dates/models/account_invoice.py | 1 | 3875 | # -*- coding: utf-8 -*-
# © 2013-2016 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
class AccountInvoiceLine(models.Model):
    _inherit = 'account.invoice.line'

    # Optional service period covered by this invoice line.
    start_date = fields.Date('Start Date')
    end_date = fields.Date('End Date')
    must_have_dates = fields.Boolean(
        related='product_id.must_have_dates', readonly=True)

    @api.multi
    @api.constrains('start_date', 'end_date')
    def _check_start_end_dates(self):
        """Ensure the dates come in pairs and are chronologically ordered."""
        for line in self:
            has_start = bool(line.start_date)
            has_end = bool(line.end_date)
            if has_start and not has_end:
                raise ValidationError(
                    _("Missing End Date for invoice line with "
                      "Description '%s'.")
                    % (line.name))
            if has_end and not has_start:
                raise ValidationError(
                    _("Missing Start Date for invoice line with "
                      "Description '%s'.")
                    % (line.name))
            if has_start and has_end and line.start_date > line.end_date:
                raise ValidationError(
                    _("Start Date should be before or be the same as "
                      "End Date for invoice line with Description '%s'.")
                    % (line.name))
            # Whether a product flagged must_have_dates actually carries
            # both dates is deliberately NOT checked here: it would block
            # automatic invoice generation/import.  That check happens at
            # validation time in AccountInvoice.action_move_create.
class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    def inv_line_characteristic_hashcode(self, invoice_line):
        """Add start and end dates to the grouping hashcode.

        Used when the option "Group Invoice Lines" is active on the
        Account Journal, so lines with different periods are not merged.
        """
        parent_code = super(
            AccountInvoice, self).inv_line_characteristic_hashcode(
                invoice_line)
        start = invoice_line.get('start_date', 'False')
        end = invoice_line.get('end_date', 'False')
        return '%s-%s-%s' % (parent_code, start, end)

    @api.model
    def line_get_convert(self, line, part):
        """Propagate the dates from invoice values to move line values."""
        res = super(AccountInvoice, self).line_get_convert(line, part)
        res.update(
            start_date=line.get('start_date', False),
            end_date=line.get('end_date', False),
        )
        return res

    @api.model
    def invoice_line_move_line_get(self):
        """Copy the dates of each invoice line onto its move line dict."""
        res = super(AccountInvoice, self).invoice_line_move_line_get()
        ailo = self.env['account.invoice.line']
        for vals in res:
            line = ailo.browse(vals['invl_id'])
            vals['start_date'] = line.start_date
            vals['end_date'] = line.end_date
        return res

    @api.multi
    def action_move_create(self):
        """Refuse validation when a must-have-dates product lacks dates."""
        for invoice in self:
            for line in invoice.invoice_line_ids:
                product = line.product_id
                if not (product and product.must_have_dates):
                    continue
                if not line.start_date or not line.end_date:
                    raise UserError(_(
                        "Missing Start Date and End Date for invoice "
                        "line with Product '%s' which has the "
                        "property 'Must Have Start and End Dates'.")
                        % (product.name))
        return super(AccountInvoice, self).action_move_create()
| gpl-3.0 |
piffey/ansible | test/units/modules/network/netscaler/test_netscaler_service.py | 39 | 14315 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
import sys
if sys.version_info[:2] != (2, 6):
import requests
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
class TestNetscalerServiceModule(TestModule):
    """Unit tests for the netscaler_service Ansible module.

    The Citrix nitro SDK is not required: setUpClass builds MagicMock
    stand-ins for the whole ``nssrc`` module tree, installed into
    ``sys.modules`` via ``patch.dict``; setUp/tearDown start and stop
    the patchers around every test.
    """
    @classmethod
    def setUpClass(cls):
        """Prepare (but do not start) the sys.modules patchers."""
        # Generic mock for every nssrc submodule that needs no per-test
        # inspection; the named class-level mocks are kept so individual
        # tests could assert against them.
        m = MagicMock()
        cls.service_mock = MagicMock()
        cls.service_mock.__class__ = MagicMock()
        cls.service_lbmonitor_binding_mock = MagicMock()
        cls.lbmonitor_service_binding_mock = MagicMock()
        nssrc_modules_mock = {
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service.service': cls.service_mock,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding': cls.service_lbmonitor_binding_mock,
            'nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding.service_lbmonitor_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.lb': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding.lbmonitor_service_binding': cls.lbmonitor_service_binding_mock,
        }
        cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
        cls.nitro_base_patcher = nitro_base_patcher
    @classmethod
    def tearDownClass(cls):
        """Stop the shared patchers once the whole class is done."""
        cls.nitro_base_patcher.stop()
        cls.nitro_specific_patcher.stop()
    def set_module_state(self, state):
        """Prepare AnsibleModule args: minimal credentials + target state."""
        set_module_args(dict(
            nitro_user='user',
            nitro_pass='pass',
            nsip='1.1.1.1',
            state=state,
        ))
    def setUp(self):
        """Start the nitro patchers before every test."""
        super(TestNetscalerServiceModule, self).setUp()
        self.nitro_base_patcher.start()
        self.nitro_specific_patcher.start()
        # Setup minimal required arguments to pass AnsibleModule argument parsing
    def tearDown(self):
        """Stop the nitro patchers after every test."""
        super(TestNetscalerServiceModule, self).tearDown()
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()
    def test_graceful_nitro_api_import_error(self):
        """Module must fail cleanly when the nitro SDK cannot be imported."""
        # Stop nitro api patching to cause ImportError
        self.set_module_state('present')
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()
        # Imported inside the test so the import happens with (or, here,
        # without) the mocked nitro modules in place.
        from ansible.modules.network.netscaler import netscaler_service
        self.module = netscaler_service
        result = self.failed()
        self.assertEqual(result['msg'], 'Could not load nitro python sdk')
    def test_graceful_nitro_error_on_login(self):
        """A nitro exception raised at login must be reported, not leak."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        class MockException(Exception):
            def __init__(self, *args, **kwargs):
                # errorcode/message mimic the real nitro_exception shape.
                self.errorcode = 0
                self.message = ''
        client_mock = Mock()
        client_mock.login = Mock(side_effect=MockException)
        m = Mock(return_value=client_mock)
        with patch('ansible.modules.network.netscaler.netscaler_service.get_nitro_client', m):
            with patch('ansible.modules.network.netscaler.netscaler_service.nitro_exception', MockException):
                self.module = netscaler_service
                result = self.failed()
                self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
    def test_graceful_no_connection_error(self):
        """A requests ConnectionError at login must be reported cleanly."""
        if sys.version_info[:2] == (2, 6):
            self.skipTest('requests library not available under python2.6')
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        class MockException(Exception):
            pass
        client_mock = Mock()
        attrs = {'login.side_effect': requests.exceptions.ConnectionError}
        client_mock.configure_mock(**attrs)
        m = Mock(return_value=client_mock)
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            get_nitro_client=m,
            nitro_exception=MockException,
        ):
            self.module = netscaler_service
            result = self.failed()
            self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
    def test_graceful_login_error(self):
        """An SSL error at login must be reported cleanly."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        if sys.version_info[:2] == (2, 6):
            self.skipTest('requests library not available under python2.6')
        class MockException(Exception):
            pass
        client_mock = Mock()
        attrs = {'login.side_effect': requests.exceptions.SSLError}
        client_mock.configure_mock(**attrs)
        m = Mock(return_value=client_mock)
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            get_nitro_client=m,
            nitro_exception=MockException,
        ):
            self.module = netscaler_service
            result = self.failed()
            self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
    def test_create_non_existing_service(self):
        """state=present on a missing service must call add() and report change."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        service_proxy_mock = MagicMock()
        attrs = {
            'diff_object.return_value': {},
        }
        service_proxy_mock.configure_mock(**attrs)
        m = MagicMock(return_value=service_proxy_mock)
        # side_effect sequence: absent before the module acts, present after.
        service_exists_mock = Mock(side_effect=[False, True])
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            ConfigProxy=m,
            service_exists=service_exists_mock,
            do_state_change=Mock(return_value=Mock(errorcode=0)),
        ):
            self.module = netscaler_service
            result = self.exited()
            service_proxy_mock.assert_has_calls([call.add()])
            self.assertTrue(result['changed'], msg='Change not recorded')
    def test_update_service_when_service_differs(self):
        """A differing service must be update()d and reported as changed."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        service_proxy_mock = MagicMock()
        attrs = {
            'diff_object.return_value': {},
        }
        service_proxy_mock.configure_mock(**attrs)
        m = MagicMock(return_value=service_proxy_mock)
        service_exists_mock = Mock(side_effect=[True, True])
        # Differs before the update, identical afterwards.
        service_identical_mock = Mock(side_effect=[False, True])
        monitor_bindings_identical_mock = Mock(side_effect=[True, True])
        all_identical_mock = Mock(side_effect=[False])
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            ConfigProxy=m,
            service_exists=service_exists_mock,
            service_identical=service_identical_mock,
            monitor_bindings_identical=monitor_bindings_identical_mock,
            all_identical=all_identical_mock,
            do_state_change=Mock(return_value=Mock(errorcode=0)),
        ):
            self.module = netscaler_service
            result = self.exited()
            service_proxy_mock.assert_has_calls([call.update()])
            self.assertTrue(result['changed'], msg='Change not recorded')
    def test_update_service_when_monitor_bindings_differ(self):
        """Differing monitor bindings must trigger exactly one sync."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        service_proxy_mock = MagicMock()
        attrs = {
            'diff_object.return_value': {},
        }
        service_proxy_mock.configure_mock(**attrs)
        m = MagicMock(return_value=service_proxy_mock)
        service_exists_mock = Mock(side_effect=[True, True])
        service_identical_mock = Mock(side_effect=[True, True])
        # Bindings differ before the sync, match afterwards.
        monitor_bindings_identical_mock = Mock(side_effect=[False, True])
        all_identical_mock = Mock(side_effect=[False])
        sync_monitor_bindings_mock = Mock()
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            ConfigProxy=m,
            service_exists=service_exists_mock,
            service_identical=service_identical_mock,
            monitor_bindings_identical=monitor_bindings_identical_mock,
            all_identical=all_identical_mock,
            sync_monitor_bindings=sync_monitor_bindings_mock,
            do_state_change=Mock(return_value=Mock(errorcode=0)),
        ):
            self.module = netscaler_service
            result = self.exited()
            # poor man's assert_called_once since python3.5 does not implement that mock method
            self.assertEqual(len(sync_monitor_bindings_mock.mock_calls), 1, msg='sync monitor bindings not called once')
            self.assertTrue(result['changed'], msg='Change not recorded')
    def test_no_change_to_module_when_all_identical(self):
        """If service and bindings already match, nothing must change."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        service_proxy_mock = MagicMock()
        attrs = {
            'diff_object.return_value': {},
        }
        service_proxy_mock.configure_mock(**attrs)
        m = MagicMock(return_value=service_proxy_mock)
        service_exists_mock = Mock(side_effect=[True, True])
        service_identical_mock = Mock(side_effect=[True, True])
        monitor_bindings_identical_mock = Mock(side_effect=[True, True])
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            ConfigProxy=m,
            service_exists=service_exists_mock,
            service_identical=service_identical_mock,
            monitor_bindings_identical=monitor_bindings_identical_mock,
            do_state_change=Mock(return_value=Mock(errorcode=0)),
        ):
            self.module = netscaler_service
            result = self.exited()
            self.assertFalse(result['changed'], msg='Erroneous changed status update')
    def test_absent_operation(self):
        """state=absent on an existing service must delete() it."""
        self.set_module_state('absent')
        from ansible.modules.network.netscaler import netscaler_service
        service_proxy_mock = MagicMock()
        attrs = {
            'diff_object.return_value': {},
        }
        service_proxy_mock.configure_mock(**attrs)
        m = MagicMock(return_value=service_proxy_mock)
        # Present before the delete, absent afterwards.
        service_exists_mock = Mock(side_effect=[True, False])
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            ConfigProxy=m,
            service_exists=service_exists_mock,
        ):
            self.module = netscaler_service
            result = self.exited()
            service_proxy_mock.assert_has_calls([call.delete()])
            self.assertTrue(result['changed'], msg='Changed status not set correctly')
    def test_absent_operation_no_change(self):
        """state=absent on a missing service must be a no-op."""
        self.set_module_state('absent')
        from ansible.modules.network.netscaler import netscaler_service
        service_proxy_mock = MagicMock()
        attrs = {
            'diff_object.return_value': {},
        }
        service_proxy_mock.configure_mock(**attrs)
        m = MagicMock(return_value=service_proxy_mock)
        service_exists_mock = Mock(side_effect=[False, False])
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            ConfigProxy=m,
            service_exists=service_exists_mock,
        ):
            self.module = netscaler_service
            result = self.exited()
            service_proxy_mock.assert_not_called()
            self.assertFalse(result['changed'], msg='Changed status not set correctly')
    def test_graceful_nitro_exception_operation_present(self):
        """A nitro exception during state=present must fail gracefully."""
        self.set_module_state('present')
        from ansible.modules.network.netscaler import netscaler_service
        class MockException(Exception):
            def __init__(self, *args, **kwargs):
                self.errorcode = 0
                self.message = ''
        m = Mock(side_effect=MockException)
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            service_exists=m,
            nitro_exception=MockException
        ):
            self.module = netscaler_service
            result = self.failed()
            self.assertTrue(
                result['msg'].startswith('nitro exception'),
                msg='Nitro exception not caught on operation present'
            )
    def test_graceful_nitro_exception_operation_absent(self):
        """A nitro exception during state=absent must fail gracefully."""
        self.set_module_state('absent')
        from ansible.modules.network.netscaler import netscaler_service
        class MockException(Exception):
            def __init__(self, *args, **kwargs):
                self.errorcode = 0
                self.message = ''
        m = Mock(side_effect=MockException)
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_service',
            service_exists=m,
            nitro_exception=MockException
        ):
            self.module = netscaler_service
            result = self.failed()
            self.assertTrue(
                result['msg'].startswith('nitro exception'),
                msg='Nitro exception not caught on operation absent'
            )
quarkonics/zstack-woodpecker | integrationtest/vm/multihosts/volumes/test_snapshot_robot_1_hour.py | 2 | 3493 | '''
Robot Test only includes Vm operations, Volume operations and Snapshot operations
Case will run 1 hour with fair strategy.
@author: Youyk
'''
import zstackwoodpecker.action_select as action_select
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.header.vm as vm_header
import os
import time
# Will set 4600s as the case timeout, since the test loop alone needs to run
# at least ~3700s (one hour of rounds plus setup/cleanup).
_config_ = {
        'timeout' : 4600,
        'noparallel' : False
        }
# Shared registry of every resource the robot creates; used by the per-round
# status checks and by the final/emergency cleanup.
test_dict = test_state.TestStateDict()
def test():
    """Run randomized VM/volume/snapshot robot operations for one hour.

    Builds a utility VM (used for volume attach/detach), configures a
    Robot_Test_Object with snapshot actions weighted 4x, excludes VIP/image/SG
    actions, then loops random operations with a status check each round until
    the 3600s budget is spent.
    """
    test_util.test_dsc('''Will mainly doing random test for all kinds of snapshot operations. VM, Volume and Image operations will also be tested. If reach 1 hour successful running condition, testing will success and quit. SG actions, and VIP actions are removed in this robot test.
    VM resources: a special Utility vm is required to do volume attach/detach operation.
    ''')
    # NOTE(review): unused in this function; the "4 VM" target only appears in
    # the log text below — confirm whether the robot reads it elsewhere.
    target_running_vm = 4
    public_l3 = test_lib.lib_get_l3_by_name(os.environ.get('l3PublicNetworkName'))
    vm_create_option = test_util.VmOption()
    #image has to use virtual router image, as it needs to do port checking
    vm_create_option.set_image_uuid(test_lib.lib_get_image_by_name(img_name=os.environ.get('imageName_net')).uuid)
    utility_vm_create_option = test_util.VmOption()
    utility_vm_create_option.set_image_uuid(test_lib.lib_get_image_by_name(img_name=os.environ.get('imageName_net')).uuid)
    l3_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetworkName1')).uuid
    utility_vm_create_option.set_l3_uuids([l3_uuid])
    # Repeat the snapshot actions 4 times so the fair strategy picks them
    # far more often than the other action types.
    priority_actions = test_state.TestAction.snapshot_actions * 4
    utility_vm = test_lib.lib_create_vm(utility_vm_create_option)
    test_dict.add_utility_vm(utility_vm)
    utility_vm.check()
    test_util.test_dsc('Random Test Begin. Test target: 4 coexisting running VM (not include VR and SG target test VMs.).')
    robot_test_obj = test_util.Robot_Test_Object()
    robot_test_obj.set_test_dict(test_dict)
    robot_test_obj.set_vm_creation_option(vm_create_option)
    priority_action_obj = action_select.ActionPriority()
    priority_action_obj.add_priority_action_list(priority_actions)
    robot_test_obj.set_priority_actions(priority_action_obj)
    # VIP, image and SG actions are excluded from this robot run entirely.
    robot_test_obj.set_exclusive_actions_list(\
            test_state.TestAction.vip_actions + \
            test_state.TestAction.image_actions + \
            test_state.TestAction.sg_actions)
    robot_test_obj.set_public_l3(public_l3)
    robot_test_obj.set_utility_vm(utility_vm)
    robot_test_obj.set_random_type(action_select.fair_strategy)
    rounds = 1
    current_time = time.time()
    # One hour of random-operation rounds; each round ends with a full
    # status check of everything recorded in test_dict.
    timeout_time = current_time + 3600
    while time.time() <= timeout_time:
        test_util.test_dsc('New round %s starts: random operation pickup.' % rounds)
        test_lib.lib_vm_random_operation(robot_test_obj)
        test_util.test_dsc('===============Round %s finished. Begin status checking.================' % rounds)
        rounds += 1
        test_lib.lib_robot_status_check(test_dict)
    test_util.test_dsc('Reach test pass exit criterial: 1 hour.')
    test_lib.lib_robot_cleanup(test_dict)
    test_util.test_pass('Snapshots Robot Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Tear down every resource recorded in test_dict after a test() failure."""
    test_lib.lib_robot_cleanup(test_dict)
| apache-2.0 |
tylerjereddy/scipy | scipy/optimize/_constraints.py | 12 | 18650 | """Constraints definition for minimize."""
import numpy as np
from ._hessian_update_strategy import BFGS
from ._differentiable_functions import (
VectorFunction, LinearVectorFunction, IdentityVectorFunction)
from .optimize import OptimizeWarning
from warnings import warn
from numpy.testing import suppress_warnings
from scipy.sparse import issparse
def _arr_to_scalar(x):
    """Unwrap a single-element ndarray into a plain Python scalar.

    Non-array inputs pass through unchanged.  For an ndarray with more
    than one element ``item()`` raises, which is the intended behavior.
    """
    if isinstance(x, np.ndarray):
        return x.item()
    return x
class NonlinearConstraint:
    """Nonlinear constraint on the variables.

    Represents the general inequality::

        lb <= fun(x) <= ub

    where the vector of independent variables ``x`` is an ndarray of shape
    (n,) and ``fun`` maps it to a vector of m constraint values.  Equal
    bounds encode an equality constraint and infinite bounds a one-sided
    one; the kinds may be mixed freely by choosing the components of `lb`
    and `ub` independently.

    Parameters
    ----------
    fun : callable
        Constraint function with signature
        ``fun(x) -> array_like, shape (m,)``.
    lb, ub : array_like
        Lower and upper bounds, each of shape (m,) or a scalar (in which
        case the bound applies to every component).  Use ``np.inf`` with an
        appropriate sign for a one-sided constraint and ``lb == ub``
        component-wise for an equality constraint.
    jac : {callable, '2-point', '3-point', 'cs'}, optional
        Method of computing the m-by-n Jacobian matrix, where element
        (i, j) is the partial derivative of f[i] with respect to x[j].  The
        string keywords select a finite difference scheme; a callable must
        have signature ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
        Default is '2-point'.
    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
        Method of computing the Hessian of ``dot(fun, v)``.  The string
        keywords select a finite difference scheme; objects implementing
        the `HessianUpdateStrategy` interface (`BFGS`, the default, or
        `SR1`) give a quasi-Newton approximation.  A callable must have
        signature
        ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``
        where ``v`` is an ndarray of shape (m,) holding Lagrange
        multipliers.
    keep_feasible : array_like of bool, optional
        Whether to keep each constraint component feasible throughout
        iterations.  A single value applies to all components.  Default is
        False.  Has no effect for equality constraints.
    finite_diff_rel_step : None or array_like, optional
        Relative step size for the finite difference approximation.  The
        default None selects a reasonable value automatically for the
        chosen scheme.
    finite_diff_jac_sparsity : {None, array_like, sparse matrix}, optional
        Sparsity structure of the Jacobian (shape (m, n)) for finite
        difference estimation; a zero entry marks an identically-zero
        Jacobian element.  Providing it greatly speeds up differencing for
        sparse Jacobians and forces the 'lsmr' trust-region solver.  With
        None (default) dense differencing is used.

    Notes
    -----
    Finite differences may approximate either the Jacobian or the Hessian,
    but not both at once: whenever the Jacobian is estimated numerically,
    the Hessian must use one of the quasi-Newton strategies.  The 'cs'
    scheme is potentially the most accurate but requires ``fun`` to handle
    complex inputs correctly and to be analytically continuable to the
    complex plane; '3-point' is more accurate than '2-point' at twice the
    cost.

    Examples
    --------
    Constrain ``x[0] < sin(x[1]) + 1.9``

    >>> from scipy.optimize import NonlinearConstraint
    >>> con = lambda x: x[0] - np.sin(x[1])
    >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)
    """
    def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
                 keep_feasible=False, finite_diff_rel_step=None,
                 finite_diff_jac_sparsity=None):
        # Pure attribute storage; validation and wrapping happen later in
        # PreparedConstraint.
        self.fun = fun
        self.jac = jac
        self.hess = hess
        self.lb = lb
        self.ub = ub
        self.keep_feasible = keep_feasible
        self.finite_diff_rel_step = finite_diff_rel_step
        self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
class LinearConstraint:
    """Linear constraint on the variables.

    Represents the general inequality::

        lb <= A.dot(x) <= ub

    where the vector of independent variables ``x`` is an ndarray of shape
    (n,) and ``A`` is an m-by-n matrix.  Equal bounds encode an equality
    constraint and infinite bounds a one-sided one; the kinds may be mixed
    component-wise through `lb` and `ub`.

    Parameters
    ----------
    A : {array_like, sparse matrix}, shape (m, n)
        Matrix defining the constraint.
    lb, ub : array_like
        Lower and upper bounds, each of shape (m,) or a scalar (in which
        case the bound applies to every component).  Use ``np.inf`` with an
        appropriate sign for a one-sided constraint and ``lb == ub``
        component-wise for an equality constraint.
    keep_feasible : array_like of bool, optional
        Whether to keep each constraint component feasible throughout
        iterations.  A single value applies to all components.  Default is
        False.  Has no effect for equality constraints.
    """
    def __init__(self, A, lb, ub, keep_feasible=False):
        # Pure attribute storage; shape and feasibility checks are deferred
        # to PreparedConstraint.
        self.A = A
        self.keep_feasible = keep_feasible
        self.lb = lb
        self.ub = ub
class Bounds:
    """Bounds constraint on the variables.

    Represents the general inequality::

        lb <= x <= ub

    Equal bounds fix a variable and infinite bounds leave one side (or
    both) unbounded, so interval, one-sided and equality constraints may be
    mixed by choosing the components of `lb` and `ub` independently.

    Parameters
    ----------
    lb, ub : array_like
        Lower and upper bounds on the independent variables.  Each must
        have the same size as x or be a scalar, in which case the bound
        applies to every variable.  Use ``np.inf`` with an appropriate sign
        to disable a bound and ``lb == ub`` to fix a variable.
    keep_feasible : array_like of bool, optional
        Whether to keep each constraint component feasible throughout
        iterations.  A single value applies to all components.  Default is
        False.  Has no effect for equality constraints.
    """
    def __init__(self, lb, ub, keep_feasible=False):
        # Bounds are normalized to ndarrays up front; keep_feasible stays as
        # given and is broadcast later by PreparedConstraint.
        self.lb = np.asarray(lb)
        self.ub = np.asarray(ub)
        self.keep_feasible = keep_feasible

    def __repr__(self):
        # Only show keep_feasible when it deviates from the default.
        args = f"{self.lb!r}, {self.ub!r}"
        if np.any(self.keep_feasible):
            args += f", keep_feasible={self.keep_feasible!r}"
        return f"{type(self).__name__}({args})"
class PreparedConstraint:
    """Constraint prepared from a user defined constraint.

    On creation it will check whether a constraint definition is valid and
    the initial point is feasible. If created successfully, it will contain
    the attributes listed below.

    Parameters
    ----------
    constraint : {NonlinearConstraint, LinearConstraint`, Bounds}
        Constraint to check and prepare.
    x0 : array_like
        Initial vector of independent variables.
    sparse_jacobian : bool or None, optional
        If bool, then the Jacobian of the constraint will be converted
        to the corresponded format if necessary. If None (default), such
        conversion is not made.
    finite_diff_bounds : 2-tuple, optional
        Lower and upper bounds on the independent variables for the finite
        difference approximation, if applicable. Defaults to no bounds.

    Attributes
    ----------
    fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
        Function defining the constraint wrapped by one of the convenience
        classes.
    bounds : 2-tuple
        Contains lower and upper bounds for the constraints --- lb and ub.
        These are converted to ndarray and have a size equal to the number of
        the constraints.
    keep_feasible : ndarray
        Array indicating which components must be kept feasible with a size
        equal to the number of the constraints.
    """
    def __init__(self, constraint, x0, sparse_jacobian=None,
                 finite_diff_bounds=(-np.inf, np.inf)):
        # Wrap the raw constraint object in the matching differentiable
        # function helper; all three wrappers expose a common interface
        # (number of components `m`, current value `f`, ...).
        if isinstance(constraint, NonlinearConstraint):
            fun = VectorFunction(constraint.fun, x0,
                                 constraint.jac, constraint.hess,
                                 constraint.finite_diff_rel_step,
                                 constraint.finite_diff_jac_sparsity,
                                 finite_diff_bounds, sparse_jacobian)
        elif isinstance(constraint, LinearConstraint):
            fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
        elif isinstance(constraint, Bounds):
            fun = IdentityVectorFunction(x0, sparse_jacobian)
        else:
            raise ValueError("`constraint` of an unknown type is passed.")
        # Number of constraint components.
        m = fun.m
        lb = np.asarray(constraint.lb, dtype=float)
        ub = np.asarray(constraint.ub, dtype=float)
        # Broadcast scalar bounds to one value per constraint component.
        if lb.ndim == 0:
            lb = np.resize(lb, m)
        if ub.ndim == 0:
            ub = np.resize(ub, m)
        keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
        if keep_feasible.ndim == 0:
            keep_feasible = np.resize(keep_feasible, m)
        if keep_feasible.shape != (m,):
            raise ValueError("`keep_feasible` has a wrong shape.")
        # Feasibility of x0 is only enforced for inequality components
        # (lb != ub); keep_feasible has no effect on equality constraints.
        mask = keep_feasible & (lb != ub)
        f0 = fun.f
        if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
            raise ValueError("`x0` is infeasible with respect to some "
                             "inequality constraint with `keep_feasible` "
                             "set to True.")
        self.fun = fun
        self.bounds = (lb, ub)
        self.keep_feasible = keep_feasible
    def violation(self, x):
        """How much the constraint is exceeded by.

        Parameters
        ----------
        x : array-like
            Vector of independent variables

        Returns
        -------
        excess : array-like
            How much the constraint is exceeded by, for each of the
            constraints specified by `PreparedConstraint.fun`.
        """
        with suppress_warnings() as sup:
            sup.filter(UserWarning)
            ev = self.fun.fun(np.asarray(x))
        # Positive where the value falls below lb or above ub, zero inside
        # the feasible interval; the two excesses cannot both be non-zero.
        excess_lb = np.maximum(self.bounds[0] - ev, 0)
        excess_ub = np.maximum(ev - self.bounds[1], 0)
        return excess_lb + excess_ub
def new_bounds_to_old(lb, ub, n):
    """Convert the new bounds representation to the old one.

    The new representation is a tuple (lb, ub) and the old one is a list
    of n (lower, upper) tuples, one per variable.  Entries equal to
    -np.inf / np.inf are replaced by None.
    """
    def _per_variable(bound):
        # A scalar bound applies to every variable; arrays are used as-is.
        bound = np.asarray(bound)
        return np.resize(bound, n) if bound.ndim == 0 else bound

    lower = [float(x) if x > -np.inf else None for x in _per_variable(lb)]
    upper = [float(x) if x < np.inf else None for x in _per_variable(ub)]
    return list(zip(lower, upper))
def old_bound_to_new(bounds):
    """Convert the old bounds representation to the new one.

    The old representation is a list of n (lower, upper) tuples, one per
    variable; the new one is a single (lb, ub) tuple of ndarrays.  ``None``
    entries become -np.inf / np.inf respectively.
    """
    lower, upper = zip(*bounds)

    def _as_float(value, unbounded):
        # None means "no bound"; single-element numpy arrays are unwrapped
        # to scalars before conversion.
        if value is None:
            return unbounded
        return float(_arr_to_scalar(value))

    lb = np.array([_as_float(x, -np.inf) for x in lower])
    ub = np.array([_as_float(x, np.inf) for x in upper])
    return lb, ub
def strict_bounds(lb, ub, keep_feasible, n_vars):
    """Remove bounds which are not asked to be kept feasible.

    Bounds (broadcast to ``n_vars`` components) are kept only where
    ``keep_feasible`` is True; elsewhere they are replaced by -inf / +inf.
    """
    keep = np.resize(keep_feasible, n_vars)
    strict_lb = np.where(keep, np.resize(lb, n_vars).astype(float), -np.inf)
    strict_ub = np.where(keep, np.resize(ub, n_vars).astype(float), np.inf)
    return strict_lb, strict_ub
def new_constraint_to_old(con, x0):
    """
    Converts new-style constraint objects to old-style constraint dictionaries.

    Parameters
    ----------
    con : {NonlinearConstraint, LinearConstraint}
        New-style constraint to convert.
    x0 : array_like
        Initial vector of independent variables; used to prepare (and
        thereby validate) the constraint and to size inequality Jacobians.

    Returns
    -------
    old_constraints : list of dict
        Dictionaries with keys 'type' ('eq' or 'ineq') and 'fun', plus
        'jac' when a callable Jacobian is available.
    """
    if isinstance(con, NonlinearConstraint):
        if (con.finite_diff_jac_sparsity is not None or
                con.finite_diff_rel_step is not None or
                not isinstance(con.hess, BFGS) or  # misses user specified BFGS
                con.keep_feasible):
            # BUG FIX: the original message concatenated to "`hess`are
            # ignored" — a space was missing between the string fragments.
            warn("Constraint options `finite_diff_jac_sparsity`, "
                 "`finite_diff_rel_step`, `keep_feasible`, and `hess` "
                 "are ignored by this method.", OptimizeWarning)
        fun = con.fun
        if callable(con.jac):
            jac = con.jac
        else:
            jac = None
    else:  # LinearConstraint
        if con.keep_feasible:
            warn("Constraint option `keep_feasible` is ignored by this "
                 "method.", OptimizeWarning)
        A = con.A
        if issparse(A):
            A = A.todense()
        fun = lambda x: np.dot(A, x)
        jac = lambda x: A

    # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
    # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
    pcon = PreparedConstraint(con, x0)
    lb, ub = pcon.bounds

    # Classify each component: equality, bounded below, bounded above.
    # XOR with i_eq keeps equality components out of the inequality sets.
    i_eq = lb == ub
    i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
    i_bound_above = np.logical_xor(ub != np.inf, i_eq)
    i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)

    if np.any(i_unbounded):
        warn("At least one constraint is unbounded above and below. Such "
             "constraints are ignored.", OptimizeWarning)

    ceq = []
    if np.any(i_eq):
        def f_eq(x):
            y = np.array(fun(x)).flatten()
            return y[i_eq] - lb[i_eq]
        ceq = [{"type": "eq", "fun": f_eq}]

        if jac is not None:
            def j_eq(x):
                dy = jac(x)
                if issparse(dy):
                    dy = dy.todense()
                dy = np.atleast_2d(dy)
                return dy[i_eq, :]
            ceq[0]["jac"] = j_eq

    cineq = []
    n_bound_below = np.sum(i_bound_below)
    n_bound_above = np.sum(i_bound_above)
    if n_bound_below + n_bound_above:
        # Old-style 'ineq' means g(x) >= 0: below-bounds map to y - lb,
        # above-bounds are negated to ub - y.
        def f_ineq(x):
            y = np.zeros(n_bound_below + n_bound_above)
            y_all = np.array(fun(x)).flatten()
            y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
            y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
            return y
        cineq = [{"type": "ineq", "fun": f_ineq}]

        if jac is not None:
            def j_ineq(x):
                dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
                dy_all = jac(x)
                if issparse(dy_all):
                    dy_all = dy_all.todense()
                dy_all = np.atleast_2d(dy_all)
                dy[:n_bound_below, :] = dy_all[i_bound_below]
                dy[n_bound_below:, :] = -dy_all[i_bound_above]
                return dy
            cineq[0]["jac"] = j_ineq

    old_constraints = ceq + cineq

    if len(old_constraints) > 1:
        warn("Equality and inequality constraints are specified in the same "
             "element of the constraint list. For efficient use with this "
             "method, equality and inequality constraints should be specified "
             "in separate elements of the constraint list.", OptimizeWarning)

    return old_constraints
def old_constraint_to_new(ic, con):
    """
    Converts old-style constraint dictionaries to new-style constraint objects.

    ``ic`` is the constraint's index in the user-supplied sequence and is
    only used in error messages.
    """
    # Validate the 'type' entry; each failure mode maps to a specific
    # user-facing error.
    try:
        ctype = con['type'].lower()
    except KeyError as e:
        raise KeyError('Constraint %d has no type defined.' % ic) from e
    except TypeError as e:
        raise TypeError(
            'Constraints must be a sequence of dictionaries.'
        ) from e
    except AttributeError as e:
        raise TypeError("Constraint's type must be a string.") from e
    if ctype not in ['eq', 'ineq']:
        raise ValueError("Unknown constraint type '%s'." % con['type'])
    if 'fun' not in con:
        raise ValueError('Constraint %d has no function defined.' % ic)

    # Old-style semantics: 'eq' means fun(x) == 0, 'ineq' means fun(x) >= 0.
    lb = 0
    ub = 0 if ctype == 'eq' else np.inf
    jac = '2-point'
    if 'args' in con:
        # Bind the extra args so the new-style callables take x alone.
        args = con['args']

        def fun(x):
            return con['fun'](x, *args)

        if 'jac' in con:
            def jac(x):
                return con['jac'](x, *args)
    else:
        fun = con['fun']
        if 'jac' in con:
            jac = con['jac']

    return NonlinearConstraint(fun, lb, ub, jac)
| bsd-3-clause |
rajsadho/django | tests/utils_tests/test_baseconv.py | 326 | 1787 | from unittest import TestCase
from django.utils.baseconv import (
BaseConverter, base2, base16, base36, base56, base62, base64,
)
from django.utils.six.moves import range
class TestBaseConv(TestCase):
    """Round-trip and error-handling tests for django.utils.baseconv."""

    def assertRoundTrip(self, converter, number, encoded):
        # Encoding and decoding must be exact inverses for this pair.
        self.assertEqual(converter.encode(number), encoded)
        self.assertEqual(converter.decode(encoded), number)

    def test_baseconv(self):
        # Every bundled converter must round-trip integers of all signs
        # and magnitudes.
        values = [-10 ** 10, 10 ** 10] + list(range(-100, 100))
        for conv in (base2, base16, base36, base56, base62, base64):
            for value in values:
                self.assertEqual(value, conv.decode(conv.encode(value)))

    def test_base11(self):
        base11 = BaseConverter('0123456789-', sign='$')
        self.assertRoundTrip(base11, 1234, '-22')
        self.assertRoundTrip(base11, -1234, '$-22')

    def test_base20(self):
        base20 = BaseConverter('0123456789abcdefghij')
        self.assertRoundTrip(base20, 1234, '31e')
        self.assertRoundTrip(base20, -1234, '-31e')

    def test_base64(self):
        self.assertRoundTrip(base64, 1234, 'JI')
        self.assertRoundTrip(base64, -1234, '$JI')

    def test_base7(self):
        base7 = BaseConverter('cjdhel3', sign='g')
        self.assertRoundTrip(base7, 1234, 'hejd')
        self.assertRoundTrip(base7, -1234, 'ghejd')

    def test_exception(self):
        # The sign character may not also appear in the digit alphabet.
        with self.assertRaises(ValueError):
            BaseConverter('abc', sign='a')
        self.assertIsInstance(BaseConverter('abc', sign='d'), BaseConverter)
kennethreitz/pipenv | pipenv/vendor/dateutil/tz/_common.py | 10 | 12977 | from six import PY2
from functools import wraps
from datetime import datetime, timedelta, tzinfo
ZERO = timedelta(0)
__all__ = ['tzname_in_python2', 'enfold']
def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was changed
    to unicode strings
    """
    if not PY2:
        # Python 3: tzname() already returns str; decorate nothing.
        return namefunc

    @wraps(namefunc)
    def adjust_encoding(*args, **kwargs):
        name = namefunc(*args, **kwargs)
        return name if name is None else name.encode()

    return adjust_encoding
# The following is adapted from Alexander Belopolsky's tz library
# https://github.com/abalkin/tz
if hasattr(datetime, 'fold'):
    # Python 3.6+ (PEP 495): datetime natively carries a ``fold`` attribute,
    # so enfold can simply delegate to datetime.replace.  (The original
    # comment here wrongly described this as the *pre*-3.6 branch.)
    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        return dt.replace(fold=fold)
else:
    # Pre-3.6: emulate PEP 495 with a datetime subclass whose ``fold`` is
    # fixed at 1; fold=0 datetimes stay plain datetime instances.
    class _DatetimeWithFold(datetime):
        """
        This is a class designed to provide a PEP 495-compliant interface for
        Python versions before 3.6. It is used only for dates in a fold, so
        the ``fold`` attribute is fixed at ``1``.

        .. versionadded:: 2.6.0
        """
        __slots__ = ()
        def replace(self, *args, **kwargs):
            """
            Return a datetime with the same attributes, except for those
            attributes given new values by whichever keyword arguments are
            specified. Note that tzinfo=None can be specified to create a naive
            datetime from an aware datetime with no conversion of date and time
            data.

            This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
            return a ``datetime.datetime`` even if ``fold`` is unchanged.
            """
            argnames = (
                'year', 'month', 'day', 'hour', 'minute', 'second',
                'microsecond', 'tzinfo'
            )
            # Merge positional arguments into kwargs, rejecting duplicates
            # just like datetime.replace does.
            for arg, argname in zip(args, argnames):
                if argname in kwargs:
                    raise TypeError('Duplicate argument: {}'.format(argname))
                kwargs[argname] = arg
            for argname in argnames:
                if argname not in kwargs:
                    kwargs[argname] = getattr(self, argname)
            # Replacing with fold=0 must also drop the subclass, otherwise
            # the result's ``fold`` property would still read 1.
            dt_class = self.__class__ if kwargs.get('fold', 1) else datetime
            return dt_class(**kwargs)
        @property
        def fold(self):
            return 1
    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        # Already has the requested fold value (covers plain datetimes with
        # fold=0 via the getattr default).
        if getattr(dt, 'fold', 0) == fold:
            return dt
        args = dt.timetuple()[:6]
        args += (dt.microsecond, dt.tzinfo)
        if fold:
            return _DatetimeWithFold(*args)
        else:
            return datetime(*args)
def _validate_fromutc_inputs(f):
    """Decorator replicating CPython's argument checks for ``fromutc``.

    The wrapped method only ever sees a ``datetime`` whose ``tzinfo`` is
    the tzinfo instance itself, exactly as ``datetime.tzinfo.fromutc``
    requires.
    """
    @wraps(f)
    def _checked_fromutc(self, dt):
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")
        return f(self, dt)

    return _checked_fromutc
class _tzinfo(tzinfo):
    """
    Base class for all ``dateutil`` ``tzinfo`` objects.
    """
    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        dt = dt.replace(tzinfo=self)
        wall_0 = enfold(dt, fold=0)
        wall_1 = enfold(dt, fold=1)
        # An ambiguous wall time reads the same on both sides of the fold
        # but maps to two different UTC offsets.
        same_offset = wall_0.utcoffset() == wall_1.utcoffset()
        same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
        return same_dt and not same_offset
    def _fold_status(self, dt_utc, dt_wall):
        """
        Determine the fold status of a "wall" datetime, given a representation
        of the same datetime as a (naive) UTC datetime. This is calculated based
        on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
        datetimes, and that this offset is the actual number of hours separating
        ``dt_utc`` and ``dt_wall``.

        :param dt_utc:
            Representation of the datetime as UTC

        :param dt_wall:
            Representation of the datetime as "wall time". This parameter must
            either have a `fold` attribute or have a fold-naive
            :class:`datetime.tzinfo` attached, otherwise the calculation may
            fail.
        """
        if self.is_ambiguous(dt_wall):
            # fold=1 iff the actual wall offset equals the standard
            # (non-DST) offset, i.e. the second occurrence of the wall time.
            delta_wall = dt_wall - dt_utc
            _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
        else:
            _fold = 0
        return _fold
    def _fold(self, dt):
        # PEP 495 accessor that also works on pre-3.6 datetimes (defaults 0).
        return getattr(dt, 'fold', 0)
    def _fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        # Re-implement the algorithm from Python's datetime.py
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")
        # The original datetime.py code assumes that `dst()` defaults to
        # zero during ambiguous times. PEP 495 inverts this presumption, so
        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        delta = dtoff - dtdst
        dt += delta
        # Set fold=1 so we can default to being in the fold for
        # ambiguous dates.
        dtdst = enfold(dt, fold=1).dst()
        if dtdst is None:
            raise ValueError("fromutc(): dt.dst gave inconsistent "
                             "results; cannot convert")
        return dt + dtdst
    @_validate_fromutc_inputs
    def fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        dt_wall = self._fromutc(dt)
        # Calculate the fold status given the two datetimes.
        _fold = self._fold_status(dt, dt_wall)
        # Set the default fold value for ambiguous dates
        return enfold(dt_wall, fold=_fold)
class tzrangebase(_tzinfo):
    """
    This is an abstract base class for time zones represented by an annual
    transition into and out of DST. Child classes should implement the following
    methods:

        * ``__init__(self, *args, **kwargs)``
        * ``transitions(self, year)`` - this is expected to return a tuple of
          datetimes representing the DST on and off transitions in standard
          time.

    A fully initialized ``tzrangebase`` subclass should also provide the
    following attributes:

        * ``hasdst``: Boolean whether or not the zone uses DST.
        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
          representing the respective UTC offsets.
        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
          abbreviations in DST and STD, respectively.
        * ``_hasdst``: Whether or not the zone has DST.

    .. versionadded:: 2.6.0
    """
    def __init__(self):
        raise NotImplementedError('tzrangebase is an abstract base class')
    def utcoffset(self, dt):
        # Total UTC offset for dt: DST offset inside DST, standard otherwise.
        isdst = self._isdst(dt)
        if isdst is None:
            return None
        elif isdst:
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        # Only the DST component of the offset (ZERO outside DST).
        isdst = self._isdst(dt)
        if isdst is None:
            return None
        elif isdst:
            return self._dst_base_offset
        else:
            return ZERO
    @tzname_in_python2
    def tzname(self, dt):
        # Short zone abbreviation matching the current DST state.
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr
    def fromutc(self, dt):
        """ Given a datetime in UTC, return local time """
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")
        # Get transitions - if there are none, fixed offset
        transitions = self.transitions(dt.year)
        if transitions is None:
            return dt + self.utcoffset(dt)
        # Get the transition times in UTC
        dston, dstoff = transitions
        dston -= self._std_offset
        dstoff -= self._std_offset
        utc_transitions = (dston, dstoff)
        dt_utc = dt.replace(tzinfo=None)
        isdst = self._naive_isdst(dt_utc, utc_transitions)
        if isdst:
            dt_wall = dt + self._dst_offset
        else:
            dt_wall = dt + self._std_offset
        # Standard-time wall values that fall in the repeated hour after
        # DST ends are the second occurrence, hence fold=1.
        _fold = int(not isdst and self.is_ambiguous(dt_wall))
        return enfold(dt_wall, fold=_fold)
    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        if not self.hasdst:
            return False
        start, end = self.transitions(dt.year)
        dt = dt.replace(tzinfo=None)
        # Only the wall-clock interval replayed right after DST ends is
        # ambiguous.
        return (end <= dt < end + self._dst_base_offset)
    def _isdst(self, dt):
        if not self.hasdst:
            return False
        elif dt is None:
            return None
        transitions = self.transitions(dt.year)
        if transitions is None:
            return False
        dt = dt.replace(tzinfo=None)
        isdst = self._naive_isdst(dt, transitions)
        # Handle ambiguous dates
        if not isdst and self.is_ambiguous(dt):
            # fold=1 selects the second (standard-time) occurrence.
            return not self._fold(dt)
        else:
            return isdst
    def _naive_isdst(self, dt, transitions):
        dston, dstoff = transitions
        dt = dt.replace(tzinfo=None)
        # When dstoff precedes dston, the DST interval wraps across the
        # year boundary and the test is inverted.
        if dston < dstoff:
            isdst = dston <= dt < dstoff
        else:
            isdst = not dstoff <= dt < dston
        return isdst
    @property
    def _dst_base_offset(self):
        # Net amount DST adds on top of standard time.
        return self._dst_offset - self._std_offset
    # Subclasses vary in mutability; disable hashing at this level.
    __hash__ = None
    def __ne__(self, other):
        return not (self == other)
    def __repr__(self):
        return "%s(...)" % self.__class__.__name__
    __reduce__ = object.__reduce__
MikeAmy/django | tests/template_tests/filter_tests/test_truncatechars_html.py | 390 | 1229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import truncatechars_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
    """Tests for the truncatechars_html template filter."""

    # Shared fixture: nested markup whose open tags must be re-closed after
    # truncation.
    html = '<p>one <a href="#">two - three <br>four</a> five</p>'

    def test_truncate_zero(self):
        self.assertEqual(truncatechars_html(self.html, 0), '...')

    def test_truncate(self):
        self.assertEqual(truncatechars_html(self.html, 6), '<p>one...</p>')

    def test_truncate2(self):
        self.assertEqual(
            truncatechars_html(self.html, 11),
            '<p>one <a href="#">two ...</a></p>',
        )

    def test_truncate3(self):
        # A limit beyond the text length leaves the markup untouched.
        self.assertEqual(truncatechars_html(self.html, 100), self.html)

    def test_truncate_unicode(self):
        self.assertEqual(
            truncatechars_html('<b>\xc5ngstr\xf6m</b> was here', 5),
            '<b>\xc5n...</b>',
        )

    def test_truncate_something(self):
        self.assertEqual(truncatechars_html('a<b>b</b>c', 3), 'a<b>b</b>c')
malemburg/pythondotorg | pydotorg/resources.py | 10 | 3960 | from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.exceptions import Unauthorized
from tastypie.http import HttpUnauthorized
from tastypie.resources import ModelResource
from tastypie.throttle import CacheThrottle
from django.contrib.auth import get_user_model
class ApiKeyOrGuestAuthentication(ApiKeyAuthentication):
def _unauthorized(self):
# Allow guests anyway
return True
def is_authenticated(self, request, **kwargs):
"""
Copypasted from tastypie, modified to avoid issues with app-loading and
custom user model.
"""
User = get_user_model()
username_field = User.USERNAME_FIELD
try:
username, api_key = self.extract_credentials(request)
except ValueError:
return self._unauthorized()
if not username or not api_key:
return self._unauthorized()
try:
lookup_kwargs = {username_field: username}
user = User.objects.get(**lookup_kwargs)
except (User.DoesNotExist, User.MultipleObjectsReturned):
return self._unauthorized()
if not self.check_active(user):
return False
key_auth_check = self.get_key(user, api_key)
if key_auth_check and not isinstance(key_auth_check, HttpUnauthorized):
request.user = user
return key_auth_check
def get_identifier(self, request):
if request.user.is_authenticated():
return super().get_identifier(request)
else:
# returns a combination of IP address and hostname.
return "%s_%s" % (request.META.get('REMOTE_ADDR', 'noaddr'), request.META.get('REMOTE_HOST', 'nohost'))
def check_active(self, user):
return True
class StaffAuthorization(Authorization):
"""
Everybody can read everything. Staff users can write everything.
"""
def read_list(self, object_list, bundle):
# Everybody can read
return object_list
def read_detail(self, object_list, bundle):
# Everybody can read
return True
def create_list(self, object_list, bundle):
if bundle.request.user.is_staff:
return object_list
else:
raise Unauthorized("Operation restricted to staff users.")
def create_detail(self, object_list, bundle):
return bundle.request.user.is_staff
def update_list(self, object_list, bundle):
if bundle.request.user.is_staff:
return object_list
else:
raise Unauthorized("Operation restricted to staff users.")
def update_detail(self, object_list, bundle):
return bundle.request.user.is_staff
def delete_list(self, object_list, bundle):
if not bundle.request.user.is_staff:
raise Unauthorized("Operation restricted to staff users.")
else:
return object_list
def delete_detail(self, object_list, bundle):
if not bundle.request.user.is_staff:
raise Unauthorized("Operation restricted to staff users.")
else:
return True
class OnlyPublishedAuthorization(StaffAuthorization):
"""
Only staff users can see unpublished objects.
"""
def read_list(self, object_list, bundle):
if not bundle.request.user.is_staff:
return object_list.filter(is_published=True)
else:
return super().read_list(object_list, bundle)
def read_detail(self, object_list, bundle):
if not bundle.request.user.is_staff:
return bundle.obj.is_published
else:
return super().read_detail(object_list, bundle)
class GenericResource(ModelResource):
class Meta:
authentication = ApiKeyOrGuestAuthentication()
authorization = StaffAuthorization()
throttle = CacheThrottle(throttle_at=600) # default is 150 req/hr
| apache-2.0 |
skiselev/upm | examples/python/md-stepper.py | 7 | 1988 | #!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
from upm import pyupm_md as upmmd
def main():
I2C_BUS = upmmd.MD_I2C_BUS
I2C_ADDR = upmmd.MD_DEFAULT_I2C_ADDR
# Instantiate an I2C Motor Driver on I2C bus 0
myMotorDriver = upmmd.MD(I2C_BUS, I2C_ADDR)
# This example demonstrates using the MD to drive a stepper motor
# configure it, for this example, we'll assume 200 steps per rev
myMotorDriver.configStepper(200)
# set for half a rotation
myMotorDriver.setStepperSteps(100)
# let it go - clockwise rotation, 10 RPM speed
myMotorDriver.enableStepper(upmmd.MD_STEP_DIR_CW, 10)
time.sleep(3)
# Now do it backwards...
myMotorDriver.setStepperSteps(100)
myMotorDriver.enableStepper(upmmd.MD_STEP_DIR_CCW, 10)
# now disable
myMotorDriver.disableStepper()
if __name__ == '__main__':
main()
| mit |
mattrobenolt/django | django/db/__init__.py | 146 | 2374 | from django.core import signals
from django.db.utils import (DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY,
DataError, OperationalError, IntegrityError, InternalError, ProgrammingError,
NotSupportedError, DatabaseError, InterfaceError, Error, ConnectionHandler,
ConnectionRouter)
__all__ = [
'backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'InternalError', 'ProgrammingError', 'DataError',
'NotSupportedError', 'Error', 'InterfaceError', 'OperationalError',
'DEFAULT_DB_ALIAS', 'DJANGO_VERSION_PICKLE_KEY'
]
connections = ConnectionHandler()
router = ConnectionRouter()
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
# we manually create the dictionary from the settings, passing only the
# settings that the database backends care about. Note that TIME_ZONE is used
# by the PostgreSQL backends.
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy(object):
"""
Proxy for accessing the default DatabaseWrapper object's attributes. If you
need to access the DatabaseWrapper object itself, use
connections[DEFAULT_DB_ALIAS] instead.
"""
def __getattr__(self, item):
return getattr(connections[DEFAULT_DB_ALIAS], item)
def __setattr__(self, name, value):
return setattr(connections[DEFAULT_DB_ALIAS], name, value)
def __delattr__(self, name):
return delattr(connections[DEFAULT_DB_ALIAS], name)
def __eq__(self, other):
return connections[DEFAULT_DB_ALIAS] == other
def __ne__(self, other):
return connections[DEFAULT_DB_ALIAS] != other
connection = DefaultConnectionProxy()
# Register an event to reset saved queries when a Django request is started.
def reset_queries(**kwargs):
for conn in connections.all():
conn.queries_log.clear()
signals.request_started.connect(reset_queries)
# Register an event to reset transaction state and close connections past
# their lifetime.
def close_old_connections(**kwargs):
for conn in connections.all():
conn.close_if_unusable_or_obsolete()
signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
| bsd-3-clause |
genodeftest/exaile | xlgui/preferences/__init__.py | 2 | 9035 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import GdkPixbuf
from gi.repository import GLib
from gi.repository import Gtk
import inspect
import logging
import xl.unicode
from xl import xdg
from xl.nls import gettext as _
from xlgui import icons
from . import (
appearance,
collection,
cover,
lyrics,
playback,
playlists,
plugin,
widgets,
)
logger = logging.getLogger(__name__)
class PreferencesDialog(object):
"""
Preferences Dialog
"""
PAGES = (playlists, appearance, playback, collection, cover, lyrics)
PREFERENCES_DIALOG = None
def __init__(self, parent, main):
"""
Initializes the preferences dialog
"""
self.main = main
self.last_child = None
self.last_page = None
self.parent = parent
self.fields = {}
self.panes = {}
self.builders = {}
self.popup = None
self.builder = Gtk.Builder()
self.builder.set_translation_domain('exaile')
self.builder.add_from_file(
xdg.get_data_path('ui', 'preferences', 'preferences_dialog.ui')
)
self.builder.connect_signals(self)
self.window = self.builder.get_object('PreferencesDialog')
self.window.set_transient_for(parent)
self.window.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
self.window.connect('delete-event', lambda *e: self.close())
self.box = self.builder.get_object('preferences_box')
self.tree = self.builder.get_object('preferences_tree')
self.model = self.builder.get_object('model')
title_cellrenderer = self.builder.get_object('title_cellrenderer')
title_cellrenderer.props.ypad = 3
self.default_icon = icons.MANAGER.pixbuf_from_icon_name(
'document-properties', Gtk.IconSize.MENU
)
# sets up the default panes
for page in self.PAGES:
icon = self.default_icon
if hasattr(page, 'icon'):
if isinstance(page.icon, GdkPixbuf.Pixbuf):
icon = page.icon
else:
icon = icons.MANAGER.pixbuf_from_icon_name(
page.icon, Gtk.IconSize.MENU
)
self.model.append(None, [page, page.name, icon])
# Use icon name to allow overrides
plugin_icon = icons.MANAGER.pixbuf_from_icon_name(
'extension', Gtk.IconSize.MENU
)
self.plug_root = self.model.append(None, [plugin, _('Plugins'), plugin_icon])
self._load_plugin_pages()
selection = self.tree.get_selection()
selection.connect('changed', self.switch_pane)
# Disallow selection on rows with no widget to show
selection.set_select_function(
(lambda sel, model, path, issel, dat: model[path][0] is not None), None
)
GLib.idle_add(selection.select_path, (0,))
def _load_plugin_pages(self):
self._clear_children(self.plug_root)
plugin_pages = []
plugin_manager = self.main.exaile.plugins
for name in plugin_manager.enabled_plugins:
plugin = plugin_manager.enabled_plugins[name]
if hasattr(plugin, 'get_preferences_pane'):
try:
plugin_pages.append(plugin.get_preferences_pane())
except Exception:
logger.exception('Error loading preferences pane')
plugin_pages.sort(key=lambda x: xl.unicode.strxfrm(x.name))
for page in plugin_pages:
icon = self.default_icon
if hasattr(page, 'icon'):
if isinstance(page.icon, GdkPixbuf.Pixbuf):
icon = page.icon
else:
icon = icons.MANAGER.pixbuf_from_icon_name(
page.icon, Gtk.IconSize.MENU
)
self.model.append(self.plug_root, [page, page.name, icon])
GLib.idle_add(self.tree.expand_row, self.model.get_path(self.plug_root), False)
def _clear_children(self, node):
remove = []
iter = self.model.iter_children(node)
while iter:
remove.append(iter)
iter = self.model.iter_next(iter)
for iter in remove:
self.model.remove(iter)
def on_close_button_clicked(self, widget):
"""
Called when the user clicks 'ok'
"""
self.close()
def close(self):
"""
Closes the preferences dialog
"""
if hasattr(self.last_page, 'page_leave'):
self.last_page.page_leave(self)
self.window.hide()
self.window.destroy()
PreferencesDialog.PREFERENCES_DIALOG = None
def switch_pane(self, selection):
"""
Switches a pane
"""
(model, iter) = selection.get_selected()
if not iter:
return
page = self.model.get_value(iter, 0)
if not page:
return
if self.last_child:
self.box.remove(self.last_child)
if self.last_page:
if hasattr(self.last_page, 'page_leave'):
self.last_page.page_leave(self)
self.last_page = page
child = self.panes.get(page)
if not child:
if hasattr(page, 'ui'):
builder = Gtk.Builder()
builder.add_from_file(page.ui)
else:
logger.error("No preference pane found")
return
child = builder.get_object('preferences_pane')
init = getattr(page, 'init', None)
if init:
init(self, builder)
self.panes[page] = child
self.builders[page] = builder
if page not in self.fields:
self._populate_fields(page, self.builders[page])
if hasattr(page, 'page_enter'):
page.page_enter(self)
child.unparent()
self.box.pack_start(child, True, True, 0)
self.last_child = child
self.box.show_all()
def _populate_fields(self, page, builder):
"""
Populates field pages
"""
self.fields[page] = []
attributes = dir(page)
for attr in attributes:
try:
klass = getattr(page, attr)
if inspect.isclass(klass) and issubclass(klass, widgets.Preference):
widget = builder.get_object(klass.name)
if not widget:
logger.warning('Invalid preferences widget: %s', klass.name)
continue
if issubclass(klass, widgets.Conditional):
klass.condition_widget = builder.get_object(
klass.condition_preference_name
)
elif issubclass(klass, widgets.MultiConditional):
for name in klass.condition_preference_names:
klass.condition_widgets[name] = builder.get_object(name)
field = klass(self, widget)
label_widget = builder.get_object('label:%s' % klass.name)
if label_widget:
field.label_widget = label_widget
self.fields[page].append(field)
except Exception:
logger.exception('Broken preferences class: %s', attr)
def run(self):
"""
Runs the dialog
"""
if PreferencesDialog.PREFERENCES_DIALOG:
self = PreferencesDialog.PREFERENCES_DIALOG
self.window.present()
else:
PreferencesDialog.PREFERENCES_DIALOG = self
self.window.show_all()
# vim: et sts=4 sw=4
| gpl-2.0 |
leeseulstack/openstack | neutron/tests/unit/hyperv/test_hyperv_utilsv2.py | 14 | 22389 | # Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the Hyper-V utils V2.
"""
import mock
from neutron.plugins.hyperv.agent import utils
from neutron.plugins.hyperv.agent import utilsv2
from neutron.tests import base
class TestHyperVUtilsV2(base.BaseTestCase):
_FAKE_VSWITCH_NAME = "fake_vswitch_name"
_FAKE_PORT_NAME = "fake_port_name"
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_VM_PATH = "fake_vm_path"
_FAKE_RES_DATA = "fake_res_data"
_FAKE_RES_PATH = "fake_res_path"
_FAKE_VSWITCH = "fake_vswitch"
_FAKE_VLAN_ID = "fake_vlan_id"
_FAKE_CLASS_NAME = "fake_class_name"
_FAKE_ELEMENT_NAME = "fake_element_name"
_FAKE_HYPERV_VM_STATE = 'fake_hyperv_state'
_FAKE_ACL_ACT = 'fake_acl_action'
_FAKE_ACL_DIR = 'fake_acl_dir'
_FAKE_ACL_TYPE = 'fake_acl_type'
_FAKE_LOCAL_PORT = 'fake_local_port'
_FAKE_PROTOCOL = 'fake_port_protocol'
_FAKE_REMOTE_ADDR = '0.0.0.0/0'
_FAKE_WEIGHT = 'fake_weight'
def setUp(self):
super(TestHyperVUtilsV2, self).setUp()
self._utils = utilsv2.HyperVUtilsV2()
self._utils._wmi_conn = mock.MagicMock()
def test_connect_vnic_to_vswitch_found(self):
self._test_connect_vnic_to_vswitch(True)
def test_connect_vnic_to_vswitch_not_found(self):
self._test_connect_vnic_to_vswitch(False)
def _test_connect_vnic_to_vswitch(self, found):
self._utils._get_vnic_settings = mock.MagicMock()
if not found:
mock_vm = mock.MagicMock()
self._utils._get_vm_from_res_setting_data = mock.MagicMock(
return_value=mock_vm)
self._utils._add_virt_resource = mock.MagicMock()
else:
self._utils._modify_virt_resource = mock.MagicMock()
self._utils._get_vswitch = mock.MagicMock()
self._utils._get_switch_port_allocation = mock.MagicMock()
mock_port = mock.MagicMock()
self._utils._get_switch_port_allocation.return_value = (mock_port,
found)
self._utils.connect_vnic_to_vswitch(self._FAKE_VSWITCH_NAME,
self._FAKE_PORT_NAME)
if not found:
self._utils._add_virt_resource.assert_called_with(mock_vm,
mock_port)
else:
self._utils._modify_virt_resource.assert_called_with(mock_port)
def test_add_virt_resource(self):
self._test_virt_method('AddResourceSettings', 3, '_add_virt_resource',
True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def test_add_virt_feature(self):
self._test_virt_method('AddFeatureSettings', 3, '_add_virt_feature',
True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def test_modify_virt_resource(self):
self._test_virt_method('ModifyResourceSettings', 3,
'_modify_virt_resource', False,
ResourceSettings=[self._FAKE_RES_DATA])
def test_remove_virt_resource(self):
self._test_virt_method('RemoveResourceSettings', 2,
'_remove_virt_resource', False,
ResourceSettings=[self._FAKE_RES_PATH])
def test_remove_virt_feature(self):
self._test_virt_method('RemoveFeatureSettings', 2,
'_remove_virt_feature', False,
FeatureSettings=[self._FAKE_RES_PATH])
def _test_virt_method(self, vsms_method_name, return_count,
utils_method_name, with_mock_vm, *args, **kwargs):
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
vsms_method = getattr(mock_svc, vsms_method_name)
mock_rsd = self._mock_vsms_method(vsms_method, return_count)
if with_mock_vm:
mock_vm = mock.MagicMock()
mock_vm.path_.return_value = self._FAKE_VM_PATH
getattr(self._utils, utils_method_name)(mock_vm, mock_rsd)
else:
getattr(self._utils, utils_method_name)(mock_rsd)
if args:
vsms_method.assert_called_once_with(*args)
else:
vsms_method.assert_called_once_with(**kwargs)
def _mock_vsms_method(self, vsms_method, return_count):
args = None
if return_count == 3:
args = (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
else:
args = (self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
vsms_method.return_value = args
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
self._utils._check_job_status = mock.MagicMock()
return mock_res_setting_data
def test_disconnect_switch_port_delete_port(self):
self._test_disconnect_switch_port(True)
def test_disconnect_switch_port_modify_port(self):
self._test_disconnect_switch_port(False)
def _test_disconnect_switch_port(self, delete_port):
self._utils._get_switch_port_allocation = mock.MagicMock()
mock_sw_port = mock.MagicMock()
self._utils._get_switch_port_allocation.return_value = (mock_sw_port,
True)
if delete_port:
self._utils._remove_virt_resource = mock.MagicMock()
else:
self._utils._modify_virt_resource = mock.MagicMock()
self._utils.disconnect_switch_port(self._FAKE_VSWITCH_NAME,
self._FAKE_PORT_NAME,
delete_port)
if delete_port:
self._utils._remove_virt_resource.assert_called_with(mock_sw_port)
else:
self._utils._modify_virt_resource.assert_called_with(mock_sw_port)
def test_get_vswitch(self):
self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = [
self._FAKE_VSWITCH]
vswitch = self._utils._get_vswitch(self._FAKE_VSWITCH_NAME)
self.assertEqual(self._FAKE_VSWITCH, vswitch)
def test_get_vswitch_not_found(self):
self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = []
self.assertRaises(utils.HyperVException, self._utils._get_vswitch,
self._FAKE_VSWITCH_NAME)
def test_get_vswitch_external_port(self):
mock_vswitch = mock.MagicMock()
mock_sw_port = mock.MagicMock()
mock_vswitch.associators.return_value = [mock_sw_port]
mock_le = mock_sw_port.associators.return_value
mock_le.__len__.return_value = 1
mock_le1 = mock_le[0].associators.return_value
mock_le1.__len__.return_value = 1
vswitch_port = self._utils._get_vswitch_external_port(mock_vswitch)
self.assertEqual(mock_sw_port, vswitch_port)
def test_set_vswitch_port_vlan_id(self):
mock_port_alloc = mock.MagicMock()
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
mock_port_alloc, True))
self._utils._get_vlan_setting_data_from_port_alloc = mock.MagicMock()
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.RemoveFeatureSettings.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
mock_vlan_settings = mock.MagicMock()
self._utils._get_vlan_setting_data = mock.MagicMock(return_value=(
mock_vlan_settings, True))
mock_svc.AddFeatureSettings.return_value = (self._FAKE_JOB_PATH,
None,
self._FAKE_RET_VAL)
self._utils.set_vswitch_port_vlan_id(self._FAKE_VLAN_ID,
self._FAKE_PORT_NAME)
self.assertTrue(mock_svc.RemoveFeatureSettings.called)
self.assertTrue(mock_svc.AddFeatureSettings.called)
def test_get_setting_data(self):
self._utils._get_first_item = mock.MagicMock(return_value=None)
mock_data = mock.MagicMock()
self._utils._get_default_setting_data = mock.MagicMock(
return_value=mock_data)
ret_val = self._utils._get_setting_data(self._FAKE_CLASS_NAME,
self._FAKE_ELEMENT_NAME,
True)
self.assertEqual(ret_val, (mock_data, False))
def test_enable_port_metrics_collection(self):
mock_port = mock.MagicMock()
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
mock_port, True))
mock_acl = mock.MagicMock()
with mock.patch.multiple(
self._utils,
_get_default_setting_data=mock.MagicMock(return_value=mock_acl),
_add_virt_feature=mock.MagicMock()):
self._utils.enable_port_metrics_collection(self._FAKE_PORT_NAME)
self.assertEqual(4, len(self._utils._add_virt_feature.mock_calls))
self._utils._add_virt_feature.assert_called_with(
mock_port, mock_acl)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_enable_control_metrics_ok(self, mock_get_port_allocation):
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
mock_metrics_def_source = self._utils._conn.CIM_BaseMetricDefinition
mock_metric_def = mock.MagicMock()
mock_port = mock.MagicMock()
mock_get_port_allocation.return_value = (mock_port, True)
mock_metrics_def_source.return_value = [mock_metric_def]
m_call = mock.call(Subject=mock_port.path_.return_value,
Definition=mock_metric_def.path_.return_value,
MetricCollectionEnabled=self._utils._METRIC_ENABLED)
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
mock_metrics_svc.ControlMetrics.assert_has_calls([m_call, m_call])
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_enable_control_metrics_no_port(self, mock_get_port_allocation):
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
mock_get_port_allocation.return_value = (None, False)
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_enable_control_metrics_no_def(self, mock_get_port_allocation):
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
mock_metrics_def_source = self._utils._conn.CIM_BaseMetricDefinition
mock_port = mock.MagicMock()
mock_get_port_allocation.return_value = (mock_port, True)
mock_metrics_def_source.return_value = None
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._is_port_vm_started')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_can_enable_control_metrics_true(self, mock_get, mock_is_started):
mock_acl = mock.MagicMock()
mock_acl.Action = self._utils._ACL_ACTION_METER
self._test_can_enable_control_metrics(mock_get, mock_is_started,
[mock_acl, mock_acl], True)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._is_port_vm_started')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_can_enable_control_metrics_false(self, mock_get, mock_is_started):
self._test_can_enable_control_metrics(mock_get, mock_is_started, [],
False)
def _test_can_enable_control_metrics(self, mock_get_port, mock_vm_started,
acls, expected_result):
mock_port = mock.MagicMock()
mock_acl = mock.MagicMock()
mock_acl.Action = self._utils._ACL_ACTION_METER
mock_port.associators.return_value = acls
mock_get_port.return_value = (mock_port, True)
mock_vm_started.return_value = True
result = self._utils.can_enable_control_metrics(self._FAKE_PORT_NAME)
self.assertEqual(expected_result, result)
def test_is_port_vm_started_true(self):
self._test_is_port_vm_started(self._utils._HYPERV_VM_STATE_ENABLED,
True)
def test_is_port_vm_started_false(self):
self._test_is_port_vm_started(self._FAKE_HYPERV_VM_STATE, False)
def _test_is_port_vm_started(self, vm_state, expected_result):
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
mock_port = mock.MagicMock()
mock_vmsettings = mock.MagicMock()
mock_summary = mock.MagicMock()
mock_summary.EnabledState = vm_state
mock_vmsettings.path_.return_value = self._FAKE_RES_PATH
mock_port.associators.return_value = [mock_vmsettings]
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
[mock_summary])
result = self._utils._is_port_vm_started(mock_port)
self.assertEqual(expected_result, result)
mock_svc.GetSummaryInformation.assert_called_once_with(
[self._utils._VM_SUMMARY_ENABLED_STATE],
[self._FAKE_RES_PATH])
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._bind_security_rule')
def test_create_default_reject_all_rules(self, mock_bind, mock_remove):
(m_port, m_acl) = self._setup_security_rule_test()
m_acl.Action = self._utils._ACL_ACTION_DENY
self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME)
calls = []
ipv4_pair = (self._utils._ACL_TYPE_IPV4, self._utils._IPV4_ANY)
ipv6_pair = (self._utils._ACL_TYPE_IPV6, self._utils._IPV6_ANY)
for direction in [self._utils._ACL_DIR_IN, self._utils._ACL_DIR_OUT]:
for acl_type, address in [ipv4_pair, ipv6_pair]:
for protocol in [self._utils._TCP_PROTOCOL,
self._utils._UDP_PROTOCOL,
self._utils._ICMP_PROTOCOL]:
calls.append(mock.call(m_port, direction, acl_type,
self._utils._ACL_ACTION_DENY,
self._utils._ACL_DEFAULT,
protocol, address, mock.ANY))
self._utils._remove_virt_feature.assert_called_once_with(m_acl)
self._utils._bind_security_rule.assert_has_calls(calls)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._bind_security_rule')
def test_create_default_reject_all_rules_already_added(self, mock_bind,
mock_remove):
(m_port, m_acl) = self._setup_security_rule_test()
m_acl.Action = self._utils._ACL_ACTION_DENY
m_port.associators.return_value = [
m_acl] * self._utils._REJECT_ACLS_COUNT
self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME)
self.assertFalse(self._utils._remove_virt_feature.called)
self.assertFalse(self._utils._bind_security_rule.called)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._add_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._create_security_acl')
def test_bind_security_rule(self, mock_create_acl, mock_add, mock_remove):
(m_port, m_acl) = self._setup_security_rule_test()
mock_create_acl.return_value = m_acl
self._utils._bind_security_rule(
m_port, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
self._FAKE_ACL_ACT, self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL,
self._FAKE_REMOTE_ADDR, self._FAKE_WEIGHT)
self._utils._add_virt_feature.assert_called_once_with(m_port, m_acl)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
def test_remove_security_rule(self, mock_remove_feature):
mock_acl = self._setup_security_rule_test()[1]
self._utils.remove_security_rule(
self._FAKE_PORT_NAME, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR)
self._utils._remove_virt_feature.assert_called_once_with(mock_acl)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_multiple_virt_features')
def test_remove_all_security_rules(self, mock_remove_feature):
mock_acl = self._setup_security_rule_test()[1]
self._utils.remove_all_security_rules(self._FAKE_PORT_NAME)
self._utils._remove_multiple_virt_features.assert_called_once_with(
[mock_acl])
def _setup_security_rule_test(self):
mock_port = mock.MagicMock()
mock_acl = mock.MagicMock()
mock_port.associators.return_value = [mock_acl]
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
mock_port, True))
self._utils._filter_security_acls = mock.MagicMock(
return_value=[mock_acl])
return (mock_port, mock_acl)
def test_filter_acls(self):
mock_acl = mock.MagicMock()
mock_acl.Action = self._FAKE_ACL_ACT
mock_acl.Applicability = self._utils._ACL_APPLICABILITY_LOCAL
mock_acl.Direction = self._FAKE_ACL_DIR
mock_acl.AclType = self._FAKE_ACL_TYPE
mock_acl.RemoteAddress = self._FAKE_REMOTE_ADDR
acls = [mock_acl, mock_acl]
good_acls = self._utils._filter_acls(
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR,
self._FAKE_ACL_TYPE, self._FAKE_REMOTE_ADDR)
bad_acls = self._utils._filter_acls(
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE)
self.assertEqual(acls, good_acls)
self.assertEqual([], bad_acls)
class TestHyperVUtilsV2R2(base.BaseTestCase):
    """Unit tests for the HyperVUtilsV2R2 security-ACL helpers."""

    # Placeholder values used to populate fake ACL objects.
    _FAKE_ACL_ACT = 'fake_acl_action'
    _FAKE_ACL_DIR = 'fake_direction'
    _FAKE_ACL_TYPE = 'fake_acl_type'
    _FAKE_LOCAL_PORT = 'fake_local_port'
    _FAKE_PROTOCOL = 'fake_port_protocol'
    _FAKE_REMOTE_ADDR = '10.0.0.0/0'

    def setUp(self):
        super(TestHyperVUtilsV2R2, self).setUp()
        self._utils = utilsv2.HyperVUtilsV2R2()

    def test_filter_security_acls(self):
        # Filter with concrete port/protocol values.
        self._test_filter_security_acls(
            self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR)

    def test_filter_security_acls_default(self):
        # Filter with the driver's default ("match any") port/protocol value.
        default = self._utils._ACL_DEFAULT
        self._test_filter_security_acls(
            default, default, self._FAKE_REMOTE_ADDR)

    def _test_filter_security_acls(self, local_port, protocol, remote_addr):
        """Build two ALLOW ACLs (one default, one specific) and check that
        _filter_security_acls keeps only those matching the requested
        action/port, and nothing when the action does not match.
        """
        acls = []
        default = self._utils._ACL_DEFAULT
        for port, proto in [(default, default), (local_port, protocol)]:
            mock_acl = mock.MagicMock()
            mock_acl.Action = self._utils._ACL_ACTION_ALLOW
            mock_acl.Direction = self._FAKE_ACL_DIR
            mock_acl.LocalPort = port
            mock_acl.Protocol = proto
            mock_acl.RemoteIPAddress = remote_addr
            acls.append(mock_acl)
        # Expected survivors: ACLs whose LocalPort equals the requested one.
        right_acls = [a for a in acls if a.LocalPort == local_port]

        # mock_acl.Action is the ALLOW action set in the loop above, so this
        # filters with a matching action ...
        good_acls = self._utils._filter_security_acls(
            acls, mock_acl.Action, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
            local_port, protocol, remote_addr)
        # ... while a bogus action must match nothing.
        bad_acls = self._utils._filter_security_acls(
            acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
            local_port, protocol, remote_addr)

        self.assertEqual(right_acls, good_acls)
        self.assertEqual([], bad_acls)

    def test_get_new_weight(self):
        # The free weight between two used weights is returned.
        mockacl1 = mock.MagicMock()
        mockacl1.Weight = self._utils._MAX_WEIGHT - 1
        mockacl2 = mock.MagicMock()
        mockacl2.Weight = self._utils._MAX_WEIGHT - 3
        self.assertEqual(self._utils._MAX_WEIGHT - 2,
                         self._utils._get_new_weight([mockacl1, mockacl2]))

    def test_get_new_weight_no_acls(self):
        # With no ACLs the first weight below the maximum is used.
        self.assertEqual(self._utils._MAX_WEIGHT - 1,
                         self._utils._get_new_weight([]))

    def test_get_new_weight_default_acls(self):
        # NOTE(review): presumably DENY (default) ACLs are excluded when
        # computing the next weight, so _MAX_WEIGHT - 2 is still free --
        # confirm against _get_new_weight's implementation.
        mockacl1 = mock.MagicMock()
        mockacl1.Weight = self._utils._MAX_WEIGHT - 1
        mockacl2 = mock.MagicMock()
        mockacl2.Weight = self._utils._MAX_WEIGHT - 2
        mockacl2.Action = self._utils._ACL_ACTION_DENY
        self.assertEqual(self._utils._MAX_WEIGHT - 2,
                         self._utils._get_new_weight([mockacl1, mockacl2]))
| apache-2.0 |
gizela/gizela | gizela/data/NetworkAdjList.py | 1 | 23873 | # gizela
#
# Copyright (C) 2010 Michal Seidl, Tomas Kubin
# Author: Tomas Kubin <tomas.kubin@fsv.cvut.cz>
# URL: <http://slon.fsv.cvut.cz/gizela>
#
# $Id: NetworkAdjList.py 116 2010-12-17 16:04:37Z tomaskubin $
from gizela.util.Error import Error
from gizela.data.NetworkList import NetworkList
from gizela.data.EpochPointList import EpochPointList
from gizela.data.PointList import PointList
from gizela.data.PointListCovMat import PointListCovMat
from gizela.data.point_text_table import gama_coor_table
from gizela.text.TextTable import TextTable
from gizela.data.DUPLICATE_ID import DUPLICATE_ID
from gizela.stat.PointLocalGamaDisplTest import PointLocalGamaDisplTest
from gizela.util.CoordSystemLocal3D import CoordSystemLocal3D
from gizela.data.Network import Network
from gizela.data.StandardDeviation import StandardDeviation
import datetime, copy
class NetworkAdjListError(Error): pass
class NetworkAdjList(NetworkList):
    """
    List of adjusted network epochs - behaves like a whole project.

    Collects Network adjustment results epoch by epoch and keeps their
    adjusted points (with displacements and test results), the fixed
    points of all epochs and the per-epoch standard deviations.
    """

    def __init__(self,
                 coordSystemLocal,
                 stdevUseApriori=True,
                 confProb=0.95,
                 reliabProb=0.95,
                 testId=None,
                 duplicateIdFix=DUPLICATE_ID.compare,
                 duplicateIdAdj=DUPLICATE_ID.compare,
                 textTable=None):
        """
        coordSystemLocal: local coordinate system of the project
        stdevUseApriori: use apriori/aposteriori standard deviation
        confProb: confidence probability
        reliabProb: reliability probability
        testId: ids of points for testing (currently only for output)
        duplicateIdFix: what to do with duplicate fixed points in epoch
        duplicateIdAdj: what to do with duplicate adjusted points in epoch
        textTable: text table instance for epochPointList
        """
        NetworkList.__init__(self,
                             coordSystemLocal,
                             stdevUseApriori=stdevUseApriori,
                             confProb=confProb,
                             #reliabProb=reliabProb,
                             #duplicateIdFix=duplicateIdFix,
                             duplicateIdAdj=duplicateIdAdj)

        # list of fixed points for all epochs
        self.pointListAdj = PointList(textTable=gama_coor_table(),
                                      duplicateId=duplicateIdFix)

        # list of coordinates, displacements and test results
        # PointLocalGamaDisplTest instances
        self.epochPointList = EpochPointList(idList=testId)

        # list of StandardDeviation instances
        # handles degrees of freedom from GamaLocalData instance
        self.stdevList = []

        ##self.testType = None

        # default output table shows displacement-test results
        if textTable is None:
            from gizela.stat.displ_test_text_table import displ_test_text_table
            self._textTable = displ_test_text_table()
        else:
            self._textTable = textTable

        # reliability probability is kept locally; it is NOT forwarded to
        # NetworkList.__init__ (see the commented-out kwargs above)
        self._reliabProb = reliabProb
def _get_textTable(self):
    """Return the text table used for adjusted-point output."""
    return self._textTable

def _set_textTable(self, textTable):
    """Set the text table and propagate it to every appended network."""
    self._textTable = textTable
    for net in self.list:
        net.pointListAdjCovMat.textTable = textTable

# read/write property wrapping the accessors above
textTable = property(_get_textTable, _set_textTable)
def append(self, net):
    """
    Add a Network adjustment result as the next epoch.

    net: Network instance with adjusted coordinates

    Side effects: synchronises the net's standard-deviation settings with
    this list, converts its adjusted points to PointLocalGamaDisplTest,
    computes displacements against the first epoch where each point has
    the same coordinates set, and extends epochPointList / pointListAdj /
    stdevList.
    """
    if not isinstance(net, Network):
        raise NetworkAdjListError, "Network instance expected"

    NetworkList.append(self, net)

    # standard deviation: force the net to this list's settings
    net.stdev.set_use_apriori(self._stdevUseApriori)
    net.stdev.set_conf_prob(self._confProb)
    net.pointListAdjCovMat.covmat.useApriori = self._stdevUseApriori
    self.stdevList.append(net.stdev)

    # change textTable of pointList
    net.pointListAdjCovMat.textTable = self._textTable

    # compute displacements of net added
    #
    # compute differences from "zero" epoch
    for pp in net.pointListAdjCovMat:
        # change type of point
        p = PointLocalGamaDisplTest(pp)
        #p.epochIndex = len(self)# - 1
        p.epochIndex = len(self) - 1  # index of the epoch just appended
        #import sys
        #from gizela.data.point_text_table import gama_coor_stdev_table
        #print >>sys.stderr, pp.with_text_table(gama_coor_stdev_table())
        #print >>sys.stderr, p.with_text_table(gama_coor_stdev_table())

        # find point in "zero" epoch: the first earlier occurrence with
        # the same set of coordinates (xyz / xy / z) available
        # NOTE: the loop variable pp is deliberately reused below,
        # shadowing the outer loop's pp
        p0 = None
        if p.id in self.epochPointList.iter_id():
            for pp in self.epochPointList.iter_point(p.id):
                if p.is_set_xyz() and pp.is_set_xyz():
                    p0 = pp
                    break
                elif p.is_set_xy() and pp.is_set_xy():
                    p0 = pp
                    break
                elif p.is_set_z() and pp.is_set_z():
                    p0 = pp
                    break
        if p0 is not None:
            #print "type p", type(p)
            #print "minus", type((p - p0).covmat)
            #from gizela.data.point_text_table import gama_coor_stdev_table
            #print (p-p0).with_text_table(gama_coor_stdev_table())
            p.set_displacement(p - p0)
        # store the retyped point back into the net's point list
        net.pointListAdjCovMat.replace_point(p)

    # compute differences - previous epoch
    #if self.numEpoch > 1:
    #    lastEpoch = self.epochPointList.get_epoch(-1)
    #    for p in pl:
    #        if p.id in lastEpoch:
    #            # compute displacement
    #            pd = p - lastEpoch.get_point(p.id)
    #            #print pd.id, pd.covmat.data
    #            # add displacement
    #            p.set_displacement(pd)
    #from gizela.stat.displ_test_text_table import point_displ_text_table
    #pl.textTable = point_displ_text_table()
    #print pl

    # add adjusted points with displacements
    self.epochPointList.add_(net.pointListAdjCovMat)
    #for p in self.epochPointList.get_epoch(-1):
    #    print p.id,"covmat", p.covmat.make_gama_xml()
    #    print p.id,"var", p.var

    # add fixed points
    self.pointListAdj.extend(net.pointListAdj)
def append_joined(self, data, reString, epochIndex, pointIndex):
    """
    Separate a joined multi-epoch adjustment into epochs and add them.

    data: Network instance with adjustment of all epochs joined together
    reString: regular expression with two groups - point id,
              - epoch index from 0
    epochIndex: index of epoch number (0 or 1) in regular expression groups
    pointIndex: index of point id (0 or 1) in regular expression groups

    raises NetworkAdjListError when data is not a Network instance
    """
    if not isinstance(data, Network):
        # fix: used to raise the NetworkAdjList class itself, which is
        # not an exception class -- raise the module's error type instead
        raise NetworkAdjListError("Network instance expected")

    # set text table
    data.pointListAdjCovMat.textTable = self._textTable

    # separate epochs
    self.epochPointList.add_joined(data.pointListAdjCovMat,
                                   reString,
                                   epochIndex, pointIndex)

    # add epochs: the same joined Network backs every separated epoch
    for i in xrange(self.epochPointList.get_num_epoch()):
        self.list.append(data)
    self.dateTimeList.extend(data.dateTimeList)

    # standard deviation: one (shared) instance per separated epoch
    data.stdev.set_use_apriori(self._stdevUseApriori)
    data.stdev.set_conf_prob(self._confProb)
    data.pointListAdjCovMat.covmat.useApriori = self._stdevUseApriori
    for i in xrange(self.epochPointList.get_num_epoch()):
        self.stdevList.append(data.stdev)

    # compute displacements
    #
    # change type of points to PointLocalGamaDisplTest
    for id, ind in self.epochPointList.index.items():
        #print id, ind
        for i, ii in enumerate(ind):
            if ii is not None:
                pp = PointLocalGamaDisplTest(
                    self.epochPointList.list[i].list[ii])
                pp.epochIndex = i
                self.epochPointList.list[i].list[ii] = pp
                #print self.epochPointList.list[i].list[ii]

    # add fixed points
    self.pointListAdj.extend(data.pointListAdj)

    # compute differences from "zero" epoch: for each point, the first
    # earlier epoch with the same set of coordinates (xyz / xy / z)
    for id, ind in self.epochPointList.index.items():
        for i, ii in enumerate(ind):
            if ii is None:
                continue
            p = self.epochPointList.list[i].list[ii]
            p0 = None
            for j, jj in enumerate(ind[:i]):
                if jj is None:
                    continue
                pp = self.epochPointList.list[j].list[jj]
                if p.is_set_xyz() and pp.is_set_xyz():
                    p0 = pp
                    break
                elif p.is_set_xy() and pp.is_set_xy():
                    p0 = pp
                    break
                elif p.is_set_z() and pp.is_set_z():
                    p0 = pp
                    break
            if p0 is None:
                # point in "zero" epoch not found
                continue
            p.set_displacement(p - p0)
def get_epoch_point_list(self, index):
    """Return the point list of epoch number *index*."""
    return self.epochPointList.get_epoch(index)
def __str__(self):
    """Return a table of epoch indexes with dates and times, followed by
    the string form of the epoch point list."""
    tt = TextTable([("epoch", "%5i"), ("date","%10s"), ("time","%8s")])
    # fix: do not shadow the builtin `str`; enumerate instead of
    # zip(..., xrange(len(...)))
    rows = [tt.make_table_head()]
    rows.extend([tt.make_table_row(i, dt.date(), dt.time())
                 for i, dt in enumerate(self.dateTimeList)])
    rows.append(tt.make_table_foot())
    rows.append("\n")
    rows.append(self.epochPointList.__str__())
    return "".join(rows)
def get_epoch_table(self, index=None):
    """
    returns text table of displacements
    index: integer: index of epoch, None = all epochs
    """
    # fix: do not shadow the builtin `str`
    rows = []
    if index is None:
        for i, e in enumerate(self.epochPointList.iter_epoch()):
            rows.append("Epoch %i:" % i)
            rows.append(e.__str__())
    else:
        rows.append("Epoch %i:" % index)
        rows.append(self.epochPointList.get_epoch(index).__str__())
    return "\n".join(rows)
def set_stdev_use(self, use):
    """
    Set which standard deviation to use.

    use: "apriori" or "aposteriori"
    raises NetworkAdjListError for any other value
    """
    # fix: compare strings with == (identity via `is` only worked by
    # accident of CPython string interning)
    if use == "apriori":
        self._stdevUseApriori = True
        for pl in self.epochPointList.iter_epoch():
            pl.covmat.useApriori = True
        for s in self.stdevList:
            s.set_stdev_use_apriori()
    elif use == "aposteriori":
        self._stdevUseApriori = False
        for pl in self.epochPointList.iter_epoch():
            pl.covmat.useApriori = False
        for s in self.stdevList:
            s.set_stdev_use_aposteriori()
    else:
        # fix: EpochListError is undefined in this module (NameError);
        # raise the module's own error class
        raise NetworkAdjListError("Unknown value of parameter use: %s" % use)
def set_stdev_use_apriori(self):
    """Switch the whole list to apriori standard deviations."""
    self.set_stdev_use("apriori")

def set_stdev_use_aposteriori(self):
    """Switch the whole list to aposteriori standard deviations."""
    self.set_stdev_use("aposteriori")
def get_stdev_use(self):
    """Return "apriori" or "aposteriori" according to the current setting."""
    # fix: conditional expression instead of the fragile `and/or` idiom
    return "apriori" if self._stdevUseApriori else "aposteriori"
def plot_xy(self, figure, idAdj=None, idFix=None, plotTest=False):
    """
    plots figure with points and error ellipses and displacements

    figure: FigureLayoutBase instance or descendant
    idAdj: list of ids of adjusted points to be drawn
           for no adjusted points set idAdj = []
    idFix: plot fixed points
           for no fixed points set idFix = []
    plotTest: also draw displacement-test results?
    """
    # id of points: default to all known ids
    if idAdj is None:
        idAdj = [id for id in self.epochPointList.iter_id()]
    if idFix is None:
        idFix = [id for id in self.pointListAdj.iter_id()]
    # plot adjusted
    if len(idAdj) > 0:
        self.plot_adj(figure, idAdj, plotErrorZ=False, plotTest=plotTest)
    # plot fixed
    if len(idFix) > 0:
        self.plot_fix(figure, idFix)
def plot_xyz(self, figure, idAdj=None, plotTest=False):
    """
    plots figure with points, error ellipses, displacements
    and confidence interval of z along y axis

    figure: FigureLayoutBase instance or descendant
    idAdj: list of ids of adjusted points to be drawn
           for no adjusted points set idAdj = []
    #idFix: plot fixed points
    #       for no fixed points set idFix = []
    """
    # id of points: default to all adjusted ids
    if idAdj is None:
        idAdj = [id for id in self.epochPointList.iter_id()]
    #if idFix is None:
    #    idFix = [id for id in self.pointListFix.iter_id()]
    # plot adjusted (plotErrorZ=True adds the z confidence interval)
    if len(idAdj) > 0:
        self.plot_adj(figure, idAdj, plotErrorZ=True, plotTest=plotTest)
    # plot fixed
    #if len(idFix) > 0:
    #    self.plot_fix(figure, idFix)
def plot_z(self, figure, id, plotTest=False):
    """plot z coordinates of point id for all epochs with stdev

    figure: FigureLayoutBase instance or descendant
    id: id or ids (tuple/list) of point; the parameter name shadows the
        builtin `id` but is kept for interface compatibility
    plotTest: also draw test-dependent stdev bars?
    """
    # accept a single id as well as a tuple/list of ids
    if type(id) is not tuple and type(id) is not list:
        id = (id,)
    # set figure
    figure.set_color_style(self)
    figure.gca().xaxis_date()
    # plot points, one epoch-colour cycle per point id
    for idi in id:
        self._plot_epoch_list_displacement_z(figure, idi, plotTest)
        figure.reset_epoch_counter()
    # set free space around draw
    figure.set_free_space()
    #update
    figure.update_(self)
def _plot_epoch_list_displacement_z(self, figure, id, plotTest):
    """
    plot 2D graph of point z coordinates,
    confidence intervals of z coordinates and displacements for all epochs

    id: point id
    plotTest: plot results of test? (stdev bars only for tested points)
    """
    # point iterator
    pointIter = self.epochPointList.iter_point(id)
    # plot points and stdev
    z = [] # z coordinates of points
    for point, date in zip(pointIter, self.dateTimeList):
        point.plot_z(figure, date)
        if plotTest:
            if point.testPassed is not None: # point is tested
                point.plot_z_stdev(figure, date)
        else:
            point.plot_z_stdev(figure, date)
        figure.next_point_dot_style()
        z.append(point.z)

    # find the first epoch in which the point has z set
    point0 = None
    for i, p in enumerate(self.epochPointList.iter_point(id)):
        if p.is_set_z():
            point0 = p
            i_epoch = i
            break

    # label point0
    if point0 is not None:
        point0.x = self.dateTimeList[i_epoch]
        # NOTE(review): `point` here is the last point of the plotting
        # loop above, so the label is placed at the first epoch's date but
        # the last epoch's z -- verify this placement is intended
        point0.y = point.z
        point0.plot_label(figure)

    # plot vector
    figure.plot_vector_z(self.dateTimeList, z)

    # plot zero line at the reference epoch's z
    if point0 is not None:
        z0 = [point0.z for t in self.dateTimeList]
        figure.plot_vector_z0(self.dateTimeList, z0)
def plot_fix(self, figure, id):
    """
    plots 2D graph of fixed points

    figure: FigureLayoutBase instance or descendant
    id: iterable of point ids
    """
    figure.set_aspect_equal()
    figure.update_(self)
    # NOTE(review): the loop variable idi is never used -- the whole
    # pointListAdj is re-plotted once per requested id; verify whether
    # plot_() should receive idi instead
    for idi in id:
        self.pointListAdj.plot_(figure)
    # set free space around draw
    figure.set_free_space()
def plot_adj(self, figure, id, plotErrorZ, plotTest):
    """
    plots 2D graph of adjusted points, displacements and error ellipses

    figure: FigureLayoutBase instance
    id: id or ids (tuple/list) of point
    plotErrorZ: plot confidence interval of z coordinate?
    plotTest: plot results of test?
    """
    # accept a single id as well as a tuple/list of ids
    if type(id) is not tuple and type(id) is not list:
        id = [id]
    # update figure
    figure.set_aspect_equal()
    figure.update_(self)
    # plot points, one epoch-colour cycle per point id
    for idi in id:
        self._plot_epoch_list_displacement_xy(figure, idi, plotErrorZ,
                                              plotTest)
        figure.reset_epoch_counter()
    # set free space around draw
    figure.set_free_space()
def _plot_epoch_list_displacement_xy(self, figure, id,
                                     plotErrorZ, plotTest):
    """
    plot 2D graph of point,
    error ellipses and displacements for all epochs
    and optionally confidence interval of z coordinate along y axis

    id: point id
    plotErrorZ: plot confidence interval of z coordinate?
    plotTest: plot results of test?
    """
    # find zero epoch for point
    # the first point with x!=None and y!=None
    point0 = None
    for p in self.epochPointList.iter_point(id, withNone=False):
        if p.x != None and p.y != None:
            point0 = p
            #x0, y0 = point0.x, point0.y
            break
    if point0 == None:
        return # no xy point available

    # save coordinates of vector with scale
    # displacements are exaggerated by figure.errScale around point0
    pointScaled = [] # point list as normal list
    for p in self.epochPointList.iter_point(id, withNone=True):
        if plotTest and isinstance(p, PointLocalGamaDisplTest):
            covmat = p.displ.get_point_cov_mat()
        else:
            covmat = p.get_point_cov_mat()
        ps = (p - point0)*figure.errScale + point0
        ps.covmat = covmat
        #import sys
        #print >>sys.stderr, "Point: %s" % ps.id
        #print >>sys.stderr, "epl: %s" % type(ps)
        pointScaled.append(ps)

    # plot points and error ellipses
    for p in pointScaled:
        p.plot_(figure, plotLabel=False)
        figure.set_stdev(self) # sets StandardDeviation instance for epoch
        if plotTest and isinstance(p, PointLocalGamaDisplTest):
            if p.testPassed is not None: # point is tested
                p.plot_error_ellipse(figure)
        else:
            p.plot_error_ellipse(figure)
        figure.next_point_dot_style()

    # label point0 (deep copy so the original point keeps its id)
    point0_label = copy.deepcopy(point0)
    point0_label.id = id
    point0_label.plot_label(figure)

    # plot vector
    figure.plot_vector_xy(pointScaled)
def compute_displacement_test(self, pointId=None, testType=None):
    """
    computes test statistic for displacements of points

    pointId: ids of point to be tested
    testType: type of test see DisplacementTestType class
              or None for testing according to point dimension
    """
    #self.testType = testType
    # local import -- presumably to avoid a circular module dependency;
    # verify before moving it to the top of the file
    from gizela.stat.DisplacementTest import DisplacementTest
    dtest = DisplacementTest(apriori=self._stdevUseApriori,
                             confProb=self._confProb,
                             reliabProb=self._reliabProb)
    dtest.compute_test(self.epochPointList, pointId, testType)
def get_conf_prob(self):
    """Return the confidence probability."""
    return self._confProb
def set_conf_prob(self, confProb):
    """Set the confidence probability and propagate it to the standard
    deviations of all epochs.

    confProb: confidence probability
    """
    # fix: the first parameter was misspelled `sefl` while the body used
    # `self`, so every call raised NameError
    self._confProb = confProb
    for s in self.stdevList:
        s.set_conf_prob(confProb)
def _num_epoch(self):
    # number of epochs == number of appended networks
    return len(self)

numEpoch = property(_num_epoch)
if __name__ == "__main__":
    # Demonstration / manual test: read six epoch adjustments, compute
    # displacement tests and plot the results, then repeat the exercise
    # with a single joined adjustment.

    from gizela.data.GamaLocalDataAdj import GamaLocalDataAdj

    try:
        file = [
            open("../../example/xml-epoch/epoch0.adj.xml"),
            open("../../example/xml-epoch/epoch1.adj.xml"),
            open("../../example/xml-epoch/epoch2.adj.xml"),
            open("../../example/xml-epoch/epoch3.adj.xml"),
            open("../../example/xml-epoch/epoch4.adj.xml"),
            open("../../example/xml-epoch/epoch5.adj.xml")
        ]
    except IOError, e:
        print e
        print "Try to run make in directory gizela/trunk/example/xml-epoch"
        import sys
        sys.exit(1)

    #date = [
    #    datetime.date(2000,1,1),
    #    datetime.date(2001,1,1),
    #    datetime.date(2002,1,1),
    #    datetime.date(2003,1,1),
    #    datetime.date(2004,1,1),
    #    datetime.date(2005,1,1)
    #    ]

    # local 3D coordinate system with north-east axes orientation
    from gizela.util.CoordSystemLocal3D import CoordSystemLocal3D
    c3d = CoordSystemLocal3D()
    c3d.axesOri = "ne"

    epl = NetworkAdjList(coordSystemLocal=c3d, stdevUseApriori=True)
    #epl = NetworkAdjList(stdevUseApriori=False)

    # parse each epoch file and append it as a Network
    from gizela.data.Network import Network
    for f in file:
        adj = GamaLocalDataAdj()
        adj.parse_file(f)
        #import sys
        #print >>sys.stderr, adj.pointListAdjCovMat
        net = Network(c3d, adj)
        epl.append(net)

    from gizela.stat.DisplacementTestType import DisplacementTestType
    #epl.compute_displacement_test()
    epl.compute_displacement_test(testType=DisplacementTestType.xy)

    print epl
    print epl.get_epoch_table()

    from gizela.data.point_text_table import gama_coor_stdev_table
    epl.textTable = gama_coor_stdev_table()
    print epl.get_epoch_table()
    # NOTE(review): NetworkAdjList defines pointListAdj, not pointListFix;
    # this attribute presumably predates a rename -- verify before running
    print epl.pointListFix

    #for pl in epl.epochPointList.iter_epoch():
    #    print pl.covmat.apriori
    #    print pl.covmat.aposteriori
    #    print pl.covmat.useApriori

    # graph
    #fig3 = FigureLayoutEpochList(figScale=figScale,
    #                             title="Example",
    #                             subtitle="y displacements")

    # plot 2d
    from gizela.pyplot.FigureLayoutEpochList2DTest import FigureLayoutEpochList2DTest
    figScale = 1.0/1e4
    print "Figure 2D Test"
    fig1 = FigureLayoutEpochList2DTest(figScale=figScale,
                                       displScale=1.0/figScale/5,
                                       title="Example",
                                       subtitle="displacements")
    epl.plot_xy(fig1, plotTest=True)
    #fig1.save_as()

    # plot z coordinate of a single point
    from gizela.pyplot.FigureLayoutEpochList1DTest import FigureLayoutEpochList1DTest
    id = "C2"
    print "Figure 1D Test of point %s" % id
    fig2 = FigureLayoutEpochList1DTest(displScale=1.0,
                                       title="Example",
                                       subtitle="z displacements - point %s" % id)
    epl.compute_displacement_test(testType=DisplacementTestType.z)
    #print epl.get_epoch_table()
    epl.plot_z(fig2, id, plotTest=True)
    #fig2.save_as()

    # plot y coordinates
    #fig = Figure()
    #epl.plot_point_y(fig, "ABC3", figScale=2)
    #fig.show_(False)

    # plot z coordinates
    #fig = Figure()
    #epl.plot_point_z(fig, "C3", figScale=2)

    # print abom: last error-ellipse element converted from radians to gons
    from math import pi
    ep0 = epl.epochPointList.get_epoch(1)
    for p in ep0:
        if p.displ.is_set_xy():
            ell = p.displ.errEll
            print p.id, ell[-1]*200/pi

    #fig1.show_(True)
    import sys
    #sys.exit(1)

    # second part: a single adjustment containing all epochs joined,
    # point names encoded as e_<epoch>_<id>
    print "\n # joined adjustment"

    try:
        file = open("../../example/xml-epoch/joined.adj.xml")
    except IOError, e:
        print e
        print "Try to run make in directory gizela/trunk/example/xml-epoch"
        import sys
        sys.exit(1)

    epl = NetworkAdjList(coordSystemLocal=c3d, stdevUseApriori=True)
    #epl = EpochList(stdevUseApriori=False)
    adj = GamaLocalDataAdj()
    adj.parse_file(file)
    net = Network(c3d, adj)
    epl.append_joined(net, reString="^e_(\d+)_(.+)$", epochIndex=0, pointIndex=1, )

    #print epl.dateTimeList

    from gizela.stat.DisplacementTestType import DisplacementTestType
    #epl.compute_displacement_test()
    epl.compute_displacement_test(testType=DisplacementTestType.xy)

    print epl
    print epl.get_epoch_table()
    # NOTE(review): see pointListFix remark above
    print epl.pointListFix
| gpl-3.0 |
MissCatLady/AlarmEZ | venv/lib/python2.7/site-packages/psycopg2/tests/test_bugX000.py | 81 | 1678 | #!/usr/bin/env python
# bugX000.py - test for DateTime object allocation bug
#
# Copyright (C) 2007-2011 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import time
import unittest
class DateTimeAllocationBugTestCase(unittest.TestCase):
    def test_date_time_allocation_bug(self):
        """Exercise every date/time factory function.

        The regression under test crashed during object allocation, so
        constructing the objects without error is the whole assertion.
        """
        d1 = psycopg2.Date(2002,12,25)
        d2 = psycopg2.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
        t1 = psycopg2.Time(13,45,30)
        t2 = psycopg2.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
        # t1/t2 are deliberately rebound; only the allocations matter
        t1 = psycopg2.Timestamp(2002,12,25,13,45,30)
        t2 = psycopg2.TimestampFromTicks(
            time.mktime((2002,12,25,13,45,30,0,0,0)))
def test_suite():
    """Collect every test defined in this module into a suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
| mit |
zetalog/linux | tools/perf/scripts/python/syscall-counts-by-pid.py | 532 | 2055 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
from __future__ import print_function
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional single argument: a pid (integer) or a comm (process name)
# to restrict the summary to.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except ValueError:
        # fix: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt; only a failed int() conversion
        # means the argument is a comm
        for_comm = sys.argv[1]

# nested autodict: syscalls[comm][pid][syscall_id] -> count
syscalls = autodict()
def trace_begin():
    # called by perf once before any events are processed
    print("Press control+C to stop and show the summary")
def trace_end():
    # called by perf after the last event; emit the summary
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, args):
    """Count one raw_syscalls:sys_enter event, honouring the comm/pid filter."""
    # fix: compare the filters against None explicitly -- the previous
    # truthiness test (`for_pid and ...`) silently disabled filtering for
    # pid 0 (the idle/swapper task)
    if (for_comm is not None and common_comm != for_comm) or \
            (for_pid is not None and common_pid != for_pid):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # autodict leaf does not exist yet: first occurrence of this
        # (comm, pid, syscall) triple
        syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # compatibility shim for perf builds that dispatch to the
    # syscalls:* handler name; forwards all arguments by name
    # NOTE(review): raw_syscalls__sys_enter also takes common_callchain,
    # which is absent from locals() here -- verify perf never routes
    # events through this entry point, or the call would raise TypeError
    raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
    """Print per-comm/per-pid syscall counts, most frequent first."""
    if for_comm is not None:
        print("\nsyscall events for %s:\n" % (for_comm))
    else:
        print("\nsyscall events by comm/pid:\n")

    print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
    print("%-40s %10s" % ("----------------------------------------",
                          "----------"))

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print("\n%s [%d]" % (comm, pid))
            id_keys = syscalls[comm][pid].keys()
            # sort by count (descending), ties broken by syscall id
            for id, val in sorted(syscalls[comm][pid].items(),
                    key = lambda kv: (kv[1], kv[0]), reverse = True):
                print(" %-38s %10d" % (syscall_name(id), val))
| gpl-2.0 |
PepSalehi/boulder | backup/june 12/LTS/__init__.py | 5 | 1292 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
LTS
A QGIS plugin
Computes level of traffic stress
-------------------
begin : 2014-04-24
copyright : (C) 2014 by Peyman Noursalehi / Northeastern University
email : p.noursalehi@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
    """QGIS plugin entry point: build and return the LTS plugin instance.

    iface: QgisInterface instance passed in by QGIS
    """
    # load LTS class from file LTS
    from lts import LTS
    return LTS(iface)
| mit |
SUSE-Cloud/nova | nova/tests/fakeguestfs.py | 7 | 4597 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class GuestFS(object):
    """In-memory fake of the libguestfs GuestFS handle used by the tests.

    Tracks drives, mounts and a fake file table instead of talking to a
    real appliance, mimicking the subset of the libguestfs Python API
    that nova's disk-mounting code exercises.
    """

    def __init__(self):
        self.drives = []           # (path, format) tuples from add_drive_opts
        self.running = False       # True between launch() and shutdown()
        self.closed = False        # set by close()
        self.mounts = []           # (options, device, mountpoint) tuples
        self.files = {}            # path -> fake inode dict
        self.auginit = False       # True between aug_init() and aug_close()
        self.attach_method = 'libvirt'
        self.root_mounted = False  # '/' must be mounted before other paths

    @staticmethod
    def _new_file_entry(isdir=False):
        """Return a fresh fake inode with the default ownership/mode.

        Regular files start with the canonical "Hello World" content.
        """
        entry = {
            "isdir": isdir,
            "gid": 100,
            "uid": 100,
            "mode": 0o700
        }
        if not isdir:
            entry["content"] = "Hello World"
        return entry

    def _ensure_file(self, path):
        """Create a default regular file at *path* if missing; return it."""
        if path not in self.files:
            self.files[path] = self._new_file_entry()
        return self.files[path]

    def launch(self):
        self.running = True

    def shutdown(self):
        # stopping the appliance forgets all mounts and drives
        self.running = False
        self.mounts = []
        self.drives = []

    def close(self):
        self.closed = True

    def add_drive_opts(self, file, *args, **kwargs):
        # `file` shadows the py2 builtin, but the name is kept because it
        # matches the real libguestfs keyword argument
        self.drives.append((file, kwargs['format']))

    def get_attach_method(self):
        return self.attach_method

    def set_attach_method(self, attach_method):
        self.attach_method = attach_method

    def inspect_os(self):
        """Pretend a single root filesystem was detected."""
        return ["/dev/guestvgf/lv_root"]

    def inspect_get_mountpoints(self, dev):
        """Return a fixed set of (mountpoint, device) pairs for *dev*."""
        return [["/home", "/dev/mapper/guestvgf-lv_home"],
                ["/", "/dev/mapper/guestvgf-lv_root"],
                ["/boot", "/dev/vda1"]]

    def mount_options(self, options, device, mntpoint):
        """Record a mount; the root filesystem must be mounted first."""
        if mntpoint == "/":
            self.root_mounted = True
        elif not self.root_mounted:
            raise RuntimeError(
                "mount: %s: No such file or directory" % mntpoint)
        self.mounts.append((options, device, mntpoint))

    def mkdir_p(self, path):
        if path not in self.files:
            self.files[path] = self._new_file_entry(isdir=True)

    def read_file(self, path):
        # reading a missing path implicitly creates the default file,
        # matching the original fake's behaviour
        return self._ensure_file(path)["content"]

    def write(self, path, content):
        self._ensure_file(path)["content"] = content

    def write_append(self, path, content):
        entry = self._ensure_file(path)
        entry["content"] = entry["content"] + content

    def stat(self, path):
        """Return the mode of *path*; unlike read_file, missing paths raise."""
        if path not in self.files:
            raise RuntimeError("No such file: " + path)
        return self.files[path]["mode"]

    def chown(self, uid, gid, path):
        """Change ownership; -1 leaves the respective id unchanged (chown(2))."""
        if path not in self.files:
            raise RuntimeError("No such file: " + path)
        if uid != -1:
            self.files[path]["uid"] = uid
        if gid != -1:
            self.files[path]["gid"] = gid

    def chmod(self, mode, path):
        if path not in self.files:
            raise RuntimeError("No such file: " + path)
        self.files[path]["mode"] = mode

    def aug_init(self, root, flags):
        self.auginit = True

    def aug_close(self):
        self.auginit = False

    # fixed table of Augeas paths the tests are allowed to query
    _AUG_VALUES = {
        "/files/etc/passwd/root/uid": 0,
        "/files/etc/passwd/fred/uid": 105,
        "/files/etc/passwd/joe/uid": 110,
        "/files/etc/group/root/gid": 0,
        "/files/etc/group/users/gid": 500,
        "/files/etc/group/admins/gid": 600,
    }

    def aug_get(self, cfgpath):
        """Return the canned Augeas value for *cfgpath*.

        Raises RuntimeError if aug_init() was not called or the path is
        unknown.
        """
        if not self.auginit:
            raise RuntimeError("Augeus not initialized")
        try:
            return self._AUG_VALUES[cfgpath]
        except KeyError:
            # fix: the message used to be passed as a second positional
            # argument ("Unknown path %s", cfgpath), so it was never
            # %-formatted into the error text
            raise RuntimeError("Unknown path %s" % cfgpath)
CodEnFisH/palantir | floodlight/thrift/lib/py/src/TSerialization.py | 74 | 1344 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from protocol import TBinaryProtocol
from transport import TTransport
def serialize(thrift_object, protocol_factory=None):
    """Serialize *thrift_object* to a byte string.

    protocol_factory: thrift protocol factory; a fresh binary-protocol
    factory is used when omitted.
    """
    # fix: the default factory used to be created once at import time and
    # shared by every caller (default arguments are evaluated only once);
    # build it per call instead
    if protocol_factory is None:
        protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
    transport = TTransport.TMemoryBuffer()
    protocol = protocol_factory.getProtocol(transport)
    thrift_object.write(protocol)
    return transport.getvalue()
def deserialize(base, buf, protocol_factory=None):
    """Populate thrift object *base* from byte string *buf* and return it.

    protocol_factory: thrift protocol factory; a fresh binary-protocol
    factory is used when omitted.
    """
    # fix: the default factory used to be created once at import time and
    # shared by every caller (default arguments are evaluated only once);
    # build it per call instead
    if protocol_factory is None:
        protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
    transport = TTransport.TMemoryBuffer(buf)
    protocol = protocol_factory.getProtocol(transport)
    base.read(protocol)
    return base
| apache-2.0 |
Korkki/django | tests/invalid_models_tests/test_ordinary_fields.py | 224 | 19694 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.core.checks import Error, Warning as DjangoWarning
from django.db import connection, models
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.timezone import now
from .base import IsolatedModelsTestCase
class AutoFieldTests(IsolatedModelsTestCase):
    """System checks for AutoField (fields.E100)."""

    def test_valid_case(self):
        # A primary-key AutoField produces no check errors.
        class Model(models.Model):
            id = models.AutoField(primary_key=True)

        field = Model._meta.get_field('id')
        errors = field.check()
        expected = []
        self.assertEqual(errors, expected)

    def test_primary_key(self):
        # primary_key must be True. Refs #12467.
        class Model(models.Model):
            field = models.AutoField(primary_key=False)

            # Prevent Django from autocreating `id` AutoField, which would
            # result in an error, because a model must have exactly one
            # AutoField.
            another = models.IntegerField(primary_key=True)

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                'AutoFields must set primary_key=True.',
                hint=None,
                obj=field,
                id='fields.E100',
            ),
        ]
        self.assertEqual(errors, expected)
class BooleanFieldTests(IsolatedModelsTestCase):
    """System checks for BooleanField (fields.E110)."""

    def test_nullable_boolean_field(self):
        # null=True is invalid on BooleanField; NullBooleanField must be
        # used instead.
        class Model(models.Model):
            field = models.BooleanField(null=True)

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                'BooleanFields do not accept null values.',
                hint='Use a NullBooleanField instead.',
                obj=field,
                id='fields.E110',
            ),
        ]
        self.assertEqual(errors, expected)
class CharFieldTests(IsolatedModelsTestCase, TestCase):
    """System checks for CharField: max_length (E120/E121), choices
    (E004/E005), db_index (E006) and the MySQL unique-length limit."""

    def test_valid_field(self):
        class Model(models.Model):
            field = models.CharField(
                max_length=255,
                choices=[
                    ('1', 'item1'),
                    ('2', 'item2'),
                ],
                db_index=True)

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = []
        self.assertEqual(errors, expected)

    def test_missing_max_length(self):
        class Model(models.Model):
            field = models.CharField()

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "CharFields must define a 'max_length' attribute.",
                hint=None,
                obj=field,
                id='fields.E120',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_negative_max_length(self):
        class Model(models.Model):
            field = models.CharField(max_length=-1)

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'max_length' must be a positive integer.",
                hint=None,
                obj=field,
                id='fields.E121',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_bad_max_length_value(self):
        # A non-numeric max_length is rejected with the same E121 error.
        class Model(models.Model):
            field = models.CharField(max_length="bad")

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'max_length' must be a positive integer.",
                hint=None,
                obj=field,
                id='fields.E121',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_str_max_length_value(self):
        # Even a numeric string is not accepted: max_length must be an int.
        class Model(models.Model):
            field = models.CharField(max_length='20')

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'max_length' must be a positive integer.",
                hint=None,
                obj=field,
                id='fields.E121',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_iterable_choices(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, choices='bad')

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'choices' must be an iterable (e.g., a list or tuple).",
                hint=None,
                obj=field,
                id='fields.E004',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_choices_containing_non_pairs(self):
        # Each choice must be a (value, human readable name) 2-tuple.
        class Model(models.Model):
            field = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)])

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'choices' must be an iterable containing (actual value, human readable name) tuples.",
                hint=None,
                obj=field,
                id='fields.E005',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_bad_db_index_value(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, db_index='bad')

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'db_index' must be None, True or False.",
                hint=None,
                obj=field,
                id='fields.E006',
            ),
        ]
        self.assertEqual(errors, expected)

    @unittest.skipUnless(connection.vendor == 'mysql',
                         "Test valid only for MySQL")
    def test_too_long_char_field_under_mysql(self):
        from django.db.backends.mysql.validation import DatabaseValidation

        class Model(models.Model):
            field = models.CharField(unique=True, max_length=256)

        field = Model._meta.get_field('field')
        # Backend-specific check, run through the MySQL validator directly.
        validator = DatabaseValidation(connection=None)
        errors = validator.check_field(field)
        expected = [
            Error(
                'MySQL does not allow unique CharFields to have a max_length > 255.',
                hint=None,
                obj=field,
                id='mysql.E001',
            )
        ]
        self.assertEqual(errors, expected)
class DateFieldTests(IsolatedModelsTestCase, TestCase):
    """System checks for DateField/DateTimeField option combinations."""

    def test_auto_now_and_auto_now_add_raise_error(self):
        """Any two of auto_now / auto_now_add / default together are E160."""
        class Model(models.Model):
            field0 = models.DateTimeField(auto_now=True, auto_now_add=True, default=now)
            field1 = models.DateTimeField(auto_now=True, auto_now_add=False, default=now)
            field2 = models.DateTimeField(auto_now=False, auto_now_add=True, default=now)
            field3 = models.DateTimeField(auto_now=True, auto_now_add=True, default=None)

        found = []
        wanted = []
        for index in range(4):
            current = Model._meta.get_field('field%d' % index)
            wanted.append(Error(
                "The options auto_now, auto_now_add, and default "
                "are mutually exclusive. Only one of these options "
                "may be present.",
                hint=None,
                obj=current,
                id='fields.E160',
            ))
            found.extend(current.check())
        self.assertEqual(found, wanted)

    def test_fix_default_value(self):
        """Fixed (import-time-evaluated) defaults warn with fields.W161."""
        class Model(models.Model):
            field_dt = models.DateField(default=now())
            field_d = models.DateField(default=now().date())
            field_now = models.DateField(default=now)

        fixed_dt = Model._meta.get_field('field_dt')
        fixed_d = Model._meta.get_field('field_d')
        callable_default = Model._meta.get_field('field_now')
        found = fixed_dt.check()
        found.extend(fixed_d.check())
        found.extend(callable_default.check())  # doesn't raise a warning
        hint_text = ('It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`')
        wanted = [
            DjangoWarning(
                'Fixed default value provided.',
                hint=hint_text,
                obj=fixed_dt,
                id='fields.W161',
            ),
            DjangoWarning(
                'Fixed default value provided.',
                hint=hint_text,
                obj=fixed_d,
                id='fields.W161',
            )
        ]
        # Temporarily disable the diff limit so mismatches print in full.
        saved_max_diff = self.maxDiff
        self.maxDiff = None
        self.assertEqual(found, wanted)
        self.maxDiff = saved_max_diff

    @override_settings(USE_TZ=True)
    def test_fix_default_value_tz(self):
        """The same warnings are emitted when USE_TZ is enabled."""
        self.test_fix_default_value()
class DateTimeFieldTests(IsolatedModelsTestCase, TestCase):
    """System checks for DateTimeField default values."""

    def test_fix_default_value(self):
        """Fixed (import-time-evaluated) defaults warn with fields.W161."""
        class Model(models.Model):
            field_dt = models.DateTimeField(default=now())
            field_d = models.DateTimeField(default=now().date())
            field_now = models.DateTimeField(default=now)

        fixed_dt = Model._meta.get_field('field_dt')
        fixed_d = Model._meta.get_field('field_d')
        callable_default = Model._meta.get_field('field_now')
        found = fixed_dt.check()
        found.extend(fixed_d.check())
        found.extend(callable_default.check())  # doesn't raise a warning
        hint_text = ('It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`')
        wanted = [
            DjangoWarning(
                'Fixed default value provided.',
                hint=hint_text,
                obj=fixed_dt,
                id='fields.W161',
            ),
            DjangoWarning(
                'Fixed default value provided.',
                hint=hint_text,
                obj=fixed_d,
                id='fields.W161',
            )
        ]
        # Temporarily disable the diff limit so mismatches print in full.
        saved_max_diff = self.maxDiff
        self.maxDiff = None
        self.assertEqual(found, wanted)
        self.maxDiff = saved_max_diff

    @override_settings(USE_TZ=True)
    def test_fix_default_value_tz(self):
        """The same warnings are emitted when USE_TZ is enabled."""
        self.test_fix_default_value()
class DecimalFieldTests(IsolatedModelsTestCase):
    """System checks for DecimalField's max_digits / decimal_places."""

    def test_required_attributes(self):
        """Omitting both required attributes yields E130 and E132."""
        class Model(models.Model):
            field = models.DecimalField()

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    "DecimalFields must define a 'decimal_places' attribute.",
                    hint=None,
                    obj=checked,
                    id='fields.E130',
                ),
                Error(
                    "DecimalFields must define a 'max_digits' attribute.",
                    hint=None,
                    obj=checked,
                    id='fields.E132',
                ),
            ],
        )

    def test_negative_max_digits_and_decimal_places(self):
        """Negative values yield E131 and E133."""
        class Model(models.Model):
            field = models.DecimalField(max_digits=-1, decimal_places=-1)

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    "'decimal_places' must be a non-negative integer.",
                    hint=None,
                    obj=checked,
                    id='fields.E131',
                ),
                Error(
                    "'max_digits' must be a positive integer.",
                    hint=None,
                    obj=checked,
                    id='fields.E133',
                ),
            ],
        )

    def test_bad_values_of_max_digits_and_decimal_places(self):
        """Non-integer values yield the same E131/E133 pair."""
        class Model(models.Model):
            field = models.DecimalField(max_digits="bad", decimal_places="bad")

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    "'decimal_places' must be a non-negative integer.",
                    hint=None,
                    obj=checked,
                    id='fields.E131',
                ),
                Error(
                    "'max_digits' must be a positive integer.",
                    hint=None,
                    obj=checked,
                    id='fields.E133',
                ),
            ],
        )

    def test_decimal_places_greater_than_max_digits(self):
        """decimal_places exceeding max_digits yields E134."""
        class Model(models.Model):
            field = models.DecimalField(max_digits=9, decimal_places=10)

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    "'max_digits' must be greater or equal to 'decimal_places'.",
                    hint=None,
                    obj=checked,
                    id='fields.E134',
                ),
            ],
        )

    def test_valid_field(self):
        """Equal max_digits and decimal_places is a valid configuration."""
        class Model(models.Model):
            field = models.DecimalField(max_digits=10, decimal_places=10)

        self.assertEqual(Model._meta.get_field('field').check(), [])
class FileFieldTests(IsolatedModelsTestCase):
    """System checks for FileField argument validation."""

    def test_valid_case(self):
        """A plain upload_to-only FileField passes all checks."""
        class Model(models.Model):
            field = models.FileField(upload_to='somewhere')

        self.assertEqual(Model._meta.get_field('field').check(), [])

    def test_unique(self):
        """Passing unique= at all (even False) is rejected with E200."""
        class Model(models.Model):
            field = models.FileField(unique=False, upload_to='somewhere')

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    "'unique' is not a valid argument for a FileField.",
                    hint=None,
                    obj=checked,
                    id='fields.E200',
                )
            ],
        )

    def test_primary_key(self):
        """Passing primary_key= at all is rejected with E201."""
        class Model(models.Model):
            field = models.FileField(primary_key=False, upload_to='somewhere')

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    "'primary_key' is not a valid argument for a FileField.",
                    hint=None,
                    obj=checked,
                    id='fields.E201',
                )
            ],
        )
class FilePathFieldTests(IsolatedModelsTestCase):
    """System checks for FilePathField argument validation."""

    def test_forbidden_files_and_folders(self):
        """Disabling both files and folders is rejected with E140."""
        class Model(models.Model):
            field = models.FilePathField(allow_files=False, allow_folders=False)

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                    hint=None,
                    obj=checked,
                    id='fields.E140',
                ),
            ],
        )
class GenericIPAddressFieldTests(IsolatedModelsTestCase):
    """System checks for GenericIPAddressField null/blank interaction."""

    def test_non_nullable_blank(self):
        """blank=True without null=True is rejected with E150."""
        class Model(models.Model):
            field = models.GenericIPAddressField(null=False, blank=True)

        checked = Model._meta.get_field('field')
        self.assertEqual(
            checked.check(),
            [
                Error(
                    ('GenericIPAddressFields cannot have blank=True if null=False, '
                     'as blank values are stored as nulls.'),
                    hint=None,
                    obj=checked,
                    id='fields.E150',
                ),
            ],
        )
class ImageFieldTests(IsolatedModelsTestCase):
    """ImageField is only usable when Pillow can be imported."""

    def test_pillow_installed(self):
        """Without Pillow, an ImageField reports E210; with it, no errors."""
        try:
            from PIL import Image  # NOQA
        except ImportError:
            pillow_installed = False
        else:
            pillow_installed = True

        class Model(models.Model):
            field = models.ImageField(upload_to='somewhere')

        checked = Model._meta.get_field('field')
        if pillow_installed:
            wanted = []
        else:
            wanted = [
                Error(
                    'Cannot use ImageField because Pillow is not installed.',
                    hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
                          'or run command "pip install Pillow".'),
                    obj=checked,
                    id='fields.E210',
                ),
            ]
        self.assertEqual(checked.check(), wanted)
class IntegerFieldTests(IsolatedModelsTestCase):
    """IntegerField ignores max_length and warns about it."""

    def test_max_length_warning(self):
        """max_length on an IntegerField produces warning fields.W122."""
        class Model(models.Model):
            value = models.IntegerField(max_length=2)

        checked = Model._meta.get_field('value')
        # The warning is collected via the model-level check, not the field's.
        self.assertEqual(
            Model.check(),
            [
                DjangoWarning(
                    "'max_length' is ignored when used with IntegerField",
                    hint="Remove 'max_length' from field",
                    obj=checked,
                    id='fields.W122',
                )
            ],
        )
class TimeFieldTests(IsolatedModelsTestCase, TestCase):
    """System checks for TimeField default values."""

    def test_fix_default_value(self):
        """Fixed (import-time-evaluated) defaults warn with fields.W161."""
        class Model(models.Model):
            field_dt = models.TimeField(default=now())
            field_t = models.TimeField(default=now().time())
            field_now = models.DateField(default=now)

        fixed_dt = Model._meta.get_field('field_dt')
        fixed_t = Model._meta.get_field('field_t')
        callable_default = Model._meta.get_field('field_now')
        found = fixed_dt.check()
        found.extend(fixed_t.check())
        found.extend(callable_default.check())  # doesn't raise a warning
        hint_text = ('It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`')
        wanted = [
            DjangoWarning(
                'Fixed default value provided.',
                hint=hint_text,
                obj=fixed_dt,
                id='fields.W161',
            ),
            DjangoWarning(
                'Fixed default value provided.',
                hint=hint_text,
                obj=fixed_t,
                id='fields.W161',
            )
        ]
        # Temporarily disable the diff limit so mismatches print in full.
        saved_max_diff = self.maxDiff
        self.maxDiff = None
        self.assertEqual(found, wanted)
        self.maxDiff = saved_max_diff

    @override_settings(USE_TZ=True)
    def test_fix_default_value_tz(self):
        """The same warnings are emitted when USE_TZ is enabled."""
        self.test_fix_default_value()
| bsd-3-clause |
kryptine/ghilbert | verify_test.py | 3 | 5332 | #encoding: utf-8
import sys
import verify
import copy
class StringStream:
    """Minimal readline-compatible stream backed by an in-memory string.

    Each call to readline() returns the next segment of the original
    string with a trailing newline appended; after the segments are
    exhausted it returns '' (the conventional EOF marker).
    """

    def __init__(self, s):
        self.lines = s.split('\n')
        self.ix = 0

    def readline(self):
        """Return the next line (always newline-terminated), or '' at EOF."""
        if self.ix < len(self.lines):
            line = self.lines[self.ix] + '\n'
            self.ix += 1
            return line
        return ''
class TestUrlCtx:
    """In-memory URL-to-content mapping used by the data-driven tests.

    ``d`` maps URL strings to accumulated file text; ``_stash`` holds a
    deep-copied snapshot so a shared context can be reverted between
    test cases.
    """
    def __init__(self):
        self.d = {}
        self._stash = {}

    def add(self, url, val):
        """Register (or overwrite) the full content of *url*."""
        self.d[url] = val

    def resolve(self, url):
        """Return a readline-style stream over the content of *url*."""
        return StringStream(self.d[url])

    # additional interface for data-driven tests
    def open_append(self, url):
        """Select *url* as the target of subsequent append_current calls."""
        self.current_url = url
        # Bug fix: dict.has_key() only exists on Python 2; the `in`
        # operator is equivalent and works on both Python 2 and 3.
        if url not in self.d:
            self.d[url] = ''

    def append_current(self, text):
        """Append *text* to the URL selected by open_append()."""
        self.d[self.current_url] += text

    def revert(self):
        """Restore the mapping to the last stashed snapshot."""
        self.d = copy.deepcopy(self._stash)

    def stash(self):
        """Snapshot the current mapping so revert() can restore it."""
        self._stash = copy.deepcopy(self.d)

    def clear_stash(self):
        """Drop the snapshot entirely."""
        self._stash = {}
def sexp(s):
    """Parse string *s* and return its first s-expression."""
    return verify.read_sexp(verify.Scanner(StringStream(s)))
def test_one_fv(verifyctx, expected, var, term, fvctx = None):
    # Check a single free-variable query: `expected` is True if `var`
    # should be reported free in `term` (parsed from its string form).
    free = verifyctx.free_in(var, sexp(term), fvctx)
    if free: explanation = "free in"
    else: explanation = "not free in"
    # Report on global verbosity or on mismatch (Python 2 print statement).
    if verbose or free != expected: print var, explanation, term
    if free != expected:
        raise verify.VerifyError('fail')
def TestFv(out):
    # Unit tests for the verifier's free-variable tracking (free_in).
    # Builds a small proof-language interface in-memory, then probes a
    # series of positive and negative free-variable cases.
    urlctx = TestUrlCtx()
    urlctx.add('foo.ghi',
"""kind (wff)
kind (nat)
tvar (wff ph ps)
tvar (nat A B)
var (nat x y)
term (wff (= A B))
term (wff (A. x ph))
term (wff ([/] A x ph) (x A))
""")
    verifyctx = verify.VerifyCtx(urlctx, run_regression)
    verifyctx.do_cmd('import', ['FOO', 'foo.ghi', [], '""'], out)
    verifyctx.do_cmd('tvar', ['nat', 'A', 'B'], out)
    verifyctx.do_cmd('var', ['nat', 'x', 'y'], out)
    # Concrete terms: binding (A. x ...) hides x; substitution ([/] ... x ...)
    # binds x in its body but not in the substituted term.
    test_one_fv(verifyctx, True, 'x', '(= x y)')
    test_one_fv(verifyctx, False, 'z', '(= x y)')
    test_one_fv(verifyctx, False, 'x', '(A. x ph)')
    test_one_fv(verifyctx, True, 'x', '([/] (= x y) x ph)')
    test_one_fv(verifyctx, False, 'x', '([/] A x ph)')
    test_one_fv(verifyctx, True, 'x', 'x')
    test_one_fv(verifyctx, False, 'x', 'y')
    # With an explicit fv context, term variables are only free in the
    # listed binding variables.
    fvvars_x = {'A': 0}
    fvvars_y = {}
    test_one_fv(verifyctx, False, 'x', 'A', fvvars_x)
    test_one_fv(verifyctx, True, 'y', 'A', fvvars_y)
    test_one_fv(verifyctx, False, 'z', 'A')
    test_one_fv(verifyctx, True, 'x', 'x', fvvars_x)
    test_one_fv(verifyctx, False, 'x', 'y', fvvars_x)
def run_regression(urlctx, url, ctx, out):
    """Feed every (command, argument) s-expression pair from *url* into *ctx*.

    Version of the verifier run loop tuned for regression testing:
    returns True at end of input and lets errors propagate to the caller.
    """
    scanner = verify.Scanner(urlctx.resolve(url))
    while True:
        cmd = verify.read_sexp(scanner)
        if cmd is None:
            return True
        if type(cmd) is not str:
            raise SyntaxError('cmd must be atom')
        ctx.do_cmd(cmd, verify.read_sexp(scanner), out)
def regression(fn, out):
    # Run the data-driven regression suite in file *fn* and return
    # [tests, failures].  Directive lines start with '!'; all other
    # non-blank, non-comment lines accumulate into the currently
    # selected URL.  (Python 2 only: file().xreadlines, old-style
    # except clauses, print statements.)
    tests = 0
    failures = 0
    urlctx = TestUrlCtx()
    lineno = 0
    for l in file(fn).xreadlines():
        lineno += 1
        if l.startswith('!'):
            cmd = l.split()
            if cmd[0] == '!append':
                # Subsequent content lines are appended to this URL.
                urlctx.open_append(cmd[1])
            elif cmd[0] == '!shared':
                urlctx = TestUrlCtx()
            elif cmd[0] == '!end':
                # Snapshot shared state so each test can revert to it.
                urlctx.stash()
            elif cmd[0] == '!unshare':
                urlctx = TestUrlCtx()
            elif cmd[0] in ('!accept', '!reject'):
                verifyctx = verify.VerifyCtx(urlctx, run_regression)
                error = None
                tests += 1
                if len(cmd) < 2:
                    failures += 1
                    print str(lineno) + ": FAIL, Missing proof module name for !accept or !reject command"
                else:
                    try:
                        run_regression(urlctx, cmd[1], verifyctx, out)
                    except verify.VerifyError, x:
                        error = "VerifyError: " + x.why
                    except SyntaxError, x:
                        error = "SyntaxError: " + str(x)
                    # !accept must verify cleanly; !reject must raise.
                    if error is None and cmd[0] == '!reject':
                        failures += 1
                        print str(lineno) + ': FAIL, expected error: ' + ' '.join(cmd[2:])
                    elif error and cmd[0] == '!accept':
                        failures += 1
                        print str(lineno) + ': FAIL, got unexpected ' + error
                    if verbose >= 1 and error and cmd[0] == '!reject':
                        print str(lineno) + ': ok ' + error
                    # Restore the shared state for the next test case.
                    urlctx.revert()
            else:
                failures += 1
                print str(lineno) + ": FAIL, unrecognized command " + cmd[0]
        elif l.strip() and not l.startswith('#'):
            urlctx.append_current(l)
    return [tests, failures]
# Module entry point: always run the free-variable unit tests, then, if a
# regression file was named on the command line, run the data-driven
# suite and exit non-zero on any failure.  (Python 2 print statements.)
verbose = 1
TestFv(sys.stdout)
if len(sys.argv) > 1:
    tests, failures = regression(sys.argv[1], sys.stdout)
    print
    print tests, 'tests run, ', failures, 'failures'
    exit(0 if failures == 0 else 1)
| apache-2.0 |
kytvi2p/tahoe-lafs | misc/simulators/ringsim.py | 8 | 8171 | #! /usr/bin/python
# used to discuss ticket #302: "stop permuting peerlist?"
# import time
import math
from hashlib import md5 # sha1, sha256
myhash = md5
# md5: 1520 "uploads" per second
# sha1: 1350 ups
# sha256: 930 ups
from itertools import count
from twisted.python import usage
def abbreviate_space(s, SI=True):
    """Render a byte count *s* as a human-readable string.

    With SI=True, uses powers of 1000 and a plain "B" suffix; with
    SI=False, uses powers of 1024 and an "iB" suffix.  None renders as
    "unknown".  Values below 1024 are always emitted as raw bytes,
    even in SI mode (so 1000-1023 show as bytes, not "1.0x kB").
    """
    if s is None:
        return "unknown"
    if SI:
        unit, suffix = 1000.0, "B"
    else:
        unit, suffix = 1024.0, "iB"

    def fmt(value, prefix):
        return "%.2f %s%s" % (value, prefix, suffix)

    if s < 1024:  # 1000-1023 get emitted as bytes, even in SI mode
        return "%d B" % s
    for power, prefix in ((2, "k"), (3, "M"), (4, "G"), (5, "T")):
        if s < unit ** power:
            return fmt(s / unit ** (power - 1), prefix)
    return fmt(s / unit ** 5, "P")
def make_up_a_file_size(seed):
    # Derive a deterministic pseudo-random file size from `seed` by hashing.
    h = int(myhash(seed).hexdigest(),16)
    max=2**31  # NOTE: shadows the builtin max(); kept for byte-compatibility
    # Developer toggle: the `if 1:` branch selects the exponential
    # distribution; flip it to 0 to fall through to the uniform one below.
    if 1: # exponential distribution
        e = 8 + (h % (31-8))  # exponent in [8, 30] -> sizes 256 B .. 1 GiB
        return 2 ** e
    # uniform distribution
    return h % max # avg 1GB
# Sample 10000 synthetic sizes to estimate the mean upload size used by the
# capacity model below.  (Python 2 print statement; run under python2.)
sizes = [make_up_a_file_size(str(i)) for i in range(10000)]
avg_filesize = sum(sizes)/len(sizes)
print "average file size:", abbreviate_space(avg_filesize)
# Every simulated server offers 1 TB (10^12 bytes) of storage.
SERVER_CAPACITY = 10**12
class Server:
    """One storage server in the simulated ring.

    Tracks how many bytes and shares it has accepted; `full_at_tick`
    records (externally) the upload number at which it first filled up.
    """

    def __init__(self, nodeid, capacity):
        self.nodeid = nodeid
        self.capacity = capacity
        self.used = 0
        self.numshares = 0
        self.full_at_tick = None

    def upload(self, sharesize):
        """Accept a share if it fits strictly below capacity; return success."""
        if self.used + sharesize >= self.capacity:
            return False
        self.used += sharesize
        self.numshares += 1
        return True

    def __repr__(self):
        name = self.__class__.__name__
        if self.full_at_tick is None:
            return "<%s %s>" % (name, self.nodeid)
        return "<%s %s full at %d>" % (name, self.nodeid, self.full_at_tick)
class Ring:
    # A consistent-hashing ring of Server objects; `permute` chooses
    # between per-file permuted peer lists and a flat ring walk.
    SHOW_MINMAX = False  # set True to print least/most-loaded server ids

    def __init__(self, numservers, seed, permute):
        self.servers = []
        for i in range(numservers):
            # Node ids are derived from the seed so runs are reproducible.
            nodeid = myhash(str(seed)+str(i)).hexdigest()
            capacity = SERVER_CAPACITY
            s = Server(nodeid, capacity)
            self.servers.append(s)
        self.servers.sort(key=lambda s: s.nodeid)
        self.permute = permute
        #self.list_servers()

    def list_servers(self):
        # Debug helper: print each server with the hash-space gap to its
        # neighbours, then again sorted by gap size.  (Python 2 prints.)
        for i in range(len(self.servers)):
            s = self.servers[i]
            next_s = self.servers[(i+1)%len(self.servers)]
            diff = "%032x" % (int(next_s.nodeid,16) - int(s.nodeid,16))
            s.next_diff = diff
            prev_s = self.servers[(i-1)%len(self.servers)]
            diff = "%032x" % (int(s.nodeid,16) - int(prev_s.nodeid,16))
            s.prev_diff = diff
            print s, s.prev_diff
        print "sorted by delta"
        for s in sorted(self.servers, key=lambda s:s.prev_diff):
            print s, s.prev_diff

    def servers_for_si(self, si):
        # Return the upload order for storage index `si`: either a
        # per-file permutation, or the flat ring starting at si.
        if self.permute:
            def sortkey(s):
                return myhash(s.nodeid+si).digest()
            return sorted(self.servers, key=sortkey)
        for i in range(len(self.servers)):
            if self.servers[i].nodeid >= si:
                return self.servers[i:] + self.servers[:i]
        return list(self.servers)

    def show_servers(self, picked):
        # Render which servers were picked as a bitstring in ring order.
        bits = []
        for s in self.servers:
            if s in picked:
                bits.append("1")
            else:
                bits.append("0")
        #d = [s in picked and "1" or "0" for s in self.servers]
        return "".join(bits)

    def dump_usage(self, numfiles, avg_space_per_file):
        # Print per-server usage statistics after `numfiles` uploads.
        print "uploaded", numfiles
        # avg_space_per_file measures expected grid-wide ciphertext per file
        used = list(reversed(sorted([s.used for s in self.servers])))
        # used is actual per-server ciphertext
        usedpf = [1.0*u/numfiles for u in used]
        # usedpf is actual per-server-per-file ciphertext
        #print "min/max usage: %s/%s" % (abbreviate_space(used[-1]),
        #                                abbreviate_space(used[0]))
        avg_usage_per_file = avg_space_per_file/len(self.servers)
        # avg_usage_per_file is expected per-server-per-file ciphertext
        spreadpf = usedpf[0] - usedpf[-1]
        average_usagepf = sum(usedpf) / len(usedpf)
        # Sample variance (n-1 denominator), then relative std deviation.
        variance = sum([(u-average_usagepf)**2 for u in usedpf])/(len(usedpf)-1)
        std_deviation = math.sqrt(variance)
        sd_of_total = std_deviation / avg_usage_per_file
        print "min/max/(exp) usage-pf-ps %s/%s/(%s):" % (
            abbreviate_space(usedpf[-1]),
            abbreviate_space(usedpf[0]),
            abbreviate_space(avg_usage_per_file) ),
        print "spread-pf: %s (%.2f%%)" % (
            abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file),
        #print "average_usage:", abbreviate_space(average_usagepf)
        print "stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
                                       100.0*sd_of_total)
        if self.SHOW_MINMAX:
            s2 = sorted(self.servers, key=lambda s: s.used)
            print "least:", s2[0].nodeid
            print "most:", s2[-1].nodeid
class Options(usage.Options):
    # Command-line options for the ring simulator, parsed by
    # twisted.python.usage: (long, short, default, description[, coercer]).
    optParameters = [
        ("k", "k", 3, "required shares", int),
        ("N", "N", 10, "total shares", int),
        ("servers", None, 100, "number of servers", int),
        ("seed", None, None, "seed to use for creating ring"),
        ("fileseed", None, "blah", "seed to use for creating files"),
        ("permute", "p", 1, "1 to permute, 0 to use flat ring", int),
        ]

    def postOptions(self):
        # A ring seed is mandatory; fail fast if it was not supplied.
        assert self["seed"]
def do_run(ring, opts):
    # Upload synthetic files into `ring` until it is completely full,
    # reporting the first server-full and first wrapped-file events.
    # Returns the number of files uploaded.  (Python 2 print statements.)
    avg_space_per_file = avg_filesize * opts["N"] / opts["k"]
    fileseed = opts["fileseed"]
    all_servers_have_room = True
    no_files_have_wrapped = True
    for filenum in count(0):
        #used = list(reversed(sorted([s.used for s in ring.servers])))
        #used = [s.used for s in ring.servers]
        #print used
        si = myhash(fileseed+str(filenum)).hexdigest()
        filesize = make_up_a_file_size(si)
        sharesize = filesize / opts["k"]
        if filenum%4000==0 and filenum > 1:
            # Periodic progress report.
            ring.dump_usage(filenum, avg_space_per_file)
        servers = ring.servers_for_si(si)
        #print ring.show_servers(servers[:opts["N"]])
        remaining_shares = opts["N"]
        index = 0
        server_was_full = False
        file_was_wrapped = False
        remaining_servers = set(servers)
        while remaining_shares:
            # Walk the peer list, wrapping around if some servers reject.
            if index >= len(servers):
                index = 0
                file_was_wrapped = True
            s = servers[index]
            accepted = s.upload(sharesize)
            if not accepted:
                server_was_full = True
                remaining_servers.discard(s)
                if not remaining_servers:
                    # Every server rejected this share: the grid is full.
                    print "-- GRID IS FULL"
                    ring.dump_usage(filenum, avg_space_per_file)
                    return filenum
                index += 1
                continue
            remaining_shares -= 1
            index += 1
        # file is done being uploaded
        if server_was_full and all_servers_have_room:
            all_servers_have_room = False
            print "-- FIRST SERVER FULL"
            ring.dump_usage(filenum, avg_space_per_file)
        if file_was_wrapped and no_files_have_wrapped:
            no_files_have_wrapped = False
            print "-- FIRST FILE WRAPPED"
            ring.dump_usage(filenum, avg_space_per_file)
def do_ring(opts):
    # Build one ring per the options, print the expected capacity, and
    # simulate uploads until full.  (Python 2 print statements.)
    total_capacity = opts["servers"]*SERVER_CAPACITY
    avg_space_per_file = avg_filesize * opts["N"] / opts["k"]
    avg_files = total_capacity / avg_space_per_file
    print "expected number of uploads:", avg_files
    if opts["permute"]:
        print " PERMUTED"
    else:
        print " LINEAR"
    seed = opts["seed"]
    ring = Ring(opts["servers"], seed, opts["permute"])
    do_run(ring, opts)
def run(opts):
    """Entry point: build a ring per *opts* and simulate uploads into it."""
    do_ring(opts)
# Script entry point: parse command-line options and run the simulation.
if __name__ == "__main__":
    opts = Options()
    opts.parseOptions()
    run(opts)
| gpl-2.0 |
NewpTone/stacklab-nova | nova/virt/baremetal/tilera.py | 7 | 12646 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tilera back-end for bare-metal compute node provisioning
The details of this implementation are specific to ISI's testbed. This code
is provided here as an example of how to implement a backend.
"""
import base64
import subprocess
import time
from nova.compute import power_state
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
FLAGS = flags.FLAGS
tilera_opts = [
cfg.StrOpt('tile_monitor',
default='/usr/local/TileraMDE/bin/tile-monitor',
help='Tilera command line program for Bare-metal driver')
]
FLAGS.register_opts(tilera_opts)
LOG = logging.getLogger(__name__)
def get_baremetal_nodes():
    # Factory hook used by the bare-metal driver; BareMetalNodes is a
    # singleton, so repeated calls return the same manager instance.
    return BareMetalNodes()
class BareMetalNodes(object):
    """
    This manages node information and implements singleton.

    BareMetalNodes class handles machine architectures of interest to
    technical computing users have either poor or non-existent support
    for virtualization.
    """
    _instance = None
    _is_init = False

    def __new__(cls, *args, **kwargs):
        """
        Returns the BareMetalNodes singleton.
        """
        if not cls._instance or ('new' in kwargs and kwargs['new']):
            cls._instance = super(BareMetalNodes, cls).__new__(cls)
        return cls._instance

    def __init__(self, file_name="/tftpboot/tilera_boards"):
        """
        Only call __init__ the first time object is instantiated.

        From the bare-metal node list file: /tftpboot/tilera_boards,
        reads each item of each node such as node ID, IP address,
        MAC address, vcpus, memory, hdd, hypervisor type/version, and cpu
        and appends each node information into nodes list.
        """
        if self._is_init:
            return
        self._is_init = True
        self.nodes = []
        # Column indexes within each whitespace-separated board line.
        self.BOARD_ID = 0
        self.IP_ADDR = 1
        self.MAC_ADDR = 2
        self.VCPUS = 3
        self.MEMORY_MB = 4
        self.LOCAL_GB = 5
        self.MEMORY_MB_USED = 6
        self.LOCAL_GB_USED = 7
        self.HYPERVISOR_TYPE = 8
        self.HYPERVISOR_VER = 9
        self.CPU_INFO = 10
        fp = open(file_name, "r")
        for item in fp:
            l = item.split()
            if l[0] == '#':  # skip comment lines in the board file
                continue
            # Note: node_id is stored as an int; lookups elsewhere must
            # compare against ints, not strings.
            l_d = {'node_id': int(l[self.BOARD_ID]),
                   'ip_addr': l[self.IP_ADDR],
                   'mac_addr': l[self.MAC_ADDR],
                   'status': power_state.NOSTATE,
                   'vcpus': int(l[self.VCPUS]),
                   'memory_mb': int(l[self.MEMORY_MB]),
                   'local_gb': int(l[self.LOCAL_GB]),
                   'memory_mb_used': int(l[self.MEMORY_MB_USED]),
                   'local_gb_used': int(l[self.LOCAL_GB_USED]),
                   'hypervisor_type': l[self.HYPERVISOR_TYPE],
                   'hypervisor_version': int(l[self.HYPERVISOR_VER]),
                   'cpu_info': l[self.CPU_INFO]}
            self.nodes.append(l_d)
        fp.close()

    def get_hw_info(self, field):
        """
        Returns hardware information of bare-metal node by the given field.

        Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
        local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
        """
        # NOTE(review): only node 9 is consulted here; presumably it is the
        # representative board for this testbed -- confirm against config.
        for node in self.nodes:
            if node['node_id'] == 9:
                if field == 'vcpus':
                    return node['vcpus']
                elif field == 'memory_mb':
                    return node['memory_mb']
                elif field == 'local_gb':
                    return node['local_gb']
                elif field == 'memory_mb_used':
                    return node['memory_mb_used']
                elif field == 'local_gb_used':
                    return node['local_gb_used']
                elif field == 'hypervisor_type':
                    return node['hypervisor_type']
                elif field == 'hypervisor_version':
                    return node['hypervisor_version']
                elif field == 'cpu_info':
                    return node['cpu_info']

    def set_status(self, node_id, status):
        """
        Sets status of the given node by the given status.

        Returns True if the node is in the nodes list.
        """
        for node in self.nodes:
            if node['node_id'] == node_id:
                node['status'] = status
                return True
        return False

    def get_status(self):
        """
        Gets status of the given node.
        """
        pass

    def get_idle_node(self):
        """
        Gets an idle node, sets the status as 1 (RUNNING) and Returns node ID.
        """
        for item in self.nodes:
            if item['status'] == 0:
                item['status'] = 1      # make status RUNNING
                return item['node_id']
        raise exception.NotFound("No free nodes available")

    def get_ip_by_id(self, id):
        """
        Returns default IP address of the given node.
        """
        for item in self.nodes:
            if item['node_id'] == id:
                return item['ip_addr']

    def free_node(self, node_id):
        """
        Sets/frees status of the given node as 0 (IDLE).
        """
        LOG.debug(_("free_node...."))
        for item in self.nodes:
            # Bug fix: node ids are stored as ints (see __init__), so the
            # previous comparison against str(node_id) could never match
            # and nodes were never returned to the IDLE pool.  Coerce to
            # int so both int and string callers work.
            if item['node_id'] == int(node_id):
                item['status'] = 0      # make status IDLE

    def power_mgr(self, node_id, mode):
        """
        Changes power state of the given node.

        According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
        changed. /tftpboot/pdu_mgr script handles power management of
        PDU (Power Distribution Unit).
        """
        # Nodes 1-4 hang off PDU 1 (outlets 6-9); the rest use PDU 2.
        if node_id < 5:
            pdu_num = 1
            pdu_outlet_num = node_id + 5
        else:
            pdu_num = 2
            pdu_outlet_num = node_id
        path1 = "10.0.100." + str(pdu_num)
        utils.execute('/tftpboot/pdu_mgr', path1, str(pdu_outlet_num),
                      str(mode), '>>', 'pdu_output')

    def deactivate_node(self, node_id):
        """
        Deactivates the given node by turnning it off.

        /tftpboot/fs_x directory is a NFS of node#x
        and /tftpboot/root_x file is an file system image of node#x.
        """
        node_ip = self.get_ip_by_id(node_id)
        LOG.debug(_("deactivate_node is called for "
                    "node_id = %(id)s node_ip = %(ip)s"),
                  {'id': str(node_id), 'ip': node_ip})
        for item in self.nodes:
            if item['node_id'] == node_id:
                LOG.debug(_("status of node is set to 0"))
                item['status'] = 0
        self.power_mgr(node_id, 2)
        self.sleep_mgr(5)
        path = "/tftpboot/fs_" + str(node_id)
        pathx = "/tftpboot/root_" + str(node_id)
        utils.execute('sudo', '/usr/sbin/rpc.mountd')
        try:
            utils.execute('sudo', 'umount', '-f', pathx)
            utils.execute('sudo', 'rm', '-f', pathx)
        except Exception:
            # Best-effort cleanup: the rootfs may already be gone.
            LOG.debug(_("rootfs is already removed"))

    def network_set(self, node_ip, mac_address, ip_address):
        """
        Sets network configuration based on the given ip and mac address.

        User can access the bare-metal node using ssh.
        """
        cmd = (FLAGS.tile_monitor +
               " --resume --net " + node_ip + " --run - " +
               "ifconfig xgbe0 hw ether " + mac_address +
               " - --wait --run - ifconfig xgbe0 " + ip_address +
               " - --wait --quit")
        subprocess.Popen(cmd, shell=True)
        #utils.execute(cmd, shell=True)
        self.sleep_mgr(5)

    def iptables_set(self, node_ip, user_data):
        """
        Sets security setting (iptables:port) if needed.

        iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
        /tftpboot/iptables_rule script sets iptables rule on the given node.
        """
        if user_data != '':
            open_ip = base64.b64decode(user_data)
            utils.execute('/tftpboot/iptables_rule', node_ip, open_ip)

    def check_activated(self, node_id, node_ip):
        """
        Checks whether the given node is activated or not.
        """
        LOG.debug(_("Before ping to the bare-metal node"))
        tile_output = "/tftpboot/tile_output_" + str(node_id)
        grep_cmd = ("ping -c1 " + node_ip + " | grep Unreachable > " +
                    tile_output)
        subprocess.Popen(grep_cmd, shell=True)
        self.sleep_mgr(5)
        file = open(tile_output, "r")
        # find() returns -1 when "Unreachable" is absent, i.e. ping worked.
        out_msg = file.readline().find("Unreachable")
        utils.execute('sudo', 'rm', tile_output)
        if out_msg == -1:
            LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"),
                      locals())
            return True
        else:
            LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
                        " out_msg=%(out_msg)s"), locals())
            self.power_mgr(node_id, 2)
            return False

    def vmlinux_set(self, node_id, mode):
        """
        Sets kernel into default path (/tftpboot) if needed.

        From basepath to /tftpboot, kernel is set based on the given mode
        such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
        """
        LOG.debug(_("Noting to do for tilera nodes: vmlinux is in CF"))

    def sleep_mgr(self, time_in_seconds):
        """
        Sleeps until the node is activated.
        """
        time.sleep(time_in_seconds)

    def ssh_set(self, node_ip):
        """
        Sets and Runs sshd in the node.
        """
        cmd = (FLAGS.tile_monitor +
               " --resume --net " + node_ip + " --run - " +
               "/usr/sbin/sshd - --wait --quit")
        subprocess.Popen(cmd, shell=True)
        self.sleep_mgr(5)

    def activate_node(self, node_id, node_ip, name, mac_address,
                      ip_address, user_data):
        """
        Activates the given node using ID, IP, and MAC address.
        """
        LOG.debug(_("activate_node"))
        self.power_mgr(node_id, 2)
        self.power_mgr(node_id, 3)
        self.sleep_mgr(100)
        try:
            self.check_activated(node_id, node_ip)
            self.network_set(node_ip, mac_address, ip_address)
            self.ssh_set(node_ip)
            self.iptables_set(node_ip, user_data)
            return power_state.RUNNING
        except Exception as ex:
            # Activation failed part-way: power the node back down before
            # surfacing the error.
            self.deactivate_node(node_id)
            raise exception.NovaException(_("Node is unknown error state."))

    def get_console_output(self, console_log, node_id):
        """
        Gets console output of the given node.
        """
        node_ip = self.get_ip_by_id(node_id)
        log_path = "/tftpboot/log_" + str(node_id)
        kmsg_cmd = (FLAGS.tile_monitor +
                    " --resume --net " + node_ip +
                    " -- dmesg > " + log_path)
        subprocess.Popen(kmsg_cmd, shell=True)
        self.sleep_mgr(5)
        utils.execute('cp', log_path, console_log)

    def get_image(self, bp):
        """
        Gets the bare-metal file system image into the instance path.

        Noting to do for tilera nodes: actual image is used.
        """
        path_fs = "/tftpboot/tilera_fs"
        path_root = bp + "/root"
        utils.execute('cp', path_fs, path_root)

    def set_image(self, bpath, node_id):
        """
        Sets the PXE bare-metal file system from the instance path.

        This should be done after ssh key is injected.
        /tftpboot/fs_x directory is a NFS of node#x.
        /tftpboot/root_x file is an file system image of node#x.
        """
        path1 = bpath + "/root"
        pathx = "/tftpboot/root_" + str(node_id)
        path2 = "/tftpboot/fs_" + str(node_id)
        utils.execute('sudo', 'mv', path1, pathx)
        utils.execute('sudo', 'mount', '-o', 'loop', pathx, path2)
| apache-2.0 |
verma-varsha/zulip | zerver/webhooks/mention/tests.py | 36 | 1368 | # -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class MentionHookTests(WebhookTestCase):
    # Integration test for the Mention.com webhook endpoint: posts the
    # recorded "webfeeds" fixture and checks the rendered stream message.
    STREAM_NAME = 'test'
    URL_TEMPLATE = "/api/v1/external/mention?api_key={api_key}&stream={stream}"
    FIXTURE_DIR_NAME = 'mention'

    def test_mention_webfeed(self):
        # type: () -> None
        expected_topic = u"news"
        # Expected Zulip-rendered markdown for the fixture payload.
        expected_message = (u"**[Historical Sexual Abuse (Football): 29 Nov 2016: House of Commons debates - TheyWorkForYou]"
                            u"(https://www.theyworkforyou.com/debates/?id=2016-11-29b.1398.7&p=24887)**:\n"
                            u"\u2026 Culture, Media and Sport\nNothing is more important than keeping children safe."
                            u" Child sex abuse is an exceptionally vile crime, and all of Government take it very seriously indeed,"
                            u" as I know this House does.\nChildren up and down the country are \u2026"
                            )

        # use fixture named mention_webfeeds
        self.send_and_test_stream_message('webfeeds', expected_topic, expected_message,
                                          content_type="application/x-www-form-urlencoded")

    def get_body(self, fixture_name):
        # type: (Text) -> Text
        return self.fixture_data("mention", fixture_name, file_type="json")
| apache-2.0 |
hzlf/openbroadcast.org | website/tools/postman/management/commands/postman_cleanup.py | 2 | 2236 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import timedelta
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.db.models import Max, Count, F, Q
try:
from django.utils.timezone import now # Django 1.4 aware datetimes
except ImportError:
from datetime import datetime
now = datetime.now
from postman.models import Message
class Command(NoArgsCommand):
    """Management command that hard-deletes postman messages soft-deleted
    by both parties at least ``--days`` days ago."""
    help = """Can be run as a cron job or directly to clean out old data from the database:
    Messages or conversations marked as deleted by both sender and recipient,
    more than a minimal number of days ago."""
    option_list = NoArgsCommand.option_list + (
        make_option(
            "-d",
            "--days",
            type="int",
            default=30,
            help="The minimal number of days a message is kept marked as deleted, "
            "before to be considered for real deletion [default: %default]",
        ),
    )

    def handle_noargs(self, **options):
        """Delete qualifying messages and whole conversations older than the cutoff."""
        verbose = int(options.get("verbosity"))
        days = options.get("days")
        # Cutoff: anything marked deleted on or before this moment qualifies.
        date = now() - timedelta(days=days)
        if verbose >= 1:
            self.stdout.write(
                "Erase messages and conversations marked as deleted before %s\n" % date
            )
        # for a conversation to be candidate, all messages must satisfy the criteria
        # (Count() skips NULLs, so s_cnt == cnt means every message in the
        # thread has sender_deleted_at set; likewise for the recipient side.)
        tpks = (
            Message.objects.filter(thread__isnull=False)
            .values("thread")
            .annotate(
                cnt=Count("pk"),
                s_max=Max("sender_deleted_at"),
                s_cnt=Count("sender_deleted_at"),
                r_max=Max("recipient_deleted_at"),
                r_cnt=Count("recipient_deleted_at"),
            )
            .order_by()
            .filter(s_cnt=F("cnt"), r_cnt=F("cnt"), s_max__lte=date, r_max__lte=date)
            .values_list("thread", flat=True)
        )
        # Delete whole qualifying conversations plus standalone messages
        # that both sides deleted before the cutoff.
        Message.objects.filter(
            Q(thread__in=tpks)
            | Q(
                thread__isnull=True,
                sender_deleted_at__lte=date,
                recipient_deleted_at__lte=date,
            )
        ).delete()
| gpl-3.0 |
DasIch/pyalysis | pyalysis/analysers/base.py | 1 | 3771 | # coding: utf-8
"""
pyalysis.analysers.base
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2014 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import absolute_import
import tokenize
from blinker import Signal
from pyalysis.utils import PerClassAttribute, retain_file_position
from pyalysis._compat import PY2
class AnalyserBase(object):
    """
    A base class for analysers. To implement an analyser you should subclass
    this class and implement :meth:`analyse`.
    """
    #: :class:`blinker.Signal` instance that will be called by :meth:`analyse`,
    #: with the :class:`AnalyserBase` instance as sender.
    on_analyse = PerClassAttribute(Signal)

    def __init__(self, module):
        #: The module being analysed as a file-like object opened in read-only
        #: bytes mode.
        self.module = module
        #: A list with the lines in the module.
        with retain_file_position(self.module):
            self.physical_lines = [line.rstrip() for line in self.module]
        with retain_file_position(self.module):
            #: A list with the logical lines in the module.
            self.logical_lines = []
            self.logical_line_linenos = []
            self._index2logical_line_index = []
            for logical_line_index, (start, end, line) in enumerate(
                get_logical_lines(self.module)
            ):
                # Map every physical lineno covered by this logical line
                # back to the logical line's index, so later lookups by
                # physical lineno are O(1) list indexing.
                for _ in range(end - start + 1):
                    self._index2logical_line_index.append(logical_line_index)
                self.logical_line_linenos.append((start, end))
                self.logical_lines.append(line)
        #: A list of warnings generated by the analyser.
        self.warnings = []

    def get_logical_lines(self, start, end):
        """
        Returns an iterator of the logical lines between the given `start` and
        `end` location.
        """
        if start.line == end.line:
            logical_line_indices = [
                self._index2logical_line_index[start.line - 1]
            ]
        else:
            # NOTE(review): range(start.line, end.line) excludes end.line
            # itself -- confirm whether `end` is meant to be exclusive here.
            logical_line_indices = sorted({
                self._index2logical_line_index[lineno - 1]
                for lineno in range(start.line, end.line)
            })
        for index in logical_line_indices:
            yield self.logical_lines[index]

    def get_logical_line_range(self, lineno):
        """
        Returns a tuple containing the first and last line number of the
        logical line in which the given `lineno` is contained.
        """
        logical_line_index = self._index2logical_line_index[lineno - 1]
        return self.logical_line_linenos[logical_line_index]

    def emit(self, warning_cls, message, start, end):
        """
        Adds an instance of `warning_cls` to :attr:`warnings`.

        `warning_cls` will be called with the warning `message`, the name of
        the module in which the warning occurred, the `start` and `end`
        location of the code being warned about and a list of logical lines
        corresponding to the given locations.
        """
        self.warnings.append(
            warning_cls(
                message, self.module.name, start, end,
                list(self.get_logical_lines(start, end))
            )
        )

    def analyse(self):
        """
        Analyses the module and returns :attr:`warnings` for convenience.
        """
        # NOTE(review): the `on_analyse` attribute doc above says the signal
        # is sent by analyse(), but no send happens here; presumably
        # subclasses override this method -- confirm.
        return self.warnings
def get_logical_lines(file):
    """
    Yield ``(start_lineno, end_lineno, text)`` for each logical line of the
    given file-like object, using the stdlib tokenizer.
    """
    # tokenize.tokenize wants a bytes readline on Python 3; generate_tokens
    # is the Python 2 counterpart.
    tokenizer = tokenize.generate_tokens if PY2 else tokenize.tokenize
    last_line = 0
    for _, _, start, end, text in tokenizer(file.readline):
        # Only emit when a token starts past everything already covered,
        # so each logical line is reported once.
        if start[0] > last_line:
            yield start[0], end[0], text.rstrip()
        last_line = end[0]
| bsd-3-clause |
alexandreleroux/mayavi | integrationtests/mayavi/test_optional_collection.py | 2 | 4211 | """Simple test for the Optional and Collection filters.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Prabhu Ramachandran
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from StringIO import StringIO
import copy
# Local imports.
from common import TestCase, get_example_data
class TestOptionalCollection(TestCase):
    """Integration test for the Optional and Collection mayavi filters."""

    def test(self):
        """Entry point used by the harness and by __main__."""
        self.main()

    def do(self):
        """Build a Contour+Optional(Normals) collection pipeline and verify it
        survives enable/disable, save/load and deep-copy."""
        ############################################################
        # Imports.
        script = self.script
        from mayavi.sources.vtk_file_reader import VTKFileReader
        from mayavi.filters.contour import Contour
        from mayavi.filters.optional import Optional
        from mayavi.filters.collection import Collection
        from mayavi.filters.api import PolyDataNormals
        from mayavi.modules.api import Surface

        ############################################################
        # Create a new scene and set up the visualization.
        s = self.new_scene()
        # Read a VTK (old style) data file.
        r = VTKFileReader()
        r.initialize(get_example_data('heart.vtk'))
        script.add_source(r)
        c = Contour()
        # `name` is used for the notebook tabs.
        n = PolyDataNormals(name='Normals')
        o = Optional(filter=n, label_text='Compute normals')
        coll = Collection(filters=[c, o], name='IsoSurface')
        script.add_filter(coll)
        # NOTE(review): `s` is rebound here from the scene to the Surface
        # module, and the rebound value is later passed to close_scene() --
        # confirm that is intended.
        s = Surface()
        script.add_module(s)

        ########################################
        # do the testing.
        def check(coll):
            """Check if test status is OK given the collection."""
            c, o = coll.filters
            c = c.filter
            n = o.filter
            assert coll.outputs[0].point_data.scalars.range == (127.5, 127.5)
            # Adding a contour should create the appropriate output in
            # the collection.
            c.contours.append(200)
            assert coll.outputs[0].point_data.scalars.range == (127.5, 200.0)
            # the collection's output should be that of the normals.
            assert coll.outputs[0] is n.outputs[0]
            # disable the optional filter and check.
            o.enabled = False
            assert 'disabled' in o.name
            assert coll.outputs[0] is c.outputs[0]
            # Set back everything to original state.
            c.contours.pop()
            o.enabled = True
            assert coll.outputs[0].point_data.scalars.range == (127.5, 127.5)
            assert coll.outputs[0] is n.outputs[0]
            assert 'disabled' not in o.name
        check(coll)

        ############################################################
        # Test if saving a visualization and restoring it works.

        # Save visualization.
        f = StringIO()
        f.name = abspath('test.mv2')  # We simulate a file.
        script.save_visualization(f)
        f.seek(0)  # So we can read this saved data.
        # Remove existing scene.
        engine = script.engine
        engine.close_scene(s)
        # Load visualization
        script.load_visualization(f)
        s = engine.current_scene
        # Now do the check.
        coll = s.children[0].children[0]
        check(coll)

        ############################################################
        # Test if the Mayavi2 visualization can be deep-copied.

        # Pop the source object.
        source = s.children.pop()
        # Add it back to see if that works without error.
        s.children.append(source)
        # Now do the check.
        coll = s.children[0].children[0]
        check(coll)

        # Now deepcopy the source and replace the existing one with
        # the copy. This basically simulates cutting/copying the
        # object from the UI via the right-click menu on the tree
        # view, and pasting the copy back.
        source1 = copy.deepcopy(source)
        s.children[0] = source1
        # Now do the check.
        coll = s.children[0].children[0]
        check(coll)
        # If we have come this far, we are golden!
if __name__ == "__main__":
t = TestOptionalCollection()
t.test()
| bsd-3-clause |
senadmd/coinmarketwatch | test/functional/abandonconflict.py | 19 | 7851 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already conflicted or abandoned.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class AbandonConflictTest(BitcoinTestFramework):
    """Exercise abandontransaction across wallet restarts, relay-fee
    evictions, re-broadcasts and a conflicting double-spend + reorg."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False
        # Node 0 starts with a very low min relay fee; restarting it with a
        # higher -minrelaytxfee is how the test evicts its own transactions.
        self.extra_args = [["-minrelaytxfee=0.00001"], []]

    def run_test(self):
        """Drive the whole abandon/unabandon/conflict scenario on node 0."""
        self.nodes[1].generate(100)
        sync_blocks(self.nodes)
        balance = self.nodes[0].getbalance()
        # Three self-sends of 10 BTC each; their outputs seed the later txs.
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        sync_mempools(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        newbalance = self.nodes[0].getbalance()
        assert(balance - newbalance < Decimal("0.001"))  # no more than fees lost
        balance = newbalance
        # Disconnect nodes so node0's transactions don't get into node1's mempool
        disconnect_nodes(self.nodes[0], 1)
        # Identify the 10btc outputs
        nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
        nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
        nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
        inputs =[]
        # spend 10btc outputs from txA and txB
        inputs.append({"txid":txA, "vout":nA})
        inputs.append({"txid":txB, "vout":nB})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
        # Identify the 14.99998btc output
        nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid":txAB1, "vout":nAB})
        inputs.append({"txid":txC, "vout":nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
        balance = newbalance
        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.stop_node(0)
        self.nodes[0] = self.start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.0001"])
        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        # Unconfirmed received funds that are not in mempool, also shouldn't show
        # up in unconfirmed balance
        unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
        assert_equal(unconfbalance, newbalance)
        # Also shouldn't show up in listunspent
        assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
        balance = newbalance
        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance
        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.stop_node(0)
        self.nodes[0] = self.start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.00001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)
        # But if its received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance
        # Send child tx again so its unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance
        # Remove using high relay fee again
        self.stop_node(0)
        self.nodes[0] = self.start_node(0, self.options.tmpdir, ["-minrelaytxfee=0.0001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance
        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs =[]
        inputs.append({"txid":txA, "vout":nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransaction(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)
        connect_nodes(self.nodes[0], 1)
        sync_blocks(self.nodes)
        # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance
        # There is currently a minor bug around this and so this test doesn't work.  See Issue #7315
        # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        self.log.info("conflicted has not resumed causing its inputs to be seen as spent.  See Issue #7315")
        self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
    # Entry point when run as a standalone functional test.
    AbandonConflictTest().main()
| mit |
tpbrisco/bender | benders_ui.py | 1 | 10713 | #!/usr/bin/python
"""Interface to a simple policy database representing network policy
This allows owners to update the own "host groups" and "service templates"
which allow policy statements.
While this version uses a CSV files, it should be easily
extensible to use more conventional databases.
"""
import bender_sql as bender
import socket, sys
from flask import Flask, request, url_for, render_template, redirect
# gethostaddr - similar to socket.gethostbyname() - but use getaddrinfo() to deal
# with IPv6 addresses
def gethostaddr(name):
    """Resolve *name* to an IP address string (IPv4 or IPv6).

    If *name* already looks like an IP address or network (contains only
    hex digits, dots, colons and slashes) it is returned unchanged.
    Otherwise the first address returned by socket.getaddrinfo() is used.

    :raises socket.gaierror: if the name cannot be resolved.
    """
    # a _very_ bad way to test if name is an IP address or IP network
    # (v4 or v6); kept from the original implementation.
    if name.strip('0123456789/.:abcdefABCDEF') == '':
        return name  # _probably_ an IPv4 or IPv6 address or network
    try:
        h_infos = socket.getaddrinfo(name, None, 0, 0, socket.SOL_IP)
    except socket.gaierror as e:
        # BUG FIX: this path formerly referenced undefined names
        # (`errors`, `errors_nl`) and passed the exception object to
        # socket.gai_strerror(), which expects an int error code -- both
        # raised fresh exceptions and masked the real resolution failure.
        print("bender.gethostaddr(%s): %s" % (name, e))
        raise
    # go for the first item returned in the array
    return h_infos[0][4][0]
# set up initial Flask and SQLAlchemy stuff
b_ui = Flask(__name__, static_url_path='/static')
# Database URI comes from a bender.cf [database] section if present ...
db_cfg = bender.read_config("database",['/etc/bender.cf','bender.cf'])
if len(sys.argv) < 2 and db_cfg['uri'] == '':
    # ... otherwise it must be supplied on the command line.
    print "benders_ui <sqlalchemy URI>"
    print "\tmysql:///user:pass@hostname:3306/bender"
    sys.exit(1)
if db_cfg['uri'] == '':
    db_uri = sys.argv[1]
else:
    # Config-file URI wins over the command-line argument.
    db_uri = db_cfg['uri']
# Load the databases (module-level handles used by all route handlers below).
hg = bender.host_group(db_uri, 'hostgroups')
sg = bender.service_template(db_uri, 'service_templates')
pg = bender.policy_group(db_uri, 'policy')
sdp = bender.policy_render(db_uri, 'sdp')
#
# Set up Flask main page, which has links to everything else
#
@b_ui.route('/index')
@b_ui.route('/')
def index_hostgroups():
    """Render the main page: host groups, service templates, policies and
    rendered SDP rows, plus any status messages passed via query string."""
    # Messages forwarded by other handlers through redirect query args.
    hostgroup_err = request.args.get('hg_msg')
    service_err = request.args.get('svc_msg')
    policy_err = request.args.get('pol_msg')
    sdp_err = request.args.get('sdp_msg')
    # Sort each table so members of the same group appear next to each other.
    r_info = [row.copy() for row in sorted(hg.select(), key=lambda k: k['hg_name'])]
    q_info = [row.copy() for row in sorted(sg.select(), key=lambda k: k['st_name'])]
    p_info = [row.copy() for row in sorted(pg.select(), key=lambda k: k['p_name'])]
    sdp_info = [row.copy() for row in sorted(sdp.select(), key=lambda k: k['sdp_group'])]
    return render_template('benders_index.html',
                           groupinfo=r_info, hg_error=hostgroup_err,
                           svcinfo=q_info, svc_error=service_err,
                           polinfo=p_info, pol_error=policy_err,
                           sdpinfo=sdp_info, sdp_error=sdp_err)
#####################################################
# Group management
#####################################################
@b_ui.route('/delgroup', methods=['POST'])
def delete_group():
    """Delete every member row of the POSTed host group."""
    g = request.form['group']
    if not g:
        # BUG FIX: this message formerly printed the undefined name `s`,
        # raising NameError whenever an empty group name was submitted.
        print("Web: *** delete_group - no group specified: %s" % g)
        return redirect(url_for('index_hostgroups'))
    print("Web: delete group %s" % g)
    for d in hg.select(hg_name=g):
        hg.delete(d)
    return redirect(url_for('index_hostgroups'))
@b_ui.route('/delmember', methods=['POST'])
def delete_member():
m = request.form['member']
g = request.form['group']
if not m or not g:
print "Web: *** delete_member - no member specified:", m
return redirect(url_for('index_hostgroups'))
dm = hg.select(hg_name=g, hg_member=m)
print "Web: delete member ", m, "returned", dm
for m in dm:
hg.delete(m)
return redirect(url_for('index_hostgroups'))
@b_ui.route('/addgroup', methods=['POST'])
def add_group():
m = request.form['groupmember']
g = request.form['groupname']
t = request.form['grouptype']
o = request.form['groupowner']
r = request.form['grouprp']
if not m or not g:
print "Web: *** add_group - no member or group specified:", g
return redirect(url_for('index_hostgroups', hg_msg="no member specified"))
print "Web: add member", m, "to group", g
hg.add(hg_name=g, hg_member=m, hg_type=t, hg_owner=o, hg_rp=r)
return redirect(url_for('index_hostgroups')+"#groups")
@b_ui.route('/savegroup', methods=['POST'])
def save_group():
    """Persist the host-group table to its CSV backing file."""
    hg.save("testdata/mock-hostdb.csv")
    target = url_for('index_hostgroups') + "#groups"
    return redirect(target)
#####################################################
# service management
#####################################################
@b_ui.route('/deletesvc', methods=['POST'])
def delete_service():
sname = request.form['name']
print "Web: *** delete_service", sname
sl = sg.select(st_name=sname)
for s in sl:
sg.delete(s)
return redirect(url_for('index_hostgroups')+"#services")
@b_ui.route('/deletesvcline', methods=['POST'])
def delete_service_line():
lname = request.form['name']
print "Web: *** delete_service line", lname
# sl = sg.select(name=sname)
sl = sg.select(st_name=request.form['name'],
st_port=request.form['port'],
st_protocol=request.form['protocol'],
st_transport=request.form['transport'],
st_bidir=request.form['bidir'],
st_owner=request.form['owner'],
st_rp=request.form['rp'])
for s in sl:
sg.delete(s)
return redirect(url_for('index_hostgroups')+"#services")
@b_ui.route('/addservice', methods=['POST'])
def add_service():
    """Add one service-template row; the port must be numeric."""
    form = request.form
    name = form['name']
    port = form['port']
    protocol = form['protocol']
    transport = form['transport']
    bidir = form['bidir']
    owner = form['owner']
    rp = form['rp']
    # Guard clause: reject non-numeric ports with a pop-up message.
    if not port.isdigit():
        return redirect(url_for('index_hostgroups', svc_msg='port must be a number') + "#services")
    sg.add(st_name=name,
           st_port=port,
           st_protocol=protocol,
           st_bidir=bidir,
           st_transport=transport,
           st_owner=owner,
           st_rp=rp)
    return redirect(url_for('index_hostgroups') + "#services")
@b_ui.route('/saveservice', methods=['POST'])
def save_service():
    """Persist the service-template table to its CSV backing file."""
    sg.save("testdata/mock-svcdb.csv")
    target = url_for('index_hostgroups') + "#services"
    return redirect(target)
#####################################################
# Policy management
#####################################################
@b_ui.route('/delpolicyline', methods=['POST'])
def delete_policy_line():
    """Delete one policy row matching all POSTed fields."""
    form = request.form
    matches = pg.select(p_name=form['name'],
                        p_source=form['source'],
                        p_destination=form['destination'],
                        p_template=form['template'])
    for row in matches:
        pg.delete(row)
    return redirect(url_for('index_hostgroups') + "#policies")
@b_ui.route('/delpolicy', methods=['POST'])
def delete_policy():
    """Delete every row of the POSTed policy."""
    for row in pg.select(p_name=request.form['name']):
        pg.delete(row)
    return redirect(url_for('index_hostgroups') + "#policies")
@b_ui.route('/addpolicy', methods=['POST'])
def add_policy():
    """Add one policy row linking a source group, a destination group and
    a service template."""
    form = request.form
    pg.add(p_name=form['name'],
           p_source=form['source'],
           p_destination=form['destination'],
           p_template=form['template'])
    return redirect(url_for('index_hostgroups') + "#policies")
@b_ui.route('/savepolicy', methods=['POST'])
def save_policy():
    """Persist the policy table to its CSV backing file."""
    pg.save("testdata/mock-poldb.csv")
    target = url_for('index_hostgroups') + "#policies"
    return redirect(target)
#####################################################
# Rendering functions
#####################################################
@b_ui.route('/rendersdp', methods=['POST'])
def render_sdp():
    """Render all policies into concrete source/destination/port (SDP) rows.

    For every policy, expand the cartesian product of its source host
    group, destination host group and service template, resolving member
    names to IP addresses.  Name-resolution failures are collected into
    an error string shown to the user instead of aborting the render.
    """
    errors = ''
    errors_nl = ''
    # regenerate what we need
    for p in pg.select():
        for src in hg.select(hg_name=p['p_source']):
            for dst in hg.select(hg_name=p['p_destination']):
                for svc in sg.select(st_name=p['p_template']):
                    name = "%s_%s_%s" % (src['hg_name'], dst['hg_name'], svc['st_name'])
                    try:
                        source_ip = gethostaddr(src['hg_member'])
                        destination_ip = gethostaddr(dst['hg_member'])
                    # BUG FIX: was a bare "except:", which also swallowed
                    # KeyboardInterrupt/SystemExit and programming errors;
                    # only resolution failures (socket.gaierror, re-raised
                    # by gethostaddr) are expected here.
                    except socket.gaierror:
                        e_msg = "Error looking up %s or %s" % (src['hg_member'], dst['hg_member'])
                        print(e_msg)
                        errors = errors + errors_nl + e_msg
                        errors_nl = '\r\n'
                        continue  # skip unresolvable pairs, keep rendering
                    # Never emit a rule from a host to itself.
                    if src['hg_member'] != dst['hg_member']:
                        sdp.add(sdp_group=p['p_name'],
                                sdp_name=name,
                                sdp_source=src['hg_member'],
                                sdp_source_ip=source_ip,
                                sdp_destination=dst['hg_member'],
                                sdp_destination_ip=destination_ip,
                                sdp_bidir=svc['st_bidir'],
                                sdp_port=svc['st_port'],
                                sdp_protocol=svc['st_protocol'])
    sdp.save('testdata/mock-sdpdb.csv')
    return redirect(url_for('index_hostgroups', sdp_msg=errors) + "#renderedpolicies")
@b_ui.route('/resetsdp', methods=['POST'])
def reset_sdp():
    """Erase all rendered SDP rows (usually done prior to a full recompute)."""
    sdp.zero()
    sdp.save('testdata/mock-sdpdb.csv')
    return redirect(url_for('index_hostgroups') + "#renderedpolicies")
if __name__ == '__main__':
    # Development server: listen on all interfaces, port 5010, reloader off.
    b_ui.run(debug=True, use_reloader=False, host='0.0.0.0', port=5010)
| gpl-2.0 |
ashrith/dpkt | dpkt/diameter.py | 15 | 5848 | # $Id: diameter.py 23 2006-11-08 15:45:33Z dugsong $
"""Diameter."""
import struct
import dpkt
# Diameter Base Protocol - RFC 3588
# http://tools.ietf.org/html/rfc3588

# Request/Answer Command Codes
ABORT_SESSION = 274
# NOTE(review): name is misspelled ("ACCOUNTING"); kept as-is for
# backward compatibility with existing callers.
ACCOUTING = 271
CAPABILITIES_EXCHANGE = 257
DEVICE_WATCHDOG = 280
DISCONNECT_PEER = 282
RE_AUTH = 258
SESSION_TERMINATION = 275
class Diameter(dpkt.Packet):
    """Diameter message header (RFC 3588 section 3) plus its AVP list.

    ``len`` and ``cmd`` are 24-bit fields: stored in the header as 3 raw
    bytes ('3s') and converted to/from ints by unpack()/pack_hdr().
    Byte handling uses ord()/chr() on str, i.e. Python 2 semantics.
    """
    __hdr__ = (
        ('v', 'B', 1),
        ('len', '3s', 0),
        ('flags', 'B', 0),
        ('cmd', '3s', 0),
        ('app_id', 'I', 0),
        ('hop_id', 'I', 0),
        ('end_id', 'I', 0)
    )

    # The R/P/E/T command flags occupy the top four bits of ``flags``;
    # each property below masks and shifts its single bit.
    def _get_r(self):
        return (self.flags >> 7) & 0x1

    def _set_r(self, r):
        self.flags = (self.flags & ~0x80) | ((r & 0x1) << 7)
    request_flag = property(_get_r, _set_r)

    def _get_p(self):
        return (self.flags >> 6) & 0x1

    def _set_p(self, p):
        self.flags = (self.flags & ~0x40) | ((p & 0x1) << 6)
    proxiable_flag = property(_get_p, _set_p)

    def _get_e(self):
        return (self.flags >> 5) & 0x1

    def _set_e(self, e):
        self.flags = (self.flags & ~0x20) | ((e & 0x1) << 5)
    error_flag = property(_get_e, _set_e)

    def _get_t(self):
        return (self.flags >> 4) & 0x1

    def _set_t(self, t):
        self.flags = (self.flags & ~0x10) | ((t & 0x1) << 4)
    retransmit_flag = property(_get_t, _set_t)

    def unpack(self, buf):
        """Decode the header, then parse the payload into AVP objects."""
        dpkt.Packet.unpack(self, buf)
        # Reassemble the 24-bit big-endian cmd and len fields.
        self.cmd = (ord(self.cmd[0]) << 16) | \
                   (ord(self.cmd[1]) << 8) | \
                   ord(self.cmd[2])
        self.len = (ord(self.len[0]) << 16) | \
                   (ord(self.len[1]) << 8) | \
                   ord(self.len[2])
        self.data = self.data[:self.len - self.__hdr_len__]
        # Consume the payload AVP by AVP; each AVP knows its own length.
        l = []
        while self.data:
            avp = AVP(self.data)
            l.append(avp)
            self.data = self.data[len(avp):]
        self.data = self.avps = l

    def pack_hdr(self):
        """Re-encode the 24-bit len/cmd ints back into 3-byte strings."""
        self.len = chr((self.len >> 16) & 0xff) + \
                   chr((self.len >> 8) & 0xff) + \
                   chr(self.len & 0xff)
        self.cmd = chr((self.cmd >> 16) & 0xff) + \
                   chr((self.cmd >> 8) & 0xff) + \
                   chr(self.cmd & 0xff)
        return dpkt.Packet.pack_hdr(self)

    def __len__(self):
        # Header plus the serialized length of every AVP.
        return self.__hdr_len__ + \
               sum(map(len, self.data))

    def __str__(self):
        return self.pack_hdr() + \
               ''.join(map(str, self.data))
class AVP(dpkt.Packet):
    """Diameter Attribute-Value Pair (RFC 3588 section 4).

    ``len`` is a 24-bit field; an optional 4-byte vendor id follows the
    fixed header when the V flag is set.
    """
    __hdr__ = (
        ('code', 'I', 0),
        ('flags', 'B', 0),
        ('len', '3s', 0),
    )

    # The V/M/P AVP flags occupy the top three bits of ``flags``.
    def _get_v(self):
        return (self.flags >> 7) & 0x1

    def _set_v(self, v):
        self.flags = (self.flags & ~0x80) | ((v & 0x1) << 7)
    vendor_flag = property(_get_v, _set_v)

    def _get_m(self):
        return (self.flags >> 6) & 0x1

    def _set_m(self, m):
        self.flags = (self.flags & ~0x40) | ((m & 0x1) << 6)
    mandatory_flag = property(_get_m, _set_m)

    def _get_p(self):
        return (self.flags >> 5) & 0x1

    def _set_p(self, p):
        self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
    protected_flag = property(_get_p, _set_p)

    def unpack(self, buf):
        """Decode the header; peel off the vendor id when the V flag is set."""
        dpkt.Packet.unpack(self, buf)
        self.len = (ord(self.len[0]) << 16) | \
                   (ord(self.len[1]) << 8) | \
                   ord(self.len[2])
        if self.vendor_flag:
            # Vendor id is the first 4 bytes of the payload.
            self.vendor = struct.unpack('>I', self.data[:4])[0]
            self.data = self.data[4:self.len - self.__hdr_len__]
        else:
            self.data = self.data[:self.len - self.__hdr_len__]

    def pack_hdr(self):
        """Re-encode the 24-bit len and append the vendor id if present."""
        self.len = chr((self.len >> 16) & 0xff) + \
                   chr((self.len >> 8) & 0xff) + \
                   chr(self.len & 0xff)
        data = dpkt.Packet.pack_hdr(self)
        if self.vendor_flag:
            data += struct.pack('>I', self.vendor)
        return data

    def __len__(self):
        length = self.__hdr_len__ + \
                 sum(map(len, self.data))
        if self.vendor_flag:
            # Account for the optional 4-byte vendor id.
            length += 4
        return length
if __name__ == '__main__':
    import unittest

    class DiameterTestCase(unittest.TestCase):
        """Round-trip pack/unpack checks using two captured messages:
        ``s`` without and ``t`` with a vendor-flagged AVP."""

        def testPack(self):
            # Re-serializing a parsed message must reproduce the input bytes.
            d = Diameter(self.s)
            self.failUnless(self.s == str(d))
            d = Diameter(self.t)
            self.failUnless(self.t == str(d))

        def testUnpack(self):
            d = Diameter(self.s)
            self.failUnless(d.len == 40)
            #self.failUnless(d.cmd == DEVICE_WATCHDOG_REQUEST)
            self.failUnless(d.request_flag == 1)
            self.failUnless(d.error_flag == 0)
            self.failUnless(len(d.avps) == 2)
            avp = d.avps[0]
            #self.failUnless(avp.code == ORIGIN_HOST)
            self.failUnless(avp.mandatory_flag == 1)
            self.failUnless(avp.vendor_flag == 0)
            self.failUnless(avp.len == 12)
            self.failUnless(len(avp) == 12)
            self.failUnless(avp.data == '\x68\x30\x30\x32')
            # also test the optional vendor id support
            d = Diameter(self.t)
            self.failUnless(d.len == 44)
            avp = d.avps[0]
            self.failUnless(avp.vendor_flag == 1)
            self.failUnless(avp.len == 16)
            self.failUnless(len(avp) == 16)
            self.failUnless(avp.vendor == 3735928559)
            self.failUnless(avp.data == '\x68\x30\x30\x32')

        # Raw message fixtures (Python 2 byte strings).
        s = '\x01\x00\x00\x28\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00\x01\x08\x40\x00\x00\x0c\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08'
        t = '\x01\x00\x00\x2c\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00\x01\x08\xc0\x00\x00\x10\xde\xad\xbe\xef\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08'
    unittest.main()
| bsd-3-clause |
stwunsch/gnuradio | gr-digital/python/digital/qa_glfsr_source.py | 57 | 3531 | #!/usr/bin/env python
#
# Copyright 2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
class test_glfsr_source(gr_unittest.TestCase):
    """QA tests for the byte and float GLFSR source blocks: mask/period
    values and the m-sequence autocorrelation property."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_000_make_b(self):
        """Degree-16 byte source reports the expected mask and period."""
        src = digital.glfsr_source_b(16)
        self.assertEquals(src.mask(), 0x8016)
        self.assertEquals(src.period(), 2**16-1)

    def test_001_degree_b(self):
        """Degrees outside 1..32 are rejected for the byte source."""
        self.assertRaises(RuntimeError,
                          lambda: digital.glfsr_source_b(0))
        self.assertRaises(RuntimeError,
                          lambda: digital.glfsr_source_b(33))

    def test_002_correlation_b(self):
        """An m-sequence's circular autocorrelation is N at lag 0, -1 elsewhere."""
        for degree in range(1,11):              # Higher degrees take too long to correlate
            src = digital.glfsr_source_b(degree, False)
            b2f = digital.chunks_to_symbols_bf((-1.0,1.0), 1)
            dst = blocks.vector_sink_f()
            del self.tb # Discard existing top block
            self.tb = gr.top_block()
            self.tb.connect(src, b2f, dst)
            self.tb.run()
            self.tb.disconnect_all()
            actual_result = dst.data()
            R = auto_correlate(actual_result)
            self.assertEqual(R[0], float(len(R))) # Auto-correlation peak at origin
            for i in range(len(R)-1):
                self.assertEqual(R[i+1], -1.0)    # Auto-correlation minimum everywhere else

    def test_003_make_f(self):
        """Degree-16 float source reports the expected mask and period."""
        src = digital.glfsr_source_f(16)
        self.assertEquals(src.mask(), 0x8016)
        self.assertEquals(src.period(), 2**16-1)

    def test_004_degree_f(self):
        """Degrees outside 1..32 are rejected for the float source."""
        self.assertRaises(RuntimeError,
                          lambda: digital.glfsr_source_f(0))
        self.assertRaises(RuntimeError,
                          lambda: digital.glfsr_source_f(33))

    def test_005_correlation_f(self):
        """Same autocorrelation property, via the float source directly."""
        for degree in range(1,11):              # Higher degrees take too long to correlate
            src = digital.glfsr_source_f(degree, False)
            dst = blocks.vector_sink_f()
            del self.tb # Discard existing top block
            self.tb = gr.top_block()
            self.tb.connect(src, dst)
            self.tb.run()
            actual_result = dst.data()
            R = auto_correlate(actual_result)
            self.assertEqual(R[0], float(len(R))) # Auto-correlation peak at origin
            for i in range(len(R)-1):
                self.assertEqual(R[i+1], -1.0)    # Auto-correlation minimum everywhere else
def auto_correlate(data):
    """Return the circular autocorrelation of *data* as a list, one value
    per lag.  Negative indices wrap around (Python list semantics), which
    makes this the *circular* autocorrelation the GLFSR tests rely on."""
    n = len(data)
    return [sum(data[i] * data[i - lag] for i in range(n))
            for lag in range(n)]
if __name__ == '__main__':
    # Produce XML results for the GNU Radio QA harness.
    gr_unittest.run(test_glfsr_source, "test_glfsr_source.xml")
| gpl-3.0 |
unbit/spockfs | spockfs_tests.py | 1 | 6150 | import unittest
import os
import shutil
import stat
import time
import xattr
FS_DIR = '/tmp/.spockfs_testdir'

# Start each run from an empty test tree: remove every entry (plain files
# and whole subtrees) left behind by a previous run.
for entry in os.listdir(FS_DIR):
    entry_path = os.path.join(FS_DIR, entry)
    if os.path.isdir(entry_path):
        shutil.rmtree(entry_path)
    else:
        os.remove(entry_path)
class SpockFS(unittest.TestCase):
    # Exercises POSIX filesystem semantics against the SpockFS mount at FS_DIR.
    # NOTE(review): test_bigfile_with_random writes os.urandom() output through
    # a text-mode file object, so this suite assumes Python 2 str/bytes
    # semantics -- confirm before running under Python 3.

    def setUp(self):
        # All tests create their entries under the (pre-cleared) mount point.
        self.testpath = FS_DIR

    def test_mkdir(self):
        # mkdir succeeds once and fails on an existing path; rmdir mirrors it.
        path = os.path.join(self.testpath, 'foobar')
        self.assertIsNone(os.mkdir(path))
        self.assertRaises(OSError, os.mkdir, path)
        self.assertIsNone(os.rmdir(path))
        self.assertRaises(OSError, os.rmdir, path)

    def test_stat(self):
        # stat must report the written size even after permissions are dropped.
        path = os.path.join(self.testpath, 'tinyfile')
        with open(path, 'w') as f:
            f.write('hello')
        self.assertIsNone(os.chmod(path, 0))
        s = os.stat(path)
        self.assertEqual(s.st_size, 5)
        self.assertIsNone(os.remove(path))

    def test_unlink(self):
        # remove fails on a missing path, then deletes a freshly created file.
        path = os.path.join(self.testpath, 'notfound')
        self.assertRaises(OSError, os.remove, path)
        with open(path, 'w') as f:
            f.write('i do not exist')
        self.assertIsNone(os.remove(path))
        self.assertFalse(os.path.exists(path))

    def test_rmdir(self):
        # rmdir removes an existing, empty directory.
        path = os.path.join(self.testpath, 'destroyme')
        self.assertIsNone(os.mkdir(path))
        self.assertTrue(os.path.exists(path))
        self.assertIsNone(os.rmdir(path))
        self.assertFalse(os.path.exists(path))

    def test_move(self):
        # rename into a subdirectory must preserve file content.
        path = os.path.join(self.testpath, 'movehere')
        self.assertIsNone(os.mkdir(path))
        path0 = os.path.join(self.testpath, 'item001')
        with open(path0, 'w') as f:
            f.write('moveme')
        path1 = os.path.join(self.testpath, 'movehere', 'item0002')
        self.assertIsNone(os.rename(path0, path1))
        with open(path1, 'r') as f:
            self.assertEqual(f.read(), 'moveme')

    def test_bigfile(self):
        # Round-trip a ~900 KB repeating payload.
        path = os.path.join(self.testpath, 'bigfile')
        with open(path, 'w') as f:
            f.write('spock' * 179 * 1024)
        with open(path, 'r') as f:
            self.assertEqual(f.read(), 'spock' * 179 * 1024)

    def test_bigfile_with_random(self):
        # Round-trip 1 MB of random bytes, then delete the file.
        path = os.path.join(self.testpath, 'bigfile2')
        blob = os.urandom(1024 * 1024)
        with open(path, 'w') as f:
            f.write(blob)
        with open(path, 'r') as f:
            self.assertEqual(f.read(), blob)
        self.assertIsNone(os.remove(path))
        self.assertFalse(os.path.exists(path))

    def test_symlink(self):
        # Symlink creation, readlink round-trip, lstat type bit, and the
        # target surviving removal of the link.
        path = os.path.join(self.testpath, 'linkme')
        with open(path, 'w') as f:
            f.write('linked')
        path2 = os.path.join(self.testpath, 'iamalink')
        self.assertIsNone(os.symlink(path, path2))
        self.assertEqual(os.readlink(path2), path)
        s = os.lstat(path2)
        self.assertTrue(stat.S_ISLNK(s.st_mode))
        self.assertIsNone(os.remove(path2))
        self.assertTrue(os.path.exists(path))
        self.assertFalse(os.path.exists(path2))

    def test_link(self):
        # A hard link deep in a nested tree shares the source's content.
        path = os.path.join(self.testpath, 'fastcopy')
        with open(path, 'w') as f:
            f.write('copyme')
        path0 = os.path.join(self.testpath, 'linkdir0')
        self.assertIsNone(os.mkdir(path0))
        path1 = os.path.join(path0, 'linkdir1')
        self.assertIsNone(os.mkdir(path1))
        path2 = os.path.join(path1, 'linkdir2')
        self.assertIsNone(os.mkdir(path2))
        path3 = os.path.join(path2, 'linked')
        self.assertIsNone(os.link(path, path3))
        with open(path3, 'r') as f:
            self.assertEqual(f.read(), 'copyme')

    def test_truncate(self):
        # Growing an empty file via ftruncate must be reflected by stat.
        path = os.path.join(self.testpath, 'resizeme')
        with open(path, 'w') as f:
            os.ftruncate(f.fileno(), 179)
        s = os.stat(path)
        self.assertEqual(s.st_size, 179)

    def test_utimens(self):
        # utime sets the timestamps; stat tuple indices 7/8 are
        # st_atime/st_mtime.
        path = os.path.join(self.testpath, 'touch')
        with open(path, 'w') as f:
            os.ftruncate(f.fileno(), 1)
        now = 179
        self.assertIsNone(os.utime(path, (now, now)))
        s = os.stat(path)
        self.assertEqual(s[7], 179)
        self.assertEqual(s[8], 179)

    def test_xattr(self):
        # Extended attributes: set, get, delete, re-add, membership checks.
        path = os.path.join(self.testpath, 'keyvalue')
        with open(path, 'w') as f:
            os.ftruncate(f.fileno(), 1)
        x = xattr.xattr(path)
        x['user.spock_key'] = '179'
        x['user.spock_key2'] = '276'
        self.assertEqual(x['user.spock_key'], '179')
        self.assertEqual(x['user.spock_key2'], '276')
        del(x['user.spock_key2'])
        self.assertTrue('user.spock_key2' not in x)
        x['user.spock_key2'] = '276'
        self.assertTrue('user.spock_key' in x)
        self.assertTrue('user.spock_key2' in x)
        self.assertFalse('user.spock_key3' in x)

    def test_readdir(self):
        # A directory listing must contain exactly the created entries.
        path = os.path.join(self.testpath, 'scanme')
        self.assertIsNone(os.mkdir(path))
        path0 = os.path.join(path, 'file000')
        path1 = os.path.join(path, 'file001')
        with open(path0, 'w') as f:
            f.write('0')
        with open(path1, 'w') as f:
            f.write('1')
        items = ['.', '..', 'file000', 'file001']
        ldir = ['.', '..'] + os.listdir(path)
        ldir.sort()
        self.assertEqual(items, ldir)

    def test_mknod(self):
        # mknod with S_IFIFO creates a named pipe.
        path = os.path.join(self.testpath, 'the_fifo_of_spock')
        self.assertIsNone(os.mknod(path, stat.S_IFIFO))
        s = os.stat(path)
        self.assertTrue(stat.S_ISFIFO(s.st_mode))

    def test_statfs(self):
        # statvfs on the mount must report a positive block size.
        s = os.statvfs(self.testpath)
        self.assertTrue(s.f_bsize > 0)

    def test_access(self):
        # access() must track chmod: no rights after mode 0, readable again
        # once S_IRUSR is restored.
        path = os.path.join(self.testpath, 'forbidden')
        with open(path, 'w') as f:
            f.write('x')
        self.assertIsNone(os.chmod(path, 0))
        self.assertFalse(os.access(path, os.W_OK))
        self.assertFalse(os.access(path, os.R_OK))
        self.assertIsNone(os.chmod(path, stat.S_IRUSR))
        self.assertTrue(os.access(path, os.R_OK))
if __name__ == '__main__':
    # Discover and run all SpockFS test cases.
    unittest.main()
| mit |
Arsey/keras-transfer-learning-for-oxford102 | models/inception_v3.py | 1 | 2743 | from keras.applications.inception_v3 import InceptionV3 as KerasInceptionV3
from keras.layers import GlobalAveragePooling2D, Dense
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing import image
import numpy as np
import config
from .base_model import BaseModel
class InceptionV3(BaseModel):
    """Transfer-learning wrapper around Keras' ImageNet-pretrained InceptionV3.

    NOTE(review): this file targets the Keras 1.x API
    (``Model(input=..., output=...)``, ``samples_per_epoch``/``nb_epoch``/
    ``nb_val_samples``) -- confirm the pinned Keras version before modernizing.
    """
    # Dense bottleneck layer used for novelty-detection feature extraction.
    noveltyDetectionLayerName = 'fc1'
    noveltyDetectionLayerSize = 1024

    def __init__(self, *args, **kwargs):
        super(InceptionV3, self).__init__(*args, **kwargs)
        if not self.freeze_layers_number:
            # we chose to train the top 2 identity blocks and 1 convolution block
            self.freeze_layers_number = 80
        # InceptionV3 expects 299x299 inputs.
        self.img_size = (299, 299)

    def _create(self):
        # ImageNet weights without the classification head; our head is
        # GAP -> ELU dense bottleneck -> softmax over the project's classes.
        base_model = KerasInceptionV3(weights='imagenet', include_top=False, input_tensor=self.get_input_tensor())
        self.make_net_layers_non_trainable(base_model)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(self.noveltyDetectionLayerSize, activation='elu', name=self.noveltyDetectionLayerName)(x)
        predictions = Dense(len(config.classes), activation='softmax')(x)
        self.model = Model(input=base_model.input, output=predictions)

    def preprocess_input(self, x):
        # Scale [0, 255] pixels to [-1, 1] in place (Inception convention).
        x /= 255.
        x -= 0.5
        x *= 2.
        return x

    def load_img(self, img_path):
        # Load one image as a preprocessed float array (batch dim stripped).
        img = image.load_img(img_path, target_size=self.img_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        return self.preprocess_input(x)[0]

    @staticmethod
    def apply_mean(image_data_generator):
        # Inception preprocessing needs no dataset mean subtraction.
        pass

    def _fine_tuning(self):
        # Apply the configured layer freezing, then train with SGD plus
        # data augmentation, saving the fine-tuned model at the end.
        self.freeze_top_layers()
        self.model.compile(
            loss='categorical_crossentropy',
            optimizer=SGD(lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True),
            metrics=['accuracy'])
        self.model.fit_generator(
            self.get_train_datagen(rotation_range=30.,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   preprocessing_function=self.preprocess_input),
            samples_per_epoch=config.nb_train_samples,
            nb_epoch=self.nb_epoch,
            validation_data=self.get_validation_datagen(preprocessing_function=self.preprocess_input),
            nb_val_samples=config.nb_validation_samples,
            callbacks=self.get_callbacks(config.get_fine_tuned_weights_path(), patience=self.fine_tuning_patience),
            class_weight=self.class_weight)
        self.model.save(config.get_model_path())
def inst_class(*args, **kwargs):
    """Factory hook: return an InceptionV3 instance (module-level entry point)."""
    return InceptionV3(*args, **kwargs)
| mit |
CatsAndDogsbvba/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/lmfit-py/lmfit/models.py | 7 | 16554 | import numpy as np
from .model import Model
from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, pearson7,
step, rectangle, breit_wigner, logistic,
students_t, lognormal, damped_oscillator,
expgaussian, skewed_gaussian, donaich,
skewed_voigt, exponential, powerlaw, linear,
parabolic)
from . import lineshapes
from .asteval import Interpreter
from .astutils import get_ast_names
class DimensionalError(Exception):
    """Raised when a model receives the wrong number of independent variables."""
    pass
def _validate_1d(independent_vars):
if len(independent_vars) != 1:
raise DimensionalError(
"This model requires exactly one independent variable.")
def index_of(arr, val):
    """Return the index in *arr* of the value nearest to *val*.

    Values below the array minimum clamp to index 0.
    """
    if val < min(arr):
        return 0
    distances = np.abs(arr - val)
    return distances.argmin()
def fwhm_expr(model):
    """Return the constraint expression tying fwhm to sigma for *model*."""
    return "{:.7f}*{}sigma".format(model.fwhm_factor, model.prefix)
def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
    "estimate amp, cen, sigma for a peak, create params"
    # NOTE(review): when x is None this returns a bare (amp, cen, sigma)
    # tuple instead of a Parameters object like the normal path -- callers
    # should confirm they never hit this branch with x=None.
    if x is None:
        return 1.0, 0.0, 1.0
    maxy, miny = max(y), min(y)
    maxx, minx = max(x), min(x)
    imaxy = index_of(y, maxy)
    cen = x[imaxy]
    # Crude starting guesses: twice the data range as amplitude, one sixth
    # of the x span as sigma.
    amp = (maxy - miny)*2.0
    sig = (maxx-minx)/6.0
    # Points above the half-maximum refine the width and center estimates.
    halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
    if negative:
        # For a dip rather than a peak, flip amplitude and look below
        # the half level instead.
        imaxy = index_of(y, miny)
        amp = -(maxy - miny)*2.0
        halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
    if len(halfmax_vals) > 2:
        sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
        cen = x[halfmax_vals].mean()
    amp = amp*sig*ampscale
    sig = sig*sigscale
    pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
    # Peak widths are physically non-negative.
    pars['%ssigma' % model.prefix].set(min=0.0)
    return pars
def update_param_vals(pars, prefix, **kwargs):
    """Set ``pars['<prefix><name>'].value`` for each keyword argument present.

    Unknown names are ignored; *pars* is returned for chaining.
    """
    for name, value in kwargs.items():
        full_name = prefix + name
        if full_name in pars:
            pars[full_name].value = value
    return pars
COMMON_DOC = """
Parameters
----------
independent_vars: list of strings to be set as variable names
missing: None, 'drop', or 'raise'
None: Do not check for null or missing values.
'drop': Drop null or missing observations in data.
Use pandas.isnull if pandas is available; otherwise,
silently fall back to numpy.isnan.
'raise': Raise a (more helpful) exception when data contains null
or missing values.
prefix: string to prepend to paramter names, needed to add two Models that
have parameter names in common. None by default.
"""
class ConstantModel(Model):
__doc__ = "x -> c" + COMMON_DOC
def __init__(self, *args, **kwargs):
def constant(x, c):
return c
super(ConstantModel, self).__init__(constant, *args, **kwargs)
def guess(self, data, **kwargs):
pars = self.make_params()
pars['%sc' % self.prefix].set(value=data.mean())
return update_param_vals(pars, self.prefix, **kwargs)
class LinearModel(Model):
__doc__ = linear.__doc__ + COMMON_DOC if linear.__doc__ else ""
def __init__(self, *args, **kwargs):
super(LinearModel, self).__init__(linear, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
sval, oval = 0., 0.
if x is not None:
sval, oval = np.polyfit(x, data, 1)
pars = self.make_params(intercept=oval, slope=sval)
return update_param_vals(pars, self.prefix, **kwargs)
class QuadraticModel(Model):
__doc__ = parabolic.__doc__ + COMMON_DOC if parabolic.__doc__ else ""
def __init__(self, *args, **kwargs):
super(QuadraticModel, self).__init__(parabolic, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
a, b, c = 0., 0., 0.
if x is not None:
a, b, c = np.polyfit(x, data, 2)
pars = self.make_params(a=a, b=b, c=c)
return update_param_vals(pars, self.prefix, **kwargs)
ParabolicModel = QuadraticModel
class PolynomialModel(Model):
    __doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
    MAX_DEGREE = 7
    # Message now matches the check below, which accepts degree == MAX_DEGREE
    # (the old text said "less than", contradicting the accepted range).
    DEGREE_ERR = "degree must be an integer equal to or smaller than %d."

    def __init__(self, degree, *args, **kwargs):
        """Create a polynomial model of the given degree (0..MAX_DEGREE)."""
        if not isinstance(degree, int) or degree > self.MAX_DEGREE:
            raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
        self.poly_degree = degree
        # Expose one coefficient parameter per order: c0..c<degree>.
        pnames = ['c%i' % (i) for i in range(degree + 1)]
        kwargs['param_names'] = pnames

        def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
            return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)

        super(PolynomialModel, self).__init__(polynomial, *args, **kwargs)

    def guess(self, data, x=None, **kwargs):
        """Estimate coefficients with a least-squares polynomial fit."""
        pars = self.make_params()
        if x is not None:
            # polyfit returns highest order first; reverse onto c0..cN.
            coefs = np.polyfit(x, data, self.poly_degree)
            for i, coef in enumerate(coefs[::-1]):
                pars['%sc%i' % (self.prefix, i)].set(value=coef)
        return update_param_vals(pars, self.prefix, **kwargs)
class GaussianModel(Model):
__doc__ = gaussian.__doc__ + COMMON_DOC if gaussian.__doc__ else ""
fwhm_factor = 2.354820
def __init__(self, *args, **kwargs):
super(GaussianModel, self).__init__(gaussian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class LorentzianModel(Model):
__doc__ = lorentzian.__doc__ + COMMON_DOC if lorentzian.__doc__ else ""
fwhm_factor = 2.0
def __init__(self, *args, **kwargs):
super(LorentzianModel, self).__init__(lorentzian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
return update_param_vals(pars, self.prefix, **kwargs)
class VoigtModel(Model):
__doc__ = voigt.__doc__ + COMMON_DOC if voigt.__doc__ else ""
fwhm_factor = 3.60131
def __init__(self, *args, **kwargs):
super(VoigtModel, self).__init__(voigt, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative,
ampscale=1.5, sigscale=0.65)
return update_param_vals(pars, self.prefix, **kwargs)
class PseudoVoigtModel(Model):
__doc__ = pvoigt.__doc__ + COMMON_DOC if pvoigt.__doc__ else ""
fwhm_factor = 2.0
def __init__(self, *args, **kwargs):
super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
self.set_param_hint('fraction', value=0.5)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
pars['%sfraction' % self.prefix].set(value=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
class Pearson7Model(Model):
__doc__ = pearson7.__doc__ + COMMON_DOC if pearson7.__doc__ else ""
def __init__(self, *args, **kwargs):
super(Pearson7Model, self).__init__(pearson7, *args, **kwargs)
self.set_param_hint('expon', value=1.5)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sexpon' % self.prefix].set(value=1.5)
return update_param_vals(pars, self.prefix, **kwargs)
class StudentsTModel(Model):
__doc__ = students_t.__doc__ + COMMON_DOC if students_t.__doc__ else ""
def __init__(self, *args, **kwargs):
super(StudentsTModel, self).__init__(students_t, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class BreitWignerModel(Model):
__doc__ = breit_wigner.__doc__ + COMMON_DOC if breit_wigner.__doc__ else ""
def __init__(self, *args, **kwargs):
super(BreitWignerModel, self).__init__(breit_wigner, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sq' % self.prefix].set(value=1.0)
return update_param_vals(pars, self.prefix, **kwargs)
class LognormalModel(Model):
__doc__ = lognormal.__doc__ + COMMON_DOC if lognormal.__doc__ else ""
def __init__(self, *args, **kwargs):
super(LognormalModel, self).__init__(lognormal, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
pars['%ssigma' % self.prefix].set(min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class DampedOscillatorModel(Model):
__doc__ = damped_oscillator.__doc__ + COMMON_DOC if damped_oscillator.__doc__ else ""
def __init__(self, *args, **kwargs):
super(DampedOscillatorModel, self).__init__(damped_oscillator, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars =guess_from_peak(self, data, x, negative,
ampscale=0.1, sigscale=0.1)
return update_param_vals(pars, self.prefix, **kwargs)
class ExponentialGaussianModel(Model):
__doc__ = expgaussian.__doc__ + COMMON_DOC if expgaussian.__doc__ else ""
def __init__(self, *args, **kwargs):
super(ExponentialGaussianModel, self).__init__(expgaussian, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class SkewedGaussianModel(Model):
__doc__ = skewed_gaussian.__doc__ + COMMON_DOC if skewed_gaussian.__doc__ else ""
fwhm_factor = 2.354820
def __init__(self, *args, **kwargs):
super(SkewedGaussianModel, self).__init__(skewed_gaussian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class DonaichModel(Model):
__doc__ = donaich.__doc__ + COMMON_DOC if donaich.__doc__ else ""
def __init__(self, *args, **kwargs):
super(DonaichModel, self).__init__(donaich, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
class PowerLawModel(Model):
    __doc__ = powerlaw.__doc__ + COMMON_DOC if powerlaw.__doc__ else ""

    def __init__(self, *args, **kwargs):
        super(PowerLawModel, self).__init__(powerlaw, *args, **kwargs)

    def guess(self, data, x=None, **kwargs):
        """Estimate amplitude and exponent from a linear fit in log-log space.

        Falls back to crude defaults when the fit fails (e.g. x is None).
        """
        try:
            expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
        except Exception:
            # Was a bare 'except:', which also swallowed KeyboardInterrupt
            # and SystemExit.
            expon, amp = 1, np.log(abs(max(data)+1.e-9))

        pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
        return update_param_vals(pars, self.prefix, **kwargs)
class ExponentialModel(Model):
    __doc__ = exponential.__doc__ + COMMON_DOC if exponential.__doc__ else ""

    def __init__(self, *args, **kwargs):
        super(ExponentialModel, self).__init__(exponential, *args, **kwargs)

    def guess(self, data, x=None, **kwargs):
        """Estimate amplitude and decay from a linear fit of log(|data|).

        Falls back to crude defaults when the fit fails (e.g. x is None).
        """
        try:
            sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
        except Exception:
            # Was a bare 'except:', which also swallowed KeyboardInterrupt
            # and SystemExit.
            sval, oval = 1., np.log(abs(max(data)+1.e-9))

        pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
        return update_param_vals(pars, self.prefix, **kwargs)
class StepModel(Model):
__doc__ = step.__doc__ + COMMON_DOC if step.__doc__ else ""
def __init__(self, *args, **kwargs):
super(StepModel, self).__init__(step, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
if x is None:
return
ymin, ymax = min(data), max(data)
xmin, xmax = min(x), max(x)
pars = self.make_params(amplitude=(ymax-ymin),
center=(xmax+xmin)/2.0)
pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class RectangleModel(Model):
    __doc__ = rectangle.__doc__ + COMMON_DOC if rectangle.__doc__ else ""

    def __init__(self, *args, **kwargs):
        super(RectangleModel, self).__init__(rectangle, *args, **kwargs)
        # Convenience derived parameter: midpoint between the two edges.
        self.set_param_hint('midpoint',
                            expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
                                                                self.prefix))

    def guess(self, data, x=None, **kwargs):
        """Estimate amplitude, edge centers and widths from the data extent.

        The rising/falling edges are guessed at 1/4 and 3/4 of the x range.
        """
        if x is None:
            return
        ymin, ymax = min(data), max(data)
        xmin, xmax = min(x), max(x)
        # BUG FIX: edges were previously guessed at (xmax+xmin)/4 and
        # 3*(xmax+xmin)/4, which lie outside [xmin, xmax] unless xmin ~ 0
        # (and coincide when the range is symmetric about zero). Measure
        # fractions of the span from xmin instead.
        pars = self.make_params(amplitude=(ymax-ymin),
                                center1=xmin + (xmax-xmin)/4.0,
                                center2=xmin + 3*(xmax-xmin)/4.0)
        pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
        pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
        return update_param_vals(pars, self.prefix, **kwargs)
class ExpressionModel(Model):
    """Model from User-supplied expression
    %s
    """ % COMMON_DOC

    # Error templates for missing/unresolvable independent variables.
    idvar_missing = "No independent variable found in\n %s"
    idvar_notfound = "Cannot find independent variables '%s' in\n %s"

    def __init__(self, expr, independent_vars=None, init_script=None,
                 *args, **kwargs):
        # create ast evaluator, load custom functions (the lineshapes are
        # made available inside the expression by name)
        self.asteval = Interpreter()
        for name in lineshapes.functions:
            self.asteval.symtable[name] = getattr(lineshapes, name, None)
        if init_script is not None:
            self.asteval.eval(init_script)
        # save expr as text, parse to ast, save for later use
        self.expr = expr
        self.astcode = self.asteval.parse(expr)
        # find all symbol names found in expression; 'x' is assumed to be
        # the independent variable when none is given explicitly
        sym_names = get_ast_names(self.astcode)
        if independent_vars is None and 'x' in sym_names:
            independent_vars = ['x']
        if independent_vars is None:
            raise ValueError(self.idvar_missing % (self.expr))
        # determine which named symbols are parameter names,
        # try to find all independent variables
        idvar_found = [False]*len(independent_vars)
        param_names = []
        for name in sym_names:
            if name in independent_vars:
                idvar_found[independent_vars.index(name)] = True
            elif name not in self.asteval.symtable:
                # unknown symbol: treat it as a fit parameter
                param_names.append(name)
        # make sure we have all independent parameters
        if not all(idvar_found):
            lost = []
            for ix, found in enumerate(idvar_found):
                if not found:
                    lost.append(independent_vars[ix])
            lost = ', '.join(lost)
            raise ValueError(self.idvar_notfound % (lost, self.expr))
        kwargs['independent_vars'] = independent_vars

        # The model function: push all keyword values into the evaluator's
        # symbol table, then run the pre-parsed expression.
        def _eval(**kwargs):
            for name, val in kwargs.items():
                self.asteval.symtable[name] = val
            return self.asteval.run(self.astcode)

        super(ExpressionModel, self).__init__(_eval, *args, **kwargs)

        # set param names here, and other things normally
        # set in _parse_params(), which will be short-circuited.
        self.independent_vars = independent_vars
        self._func_allargs = independent_vars + param_names
        self._param_names = set(param_names)
        self._func_haskeywords = True
        self.def_vals = {}

    def __repr__(self):
        # e.g. <lmfit.ExpressionModel('a*exp(-x/tau)')>
        return "<lmfit.ExpressionModel('%s')>" % (self.expr)

    def _parse_params(self):
        """ExpressionModel._parse_params is over-written (as `pass`)
        to prevent normal parsing of function for parameter names
        """
        pass
| apache-2.0 |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/introspection.py | 624 | 1426 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
    # Updating the data_types_reverse dictionary with the appropriate
    # type for Geometry fields.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """Return (field_type, field_params) for the geometry column *geo_col*.

        NOTE(review): if *geo_col* is not present in the DESCRIBE output,
        ``field_type`` is never bound and the final return raises
        UnboundLocalError -- callers appear to guarantee the column exists.
        """
        cursor = self.connection.cursor()
        try:
            # In order to get the specific geometry type of the field,
            # we introspect on the table definition using `DESCRIBE`.
            cursor.execute('DESCRIBE %s' %
                           self.connection.ops.quote_name(table_name))
            # Increment over description info until we get to the geometry
            # column.
            for column, typ, null, key, default, extra in cursor.fetchall():
                if column == geo_col:
                    # Using OGRGeomType to convert from OGC name to Django field.
                    # MySQL does not support 3D or SRIDs, so the field params
                    # are empty.
                    field_type = OGRGeomType(typ).django
                    field_params = {}
                    break
        finally:
            # Always release the cursor, even if DESCRIBE fails.
            cursor.close()
        return field_type, field_params
| gpl-3.0 |
ankraft/onem2mlib | examples/groups.py | 1 | 1616 | #
# groups.py
#
# (c) 2017 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# This example shows how to create a <group> resource and how to use the fanOutPoint.
# Everything is deleted in the end.
#
import uuid, sys
sys.path.append('..')
from onem2mlib import *
if __name__ == '__main__':
    # create session
    session = Session('http://localhost:8282', 'admin:admin')

    # get the <CSEBase> resource
    cse = CSEBase(session, 'mn-cse')

    # create an <AE> resource
    aeName = 'exampleAE_'+str(uuid.uuid4().hex)  # unique name for the <AE>
    ae = AE(cse, resourceName=aeName)

    # create two <container>s and add them to the <AE>
    cnt1 = Container(ae, resourceName='container1')
    cnt2 = Container(ae, resourceName='container2')

    # create a <group> resource that contains both containers, and add it to the <AE>
    grp = Group(ae, resourceName='myGroup', resources=[cnt1, cnt2],)

    # print the group
    print(grp)

    # add a <contentInstance> to each <container> via the <group> resource's fanOutPoint.
    # Note that we just create a <contentInstance>, but we don't set a parent for it
    # or send it to the CSE (yet). This is done when assigning it to the whole group.
    cin = ContentInstance(content='Some value', instantly=False)
    grp.createGroupResources(cin)

    # Check whether the <contentInstance>s do actually contain the same value
    cin1 = cnt1.latestContentInstance()
    cin2 = cnt2.latestContentInstance()
    if cin1.content == cin2.content:
        print('okay!')
    else:
        print('OH NO!')

    # delete the <AE> to clean up everything
    ae.deleteFromCSE()
| bsd-3-clause |
wkritzinger/asuswrt-merlin | release/src/router/samba36/lib/dnspython/tests/flags.py | 21 | 2093 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.flags
import dns.rcode
import dns.opcode
class FlagsTestCase(unittest.TestCase):
    """Round-trip tests for DNS rcode and header-flag conversions."""

    def test_rcode1(self):
        # failUnless is a deprecated alias of assertTrue throughout.
        self.assertTrue(dns.rcode.from_text('FORMERR') == dns.rcode.FORMERR)

    def test_rcode2(self):
        self.assertTrue(dns.rcode.to_text(dns.rcode.FORMERR) == "FORMERR")

    def test_rcode3(self):
        self.assertTrue(dns.rcode.to_flags(dns.rcode.FORMERR) == (1, 0))

    def test_rcode4(self):
        self.assertTrue(dns.rcode.to_flags(dns.rcode.BADVERS) ==
                        (0, 0x01000000))

    def test_rcode5(self):
        # BUG FIX: this was a second 'test_rcode6', so the later definition
        # shadowed it and the BADVERS from_flags case never ran.
        self.assertTrue(dns.rcode.from_flags(0, 0x01000000) ==
                        dns.rcode.BADVERS)

    def test_rcode6(self):
        self.assertTrue(dns.rcode.from_flags(5, 0) == dns.rcode.REFUSED)

    def test_rcode7(self):
        # Values that do not fit in the rcode fields must be rejected.
        def bad():
            dns.rcode.to_flags(4096)
        self.assertRaises(ValueError, bad)

    def test_flags1(self):
        self.assertTrue(dns.flags.from_text("RA RD AA QR") ==
                        dns.flags.QR | dns.flags.AA | dns.flags.RD | dns.flags.RA)

    def test_flags2(self):
        flags = dns.flags.QR | dns.flags.AA | dns.flags.RD | dns.flags.RA
        self.assertTrue(dns.flags.to_text(flags) == "QR AA RD RA")
if __name__ == '__main__':
    # Run the rcode/flag conversion tests.
    unittest.main()
| gpl-2.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/rpc.py | 44 | 20228 | """RPC Implemention, originally written for the Python Idle IDE
For security reasons, GvR requested that Idle's Python execution server process
connect to the Idle process, which listens for the connection. Since Idle has
has only one client per server, this was not a limitation.
+---------------------------------+ +-------------+
| SocketServer.BaseRequestHandler | | SocketIO |
+---------------------------------+ +-------------+
^ | register() |
| | unregister()|
| +-------------+
| ^ ^
| | |
| + -------------------+ |
| | |
+-------------------------+ +-----------------+
| RPCHandler | | RPCClient |
| [attribute of RPCServer]| | |
+-------------------------+ +-----------------+
The RPCServer handler class is expected to provide register/unregister methods.
RPCHandler inherits the mix-in class SocketIO, which provides these methods.
See the Idle run.main() docstring for further information on how this was
accomplished in Idle.
"""
import sys
import os
import socket
import select
import SocketServer
import struct
import cPickle as pickle
import threading
import Queue
import traceback
import copy_reg
import types
import marshal
def unpickle_code(ms):
    """Rebuild a code object from its marshal serialization (pickle helper)."""
    code_obj = marshal.loads(ms)
    assert isinstance(code_obj, types.CodeType)
    return code_obj
def pickle_code(co):
    """copy_reg reducer: serialize a code object via marshal.

    Returns (unpickle_code, (marshalled_bytes,)) as pickle expects.
    """
    assert isinstance(co, types.CodeType)
    serialized = marshal.dumps(co)
    return unpickle_code, (serialized,)
# XXX KBK 24Aug02 function pickling capability not used in Idle
# def unpickle_function(ms):
# return ms
# def pickle_function(fn):
# assert isinstance(fn, type.FunctionType)
# return repr(fn)
copy_reg.pickle(types.CodeType, pickle_code, unpickle_code)
# copy_reg.pickle(types.FunctionType, pickle_function, unpickle_function)
BUFSIZE = 8*1024
LOCALHOST = '127.0.0.1'
class RPCServer(SocketServer.TCPServer):
    """TCPServer variant that *connects out* to its single client (Idle).

    The usual bind/listen/accept phases are overridden because the
    connection direction is reversed (see the module docstring).
    """

    def __init__(self, addr, handlerclass=None):
        if handlerclass is None:
            handlerclass = RPCHandler
        SocketServer.TCPServer.__init__(self, addr, handlerclass)

    def server_bind(self):
        "Override TCPServer method, no bind() phase for connecting entity"
        pass

    def server_activate(self):
        """Override TCPServer method, connect() instead of listen()

        Due to the reversed connection, self.server_address is actually the
        address of the Idle Client to which we are connecting.
        """
        self.socket.connect(self.server_address)

    def get_request(self):
        "Override TCPServer method, return already connected socket"
        return self.socket, self.server_address

    def handle_error(self, request, client_address):
        """Override TCPServer method

        Error message goes to __stderr__.  No error message if exiting
        normally or socket raised EOF.  Other exceptions not handled in
        server code will cause os._exit.
        """
        try:
            # Re-raise the active exception to dispatch on its type.
            raise
        except SystemExit:
            raise
        except:
            erf = sys.__stderr__
            print>>erf, '\n' + '-'*40
            print>>erf, 'Unhandled server exception!'
            print>>erf, 'Thread: %s' % threading.currentThread().getName()
            print>>erf, 'Client Address: ', client_address
            print>>erf, 'Request: ', repr(request)
            traceback.print_exc(file=erf)
            print>>erf, '\n*** Unrecoverable, server exiting!'
            print>>erf, '-'*40
            os._exit(0)
#----------------- end class RPCServer --------------------
objecttable = {}
request_queue = Queue.Queue(0)
response_queue = Queue.Queue(0)
class SocketIO(object):
nextseq = 0
def __init__(self, sock, objtable=None, debugging=None):
    # Remember which thread owns the socket; other threads must not block
    # on it directly (see asynccall/getresponse).
    self.sockthread = threading.currentThread()
    if debugging is not None:
        self.debugging = debugging
    self.sock = sock
    if objtable is None:
        # Default to the module-level shared object table.
        objtable = objecttable
    self.objtable = objtable
    # seq number -> response, and seq number -> condition variable used to
    # wake up cross-thread callers.
    self.responses = {}
    self.cvars = {}
def close(self):
    # Clear self.sock first so other methods see the connection as gone,
    # then close the underlying socket (if any).
    sock = self.sock
    self.sock = None
    if sock is not None:
        sock.close()
def exithook(self):
    "override for specific exit action"
    # BUG FIX: os._exit requires an explicit status argument; the previous
    # bare os._exit() raised TypeError instead of terminating the process.
    os._exit(0)
def debug(self, *args):
    # No-op unless debugging is enabled on this instance/class.
    if not self.debugging:
        return
    # Prefix messages with a location tag and the thread name so output
    # from interleaved threads can be told apart.
    s = self.location + " " + str(threading.currentThread().getName())
    for a in args:
        s = s + " " + str(a)
    print>>sys.__stderr__, s
def register(self, oid, object):
    # Expose *object* for remote calls under the id *oid*.
    self.objtable[oid] = object
def unregister(self, oid):
    # Remove *oid* from the object table; unknown ids are ignored.
    try:
        del self.objtable[oid]
    except KeyError:
        pass
def localcall(self, seq, request):
    """Execute *request* against a locally registered object.

    A request is ("CALL"|"QUEUE", (oid, methodname, args, kwargs)); the
    return value is a (how, what) response tuple.  The special method
    names __methods__ and __attributes__ introspect the target object.
    """
    self.debug("localcall:", request)
    try:
        how, (oid, methodname, args, kwargs) = request
    except TypeError:
        return ("ERROR", "Bad request format")
    if oid not in self.objtable:
        return ("ERROR", "Unknown object id: %r" % (oid,))
    obj = self.objtable[oid]
    if methodname == "__methods__":
        methods = {}
        _getmethods(obj, methods)
        return ("OK", methods)
    if methodname == "__attributes__":
        attributes = {}
        _getattributes(obj, attributes)
        return ("OK", attributes)
    if not hasattr(obj, methodname):
        return ("ERROR", "Unsupported method name: %r" % (methodname,))
    method = getattr(obj, methodname)
    try:
        if how == 'CALL':
            ret = method(*args, **kwargs)
            # Proxy objects are sent back as references, not by value.
            if isinstance(ret, RemoteObject):
                ret = remoteref(ret)
            return ("OK", ret)
        elif how == 'QUEUE':
            # Deferred execution: hand the call to the request queue.
            request_queue.put((seq, (method, args, kwargs)))
            return("QUEUED", None)
        else:
            return ("ERROR", "Unsupported message type: %s" % how)
    except SystemExit:
        raise
    except socket.error:
        raise
    except:
        # Deliberate catch-all: any exception raised by user code is
        # reported to stderr and mapped to an EXCEPTION response instead
        # of killing the server thread.
        msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\
              " Object: %s \n Method: %s \n Args: %s\n"
        print>>sys.__stderr__, msg % (oid, method, args)
        traceback.print_exc(file=sys.__stderr__)
        return ("EXCEPTION", None)
def remotecall(self, oid, methodname, args, kwargs):
self.debug("remotecall:asynccall: ", oid, methodname)
seq = self.asynccall(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def remotequeue(self, oid, methodname, args, kwargs):
self.debug("remotequeue:asyncqueue: ", oid, methodname)
seq = self.asyncqueue(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def asynccall(self, oid, methodname, args, kwargs):
request = ("CALL", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.currentThread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncqueue(self, oid, methodname, args, kwargs):
request = ("QUEUE", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.currentThread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncreturn(self, seq):
self.debug("asyncreturn:%d:call getresponse(): " % seq)
response = self.getresponse(seq, wait=0.05)
self.debug(("asyncreturn:%d:response: " % seq), response)
return self.decoderesponse(response)
def decoderesponse(self, response):
how, what = response
if how == "OK":
return what
if how == "QUEUED":
return None
if how == "EXCEPTION":
self.debug("decoderesponse: EXCEPTION")
return None
if how == "EOF":
self.debug("decoderesponse: EOF")
self.decode_interrupthook()
return None
if how == "ERROR":
self.debug("decoderesponse: Internal ERROR:", what)
raise RuntimeError, what
raise SystemError, (how, what)
def decode_interrupthook(self):
""
raise EOFError
def mainloop(self):
"""Listen on socket until I/O not ready or EOF
pollresponse() will loop looking for seq number None, which
never comes, and exit on EOFError.
"""
try:
self.getresponse(myseq=None, wait=0.05)
except EOFError:
self.debug("mainloop:return")
return
def getresponse(self, myseq, wait):
response = self._getresponse(myseq, wait)
if response is not None:
how, what = response
if how == "OK":
response = how, self._proxify(what)
return response
def _proxify(self, obj):
if isinstance(obj, RemoteProxy):
return RPCProxy(self, obj.oid)
if isinstance(obj, types.ListType):
return map(self._proxify, obj)
# XXX Check for other types -- not currently needed
return obj
def _getresponse(self, myseq, wait):
self.debug("_getresponse:myseq:", myseq)
if threading.currentThread() is self.sockthread:
# this thread does all reading of requests or responses
while 1:
response = self.pollresponse(myseq, wait)
if response is not None:
return response
else:
# wait for notification from socket handling thread
cvar = self.cvars[myseq]
cvar.acquire()
while myseq not in self.responses:
cvar.wait()
response = self.responses[myseq]
self.debug("_getresponse:%s: thread woke up: response: %s" %
(myseq, response))
del self.responses[myseq]
del self.cvars[myseq]
cvar.release()
return response
def newseq(self):
self.nextseq = seq = self.nextseq + 2
return seq
def putmessage(self, message):
self.debug("putmessage:%d:" % message[0])
try:
s = pickle.dumps(message)
except pickle.PicklingError:
print >>sys.__stderr__, "Cannot pickle:", repr(message)
raise
s = struct.pack("<i", len(s)) + s
while len(s) > 0:
try:
r, w, x = select.select([], [self.sock], [])
n = self.sock.send(s[:BUFSIZE])
except (AttributeError, TypeError):
raise IOError, "socket no longer exists"
except socket.error:
raise
else:
s = s[n:]
buffer = ""
bufneed = 4
bufstate = 0 # meaning: 0 => reading count; 1 => reading data
def pollpacket(self, wait):
self._stage0()
if len(self.buffer) < self.bufneed:
r, w, x = select.select([self.sock.fileno()], [], [], wait)
if len(r) == 0:
return None
try:
s = self.sock.recv(BUFSIZE)
except socket.error:
raise EOFError
if len(s) == 0:
raise EOFError
self.buffer += s
self._stage0()
return self._stage1()
def _stage0(self):
if self.bufstate == 0 and len(self.buffer) >= 4:
s = self.buffer[:4]
self.buffer = self.buffer[4:]
self.bufneed = struct.unpack("<i", s)[0]
self.bufstate = 1
def _stage1(self):
if self.bufstate == 1 and len(self.buffer) >= self.bufneed:
packet = self.buffer[:self.bufneed]
self.buffer = self.buffer[self.bufneed:]
self.bufneed = 4
self.bufstate = 0
return packet
def pollmessage(self, wait):
packet = self.pollpacket(wait)
if packet is None:
return None
try:
message = pickle.loads(packet)
except pickle.UnpicklingError:
print >>sys.__stderr__, "-----------------------"
print >>sys.__stderr__, "cannot unpickle packet:", repr(packet)
traceback.print_stack(file=sys.__stderr__)
print >>sys.__stderr__, "-----------------------"
raise
return message
def pollresponse(self, myseq, wait):
"""Handle messages received on the socket.
Some messages received may be asynchronous 'call' or 'queue' requests,
and some may be responses for other threads.
'call' requests are passed to self.localcall() with the expectation of
immediate execution, during which time the socket is not serviced.
'queue' requests are used for tasks (which may block or hang) to be
processed in a different thread. These requests are fed into
request_queue by self.localcall(). Responses to queued requests are
taken from response_queue and sent across the link with the associated
sequence numbers. Messages in the queues are (sequence_number,
request/response) tuples and code using this module removing messages
from the request_queue is responsible for returning the correct
sequence number in the response_queue.
pollresponse() will loop until a response message with the myseq
sequence number is received, and will save other responses in
self.responses and notify the owning thread.
"""
while 1:
# send queued response if there is one available
try:
qmsg = response_queue.get(0)
except Queue.Empty:
pass
else:
seq, response = qmsg
message = (seq, ('OK', response))
self.putmessage(message)
# poll for message on link
try:
message = self.pollmessage(wait)
if message is None: # socket not ready
return None
except EOFError:
self.handle_EOF()
return None
except AttributeError:
return None
seq, resq = message
how = resq[0]
self.debug("pollresponse:%d:myseq:%s" % (seq, myseq))
# process or queue a request
if how in ("CALL", "QUEUE"):
self.debug("pollresponse:%d:localcall:call:" % seq)
response = self.localcall(seq, resq)
self.debug("pollresponse:%d:localcall:response:%s"
% (seq, response))
if how == "CALL":
self.putmessage((seq, response))
elif how == "QUEUE":
# don't acknowledge the 'queue' request!
pass
continue
# return if completed message transaction
elif seq == myseq:
return resq
# must be a response for a different thread:
else:
cv = self.cvars.get(seq, None)
# response involving unknown sequence number is discarded,
# probably intended for prior incarnation of server
if cv is not None:
cv.acquire()
self.responses[seq] = resq
cv.notify()
cv.release()
continue
def handle_EOF(self):
"action taken upon link being closed by peer"
self.EOFhook()
self.debug("handle_EOF")
for key in self.cvars:
cv = self.cvars[key]
cv.acquire()
self.responses[key] = ('EOF', None)
cv.notify()
cv.release()
# call our (possibly overridden) exit function
self.exithook()
def EOFhook(self):
"Classes using rpc client/server can override to augment EOF action"
pass
#----------------- end class SocketIO --------------------
class RemoteObject(object):
    """Marker mix-in class.

    Return values deriving from this class are sent back to the peer
    by reference (as a proxy token) rather than by value.
    """
    pass
def remoteref(obj):
    """Register *obj* in the module object table and return a proxy token
    for it, suitable for sending across the RPC link."""
    key = id(obj)
    objecttable[key] = obj
    return RemoteProxy(key)
class RemoteProxy(object):
    """Pickleable stand-in for a remote object, identified only by the
    object id it carries."""

    def __init__(self, oid):
        self.oid = oid
class RPCHandler(SocketServer.BaseRequestHandler, SocketIO):
    """Server-side request handler: runs the SocketIO main loop for one
    accepted connection."""

    debugging = False
    location = "#S"  # Server

    def __init__(self, sock, addr, svr):
        # Publish this handler on the server *before* invoking the
        # BaseRequestHandler constructor: that constructor calls handle()
        # (and hence mainloop()) synchronously, so the server must already
        # be able to reach us by then.
        svr.current_handler = self ## cgt xxx
        SocketIO.__init__(self, sock)
        SocketServer.BaseRequestHandler.__init__(self, sock, addr, svr)

    def handle(self):
        "handle() method required by SocketServer"
        self.mainloop()

    def get_remote_proxy(self, oid):
        # Return a proxy for an object living on the peer (client) side.
        return RPCProxy(self, oid)
class RPCClient(SocketIO):
    """Client end of the RPC link.

    Despite the name, the *client* listens and the remote process
    connects back to it; accept() completes the SocketIO setup once a
    connection from the local host arrives.
    """

    debugging = False
    location = "#C"  # Client
    nextseq = 1  # Requests coming from the client are odd numbered

    def __init__(self, address, family=socket.AF_INET, type=socket.SOCK_STREAM):
        self.listening_sock = socket.socket(family, type)
        self.listening_sock.bind(address)
        self.listening_sock.listen(1)

    def accept(self):
        working_sock, address = self.listening_sock.accept()
        if self.debugging:
            print>>sys.__stderr__, "****** Connection request from ", address
        if address[0] == LOCALHOST:
            # Only now is the SocketIO machinery initialised, on the
            # accepted (working) socket.
            SocketIO.__init__(self, working_sock)
        else:
            # Refuse connections that do not originate from this machine.
            print>>sys.__stderr__, "** Invalid host: ", address
            raise socket.error

    def get_remote_proxy(self, oid):
        # Return a proxy for an object living on the peer (server) side.
        return RPCProxy(self, oid)
class RPCProxy(object):
    """Stand-in for an object living in the peer process.

    Attribute access is resolved lazily over the link: callable names
    become MethodProxy objects; data names are fetched with a remote
    __getattribute__ call.  The remote interface is cached on first use.
    """

    # Name-mangled caches of the remote object's methods/attributes,
    # filled by __getmethods()/__getattributes() on first access.
    __methods = None
    __attributes = None

    def __init__(self, sockio, oid):
        self.sockio = sockio
        self.oid = oid

    def __getattr__(self, name):
        # Only invoked for names not found on the proxy itself.
        if self.__methods is None:
            self.__getmethods()
        if self.__methods.get(name):
            return MethodProxy(self.sockio, self.oid, name)
        if self.__attributes is None:
            self.__getattributes()
        if name in self.__attributes:
            value = self.sockio.remotecall(self.oid, '__getattribute__',
                                           (name,), {})
            return value
        else:
            raise AttributeError, name

    def __getattributes(self):
        # Fetch and cache the remote object's data-attribute names.
        self.__attributes = self.sockio.remotecall(self.oid,
                                                   "__attributes__", (), {})

    def __getmethods(self):
        # Fetch and cache the remote object's method names.
        self.__methods = self.sockio.remotecall(self.oid,
                                                "__methods__", (), {})
def _getmethods(obj, methods):
    # Helper to get a list of methods from an object
    # Adds names to dictionary argument 'methods'
    for name in dir(obj):
        attr = getattr(obj, name)
        if hasattr(attr, '__call__'):
            methods[name] = 1
    # Python 2 old-style classes only: dir() on an instance does not list
    # inherited class attributes, so walk the class and its bases as well.
    # (types.InstanceType/ClassType do not exist in Python 3.)
    if type(obj) == types.InstanceType:
        _getmethods(obj.__class__, methods)
    if type(obj) == types.ClassType:
        for super in obj.__bases__:
            _getmethods(super, methods)
def _getattributes(obj, attributes):
for name in dir(obj):
attr = getattr(obj, name)
if not hasattr(attr, '__call__'):
attributes[name] = 1
class MethodProxy(object):
    """Callable stand-in for a single method of a remote object.

    Calling the proxy forwards the method name plus positional and
    keyword arguments through the owning SocketIO's remotecall().
    """

    def __init__(self, sockio, oid, name):
        self.sockio = sockio
        self.oid = oid
        self.name = name

    def __call__(self, *args, **kwargs):
        return self.sockio.remotecall(self.oid, self.name, args, kwargs)
# XXX KBK 09Sep03 We need a proper unit test for this module. Previously
# existing test code was removed at Rev 1.27 (r34098).
| gpl-2.0 |
petemounce/ansible | lib/ansible/vars/manager.py | 4 | 26590 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from collections import defaultdict, MutableMapping
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound
from ansible.inventory.host import Host
from ansible.inventory.helpers import sort_groups, get_group_vars
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems, string_types, text_type
from ansible.plugins import lookup_loader, vars_loader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.vars import combine_vars
from ansible.utils.unsafe_proxy import wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def preprocess_vars(a):
    '''
    Ensures that vars contained in the parameter passed in are
    returned as a list of dictionaries, to ensure for instance
    that vars loaded from a file conform to an expected state.
    '''
    if a is None:
        return None

    # Normalise a single mapping into a one-element list.
    data = a if isinstance(a, list) else [a]

    # Every entry must be dict-like; anything else is a malformed vars file.
    for item in data:
        if not isinstance(item, MutableMapping):
            raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))

    return data
def strip_internal_keys(dirty):
    '''
    Return a copy of *dirty* with every key starting with '_ansible_'
    removed (those keys are internal), recursing into nested dicts.
    '''
    clean = dirty.copy()
    for key, value in dirty.items():
        if isinstance(key, string_types) and key.startswith('_ansible_'):
            del clean[key]
        elif isinstance(value, dict):
            clean[key] = strip_internal_keys(value)
    return clean
class VariableManager:
    """Computes the merged variable dictionary for a play/host/task.

    Owns the variable caches (persistent facts, non-persistent facts,
    include vars, extra vars, options vars) and implements Ansible's
    variable precedence rules in get_vars().
    """

    def __init__(self, loader=None, inventory=None):
        self._nonpersistent_fact_cache = defaultdict(dict)  # set_fact/register results
        self._vars_cache = defaultdict(dict)  # include_vars / set_host_variable results
        self._extra_vars = defaultdict(dict)
        self._host_vars_files = defaultdict(dict)
        self._group_vars_files = defaultdict(dict)
        self._inventory = inventory
        self._loader = loader
        self._hostvars = None
        # Random token substituted for omitted parameters; unique per run.
        self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
        self._options_vars = defaultdict(dict)

        # bad cache plugin is not fatal error
        try:
            self._fact_cache = FactCache()
        except AnsibleError as e:
            display.warning(to_native(e))
            # fallback to a dict as in memory cache
            self._fact_cache = {}

    def __getstate__(self):
        """Serialize the picklable caches (used when handing the manager
        to worker processes)."""
        data = dict(
            fact_cache = self._fact_cache,
            np_fact_cache = self._nonpersistent_fact_cache,
            vars_cache = self._vars_cache,
            extra_vars = self._extra_vars,
            host_vars_files = self._host_vars_files,
            group_vars_files = self._group_vars_files,
            omit_token = self._omit_token,
            options_vars = self._options_vars,
            inventory = self._inventory,
        )
        return data

    def __setstate__(self, data):
        """Restore from __getstate__ output; missing keys get fresh defaults."""
        # NOTE(review): _loader and _hostvars are not restored here;
        # presumably they are re-set by the caller after unpickling — verify.
        self._fact_cache = data.get('fact_cache', defaultdict(dict))
        self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
        self._vars_cache = data.get('vars_cache', defaultdict(dict))
        self._extra_vars = data.get('extra_vars', dict())
        self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
        self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
        self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
        self._inventory = data.get('inventory', None)
        self._options_vars = data.get('options_vars', dict())

    @property
    def extra_vars(self):
        ''' ensures a clean copy of the extra_vars are made '''
        return self._extra_vars.copy()

    @extra_vars.setter
    def extra_vars(self, value):
        ''' ensures a clean copy of the extra_vars are used to set the value '''
        # NOTE(review): assert is stripped under `python -O`; real input
        # validation should raise instead.
        assert isinstance(value, MutableMapping)
        self._extra_vars = value.copy()

    def set_inventory(self, inventory):
        # Late-bind the inventory (e.g. after unpickling in a worker).
        self._inventory = inventory

    @property
    def options_vars(self):
        ''' ensures a clean copy of the options_vars are made '''
        return self._options_vars.copy()

    @options_vars.setter
    def options_vars(self, value):
        ''' ensures a clean copy of the options_vars are used to set the value '''
        # NOTE(review): same assert-under-`-O` caveat as extra_vars above.
        assert isinstance(value, dict)
        self._options_vars = value.copy()

    def _preprocess_vars(self, a):
        '''
        Ensures that vars contained in the parameter passed in are
        returned as a list of dictionaries, to ensure for instance
        that vars loaded from a file conform to an expected state.
        '''
        # NOTE(review): duplicates the module-level preprocess_vars();
        # consider delegating to it.
        if a is None:
            return None
        elif not isinstance(a, list):
            data = [ a ]
        else:
            data = a

        for item in data:
            if not isinstance(item, MutableMapping):
                raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))

        return data

    def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''
        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task and task._role is not None and (play or task.action == 'include_role'):
            all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            ### INIT WORK (use unsafe as we are going to copy/merge vars, no need to x2 copy)
            # basedir, THE 'all' group and the rest of groups for a host, used below
            basedir = self._loader.get_basedir()
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])

            ### internal fuctions that actually do the work ###
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                data = {}
                for inventory_dir in self._inventory._sources:
                    if ',' in inventory_dir:  # skip host lists
                        continue
                    elif not os.path.isdir(inventory_dir):  # always pass 'inventory directory'
                        inventory_dir = os.path.dirname(inventory_dir)

                    for plugin in vars_loader.all():
                        data = combine_vars(data, plugin.get_vars(self._loader, inventory_dir, entities))
                return data

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for plugin in vars_loader.all():
                    data = combine_vars(data, plugin.get_vars(self._loader, basedir, entities))
                return data

            ### configurable functions that are sortable via config ###
            # (names below are looked up via locals() from C.VARIABLE_PRECEDENCE)
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                ''' gets group vars from inventory '''
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                ''' gets plugin sources from inventory for groups '''
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                ''' gets plugin sources from play for groups '''
                return _plugins_play(host_groups)

            def plugins_by_groups():
                '''
                merges all plugin sources by group,
                This should be used instead, NOT in combination with the other groups_plugins* functions
                '''
                # NOTE(review): data[group] is read before it is ever
                # assigned (data starts empty), which would raise KeyError
                # the first time through — verify against upstream.
                data = {}
                for group in host_groups:
                    data[group] = combine_vars(data[group], _plugins_inventory(group))
                    data[group] = combine_vars(data[group], _plugins_play(group))
                return data

            # Merge as per precedence config
            for entry in C.VARIABLE_PRECEDENCE:
                # only allow to call the functions we want exposed
                if entry.startswith('_') or '.' in entry:
                    continue
                display.debug('Calling %s to load vars for %s' % (entry, host.name))
                all_vars = combine_vars(all_vars, locals()[entry]())

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = combine_vars(all_vars, host.get_vars())
            all_vars = combine_vars(all_vars, _plugins_inventory([host]))
            all_vars = combine_vars(all_vars, _plugins_play([host]))

            # finally, the facts caches for this host, if it exists
            try:
                host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
                if not C.NAMESPACE_FACTS:
                    # allow facts to polute main namespace
                    all_vars = combine_vars(all_vars, host_facts)
                # always return namespaced facts
                all_vars = combine_vars(all_vars, {'ansible_facts': host_facts})
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            for vars_file_item in play.get_vars_files():
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=self._loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                            break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item,
                                                       obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be
                        # because of that, so just show a warning and continue
                        display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                        continue

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
            all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # extra vars
        all_vars = combine_vars(all_vars, self._extra_vars)

        # magic variables
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(play, task, all_vars)

        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars

    def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = {}
        variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
        variables['ansible_playbook_python'] = sys.executable

        if host:
            # host already provides some magic vars via host.get_vars()
            if self._inventory:
                variables['groups'] = self._inventory.get_groups_dict()

        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            if play:
                templar = Templar(loader=self._loader)
                if templar.is_template(play.hosts):
                    # hosts is itself a template; fall back to everything
                    pattern = 'all'
                else:
                    pattern = play.hosts or 'all'
                # add the list of hosts in the play, as adjusted for limit/filters
                variables['ansible_play_hosts_all'] = [x.name for x in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
                variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
                variables['ansible_play_batch'] = [x.name for x in self._inventory.get_hosts() if x.name not in play._removed_hosts]

                # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
                # however this would take work in the templating engine, so for now we'll add both
                variables['play_hosts'] = variables['ansible_play_batch']

        # the 'omit' value alows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables

    def _get_delegated_vars(self, play, task, existing_variables):
        """Compute the full variable set for each host a task delegates to,
        keyed by delegated host name."""
        # we unfortunately need to template the delegate_to field here,
        # as we're fetching vars before post_validate has been called on
        # the task that has been passed in
        vars_copy = existing_variables.copy()
        templar = Templar(loader=self._loader, variables=vars_copy)

        items = []
        if task.loop is not None:
            if task.loop in lookup_loader:
                try:
                    loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar,
                                                             loader=self._loader, fail_on_undefined=True, convert_bare=False)
                    items = lookup_loader.get(task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
                except AnsibleUndefinedVariable:
                    # This task will be skipped later due to this, so we just setup
                    # a dummy array for the later code so it doesn't fail
                    items = [None]
            else:
                raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % task.loop)
        else:
            items = [None]

        delegated_host_vars = dict()
        for item in items:
            # update the variables with the item value for templating, in case we need it
            if item is not None:
                vars_copy['item'] = item

            templar.set_available_variables(vars_copy)
            delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
            if delegated_host_name is None:
                raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
            if delegated_host_name in delegated_host_vars:
                # no need to repeat ourselves, as the delegate_to value
                # does not appear to be tied to the loop item variable
                continue

            # a dictionary of variables to use if we have to create a new host below
            # we set the default port based on the default transport here, to make sure
            # we use the proper default for windows
            new_port = C.DEFAULT_REMOTE_PORT
            if C.DEFAULT_TRANSPORT == 'winrm':
                new_port = 5986

            new_delegated_host_vars = dict(
                ansible_host=delegated_host_name,
                ansible_port=new_port,
                ansible_user=C.DEFAULT_REMOTE_USER,
                ansible_connection=C.DEFAULT_TRANSPORT,
            )

            # now try to find the delegated-to host in inventory, or failing that,
            # create a new host on the fly so we can fetch variables for it
            delegated_host = None
            if self._inventory is not None:
                delegated_host = self._inventory.get_host(delegated_host_name)
                # try looking it up based on the address field, and finally
                # fall back to creating a host on the fly to use for the var lookup
                if delegated_host is None:
                    if delegated_host_name in C.LOCALHOST:
                        delegated_host = self._inventory.localhost
                    else:
                        for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
                            # check if the address matches, or if both the delegated_to host
                            # and the current host are in the list of localhost aliases
                            if h.address == delegated_host_name:
                                delegated_host = h
                                break
                        else:
                            delegated_host = Host(name=delegated_host_name)
                            delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
            else:
                delegated_host = Host(name=delegated_host_name)
                delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)

            # now we go fetch the vars for the delegated-to host and save them in our
            # master dictionary of variables to be used later in the TaskExecutor/PlayContext
            delegated_host_vars[delegated_host_name] = self.get_vars(
                play=play,
                host=delegated_host,
                task=task,
                include_delegate_to=False,
                include_hostvars=False,
            )
        return delegated_host_vars

    def clear_facts(self, hostname):
        '''
        Clears the facts for a host
        '''
        if hostname in self._fact_cache:
            del self._fact_cache[hostname]

    def set_host_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the fact cache.
        '''
        # NOTE(review): assert is stripped under `python -O`.
        assert isinstance(facts, dict)

        if host.name not in self._fact_cache:
            self._fact_cache[host.name] = facts
        else:
            try:
                self._fact_cache.update(host.name, facts)
            except KeyError:
                self._fact_cache[host.name] = facts

    def set_nonpersistent_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the fact cache.
        '''
        # NOTE(review): assert is stripped under `python -O`.
        assert isinstance(facts, dict)

        if host.name not in self._nonpersistent_fact_cache:
            self._nonpersistent_fact_cache[host.name] = facts
        else:
            try:
                self._nonpersistent_fact_cache[host.name].update(facts)
            except KeyError:
                self._nonpersistent_fact_cache[host.name] = facts

    def set_host_variable(self, host, varname, value):
        '''
        Sets a value in the vars_cache for a host.
        '''
        host_name = host.get_name()
        if host_name not in self._vars_cache:
            self._vars_cache[host_name] = dict()
        # Merge dict-valued vars rather than overwrite them wholesale.
        if varname in self._vars_cache[host_name] and isinstance(self._vars_cache[host_name][varname], MutableMapping) and isinstance(value, MutableMapping):
            self._vars_cache[host_name] = combine_vars(self._vars_cache[host_name], {varname: value})
        else:
            self._vars_cache[host_name][varname] = value
| gpl-3.0 |
javierag/openchange | python/openchange/migration/openchangedb.py | 4 | 16431 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# OpenChangeDB DB schema and its migrations
# Copyright (C) Enrique J. Hernández Blasco <ejhernandez@zentyal.com> 2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Schema migration for OpenChangeDB "app" with SQL based backend
"""
from __future__ import print_function
from MySQLdb import ProgrammingError
from openchange.migration import migration, Migration
import os
from samba.param import LoadParm
import sys
import tdb
@migration('openchangedb', 1)
class InitialOCDBMigration(Migration):
    """Create the initial OpenChangeDB schema (migration 1).

    apply() is idempotent: if the schema already exists it returns False
    and performs no DDL.
    """
    description = 'initial'

    @classmethod
    def apply(cls, cur, **kwargs):
        """Create every base table and index of the OpenChangeDB schema.

        Returns False (without touching the database) when the schema is
        already present.
        """
        try:
            # Probe for an existing schema using one of its tables.
            cur.execute('SELECT COUNT(*) FROM `organizational_units`')
            return False
        except ProgrammingError as e:
            # MySQL error 1146 = ER_NO_SUCH_TABLE; anything else is fatal.
            if e.args[0] != 1146:
                raise
            # Table does not exist, then migrate

        # Tables are created in dependency order: each FOREIGN KEY below
        # references a table created earlier in this method.
        cur.execute("""CREATE TABLE IF NOT EXISTS `organizational_units` (
                       `id` INT NOT NULL AUTO_INCREMENT,
                       `organization` VARCHAR(165) NULL,
                       `administrative_group` VARCHAR(165) NULL,
                       PRIMARY KEY (`id`))
                       ENGINE = InnoDB""")
        cur.execute("""CREATE UNIQUE INDEX `ou_unique` ON `organizational_units` (`organization` ASC, `administrative_group` ASC)""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `public_folders` (
                       `ou_id` INT NOT NULL,
                       `ReplicaID` INT NULL,
                       `StoreGUID` VARCHAR(36) NULL,
                       PRIMARY KEY (`ou_id`),
                       CONSTRAINT `fk_public_folders_ou_id`
                         FOREIGN KEY (`ou_id`)
                         REFERENCES `organizational_units` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `mailboxes` (
                       `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
                       `ou_id` INT NOT NULL,
                       `folder_id` BIGINT UNSIGNED NOT NULL,
                       `name` VARCHAR(256) NOT NULL,
                       `MailboxGUID` VARCHAR(36) NOT NULL,
                       `ReplicaGUID` VARCHAR(36) NOT NULL,
                       `ReplicaID` INT NOT NULL,
                       `SystemIdx` INT NOT NULL,
                       `indexing_url` VARCHAR(1024) NULL,
                       `locale` VARCHAR(15) NULL,
                       PRIMARY KEY (`id`),
                       CONSTRAINT `fk_mailboxes_ou_id`
                         FOREIGN KEY (`ou_id`)
                         REFERENCES `organizational_units` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE INDEX `fk_mailboxes_ou_id_idx` ON `mailboxes` (`ou_id` ASC)""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `folders` (
                       `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
                       `ou_id` INT NOT NULL,
                       `folder_id` BIGINT UNSIGNED NOT NULL,
                       `folder_class` VARCHAR(6) NOT NULL DEFAULT 'system',
                       `mailbox_id` BIGINT UNSIGNED NULL,
                       `parent_folder_id` BIGINT UNSIGNED NULL,
                       `FolderType` INT NULL,
                       `SystemIdx` INT NULL,
                       `MAPIStoreURI` VARCHAR(1024) NULL,
                       PRIMARY KEY (`id`),
                       CONSTRAINT `fk_folders_ou_id`
                         FOREIGN KEY (`ou_id`)
                         REFERENCES `organizational_units` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE,
                       CONSTRAINT `fk_folders_mailbox_id`
                         FOREIGN KEY (`mailbox_id`)
                         REFERENCES `mailboxes` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE,
                       CONSTRAINT `fk_folders_parent_folder_id`
                         FOREIGN KEY (`parent_folder_id`)
                         REFERENCES `folders` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE INDEX `fk_folders_ou_id_idx` ON `folders` (`ou_id` ASC)""")
        cur.execute("""CREATE INDEX `fk_folders_mailbox_id_idx` ON `folders` (`mailbox_id` ASC)""")
        cur.execute("""CREATE INDEX `fk_folders_parent_folder_id_idx` ON `folders` (`parent_folder_id` ASC)""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `messages` (
                       `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
                       `ou_id` INT NULL,
                       `message_id` BIGINT UNSIGNED NULL,
                       `message_type` VARCHAR(45) NULL,
                       `folder_id` BIGINT UNSIGNED NULL,
                       `mailbox_id` BIGINT UNSIGNED NULL,
                       `normalized_subject` TEXT NULL,
                       PRIMARY KEY (`id`),
                       CONSTRAINT `fk_messages_ou_id`
                         FOREIGN KEY (`ou_id`)
                         REFERENCES `organizational_units` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE,
                       CONSTRAINT `fk_messages_folder_id`
                         FOREIGN KEY (`folder_id`)
                         REFERENCES `folders` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE,
                       CONSTRAINT `fk_messages_mailbox_id`
                         FOREIGN KEY (`mailbox_id`)
                         REFERENCES `mailboxes` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE INDEX `fk_messages_ou_id_idx` ON `messages` (`ou_id` ASC)""")
        cur.execute("""CREATE INDEX `fk_messages_folder_id_idx` ON `messages` (`folder_id` ASC)""")
        cur.execute("""CREATE INDEX `fk_messages_mailbox_id_idx` ON `messages` (`mailbox_id` ASC)""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `messages_properties` (
                       `message_id` BIGINT UNSIGNED NOT NULL,
                       `name` VARCHAR(128) NOT NULL,
                       `value` VARCHAR(512) NOT NULL,
                       CONSTRAINT `fk_messages_properties_message_id`
                         FOREIGN KEY (`message_id`)
                         REFERENCES `messages` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE INDEX `fk_messages_properties_message_id_idx` ON `messages_properties` (`message_id` ASC)""")
        cur.execute("""CREATE INDEX `message_properties_message_id_name_idx`
                       ON `messages_properties` (`message_id` ASC, `name` ASC)""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `mailboxes_properties` (
                       `mailbox_id` BIGINT UNSIGNED NOT NULL,
                       `name` VARCHAR(128) NOT NULL,
                       `value` VARCHAR(512) NULL,
                       CONSTRAINT `fk_mailboxes_properties_mailbox_id`
                         FOREIGN KEY (`mailbox_id`)
                         REFERENCES `mailboxes` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE INDEX `fk_mailboxes_properties_mailbox_id_idx` ON `mailboxes_properties` (`mailbox_id` ASC)""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `folders_properties` (
                       `folder_id` BIGINT UNSIGNED NOT NULL,
                       `name` VARCHAR(256) NOT NULL,
                       `value` VARCHAR(512) NULL,
                       CONSTRAINT `fk_folders_properties_folder_id`
                         FOREIGN KEY (`folder_id`)
                         REFERENCES `folders` (`id`)
                         ON DELETE CASCADE
                         ON UPDATE CASCADE)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE INDEX `fk_folders_properties_folder_id_idx` ON `folders_properties` (`folder_id` ASC)""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `servers` (
                       `id` INT UNSIGNED NOT NULL AUTO_INCREMENT,
                       `ou_id` INT NOT NULL,
                       `replica_id` INT NOT NULL DEFAULT 1,
                       `change_number` INT NOT NULL DEFAULT 1,
                       PRIMARY KEY (`id`),
                       CONSTRAINT `fk_servers_ou_id`
                         FOREIGN KEY (`ou_id`)
                         REFERENCES `organizational_units` (`id`)
                         ON DELETE NO ACTION
                         ON UPDATE NO ACTION)
                       ENGINE = InnoDB""")
        cur.execute("""CREATE INDEX `fk_servers_1_idx` ON `servers` (`ou_id` ASC)""")
        # Default (localizable) folder names used when provisioning mailboxes;
        # an English row is inserted so provisioning works out of the box.
        cur.execute("""CREATE TABLE IF NOT EXISTS `provisioning_folders` (
                       `locale` VARCHAR(15) NOT NULL,
                       `mailbox` VARCHAR(128) NOT NULL DEFAULT "OpenChange Mailbox: %s",
                       `deferred_action` VARCHAR(128) NOT NULL DEFAULT "Deferred Action",
                       `spooler_queue` VARCHAR(128) NOT NULL DEFAULT "Spooler Queue",
                       `common_views` VARCHAR(128) NOT NULL DEFAULT "Common Views",
                       `schedule` VARCHAR(128) NOT NULL DEFAULT "Schedule",
                       `finder` VARCHAR(128) NOT NULL DEFAULT "Finder",
                       `views` VARCHAR(128) NOT NULL DEFAULT "Views",
                       `shortcuts` VARCHAR(128) NOT NULL DEFAULT "Shortcuts",
                       `reminders` VARCHAR(128) NOT NULL DEFAULT "Reminders",
                       `todo` VARCHAR(128) NOT NULL DEFAULT "To-Do",
                       `tracked_mail_processing` VARCHAR(128) NOT NULL DEFAULT "Tracked Mail Processing",
                       `top_info_store` VARCHAR(128) NOT NULL DEFAULT "Top of Information Store",
                       `inbox` VARCHAR(128) NOT NULL DEFAULT "Inbox",
                       `outbox` VARCHAR(128) NOT NULL DEFAULT "Outbox",
                       `sent_items` VARCHAR(128) NOT NULL DEFAULT "Sent Items",
                       `deleted_items` VARCHAR(128) NOT NULL DEFAULT "Deleted Items",
                       PRIMARY KEY (`locale`)
                       ) ENGINE = InnoDB""")
        cur.execute("""INSERT INTO `provisioning_folders` SET locale = 'en'""")
        cur.execute("""CREATE TABLE IF NOT EXISTS `provisioning_special_folders` (
                       `locale` VARCHAR(15) NOT NULL,
                       `drafts` VARCHAR(128) NOT NULL DEFAULT "Drafts",
                       `calendar` VARCHAR(128) NOT NULL DEFAULT "Calendar",
                       `contacts` VARCHAR(128) NOT NULL DEFAULT "Contacts",
                       `tasks` VARCHAR(128) NOT NULL DEFAULT "Tasks",
                       `notes` VARCHAR(128) NOT NULL DEFAULT "Notes",
                       `journal` VARCHAR(128) NOT NULL DEFAULT "Journal",
                       PRIMARY KEY (`locale`)
                       ) ENGINE = InnoDB""")
        cur.execute("""INSERT INTO `provisioning_special_folders` SET locale = 'en'""")

    @classmethod
    def unapply(cls, cur, **kwargs):
        # Accept **kwargs for consistency with the other migrations' unapply()
        # signatures (backward compatible: positional call still works).
        # Tables are dropped in reverse dependency order so no FK constraint
        # blocks a DROP.
        for query in ("DROP TABLE provisioning_special_folders",
                      "DROP TABLE provisioning_folders",
                      "DROP TABLE servers",
                      "DROP TABLE folders_properties",
                      "DROP TABLE mailboxes_properties",
                      "DROP TABLE messages_properties",
                      "DROP TABLE messages",
                      "DROP TABLE folders",
                      "DROP TABLE mailboxes",
                      "DROP TABLE public_folders",
                      "DROP TABLE organizational_units"):
            cur.execute(query)
@migration('openchangedb', 2)
class ReplicaMappingSchemaMigration(Migration):
    """Migration 2: schema for the ReplicaID <-> ReplicaGUID mapping table."""
    description = 'Replica Id - GUID mapping schema'

    @classmethod
    def apply(cls, cur, **kwargs):
        """Create the replica_mapping table plus its two unique indexes."""
        statements = (
            """CREATE TABLE IF NOT EXISTS `replica_mapping` (
               `mailbox_id` BIGINT UNSIGNED NOT NULL,
               `replica_id` INT UNSIGNED NOT NULL,
               `replica_guid` VARCHAR(36) NOT NULL,
               CONSTRAINT `fk_replica_mapping_mailbox_id`
                 FOREIGN KEY (`mailbox_id`)
                 REFERENCES `mailboxes` (`id`)
                 ON DELETE CASCADE
                 ON UPDATE CASCADE)
               ENGINE = InnoDB""",
            """CREATE UNIQUE INDEX `fk_replica_mapping_mailbox_repl_id`
               ON `replica_mapping` (`mailbox_id` ASC, `replica_id` ASC)""",
            """CREATE UNIQUE INDEX `fk_replica_mapping_mailbox_repl_guid`
               ON `replica_mapping` (`mailbox_id` ASC, `replica_guid` ASC)""",
        )
        for statement in statements:
            cur.execute(statement)

    @classmethod
    def unapply(cls, cur, **kwargs):
        """Drop the replica_mapping table."""
        cur.execute("DROP TABLE `replica_mapping`")
@migration('openchangedb', 3)
class ReplicaMappingDataMigration(Migration):
    # One-off data migration: import per-mailbox ReplicaID -> ReplicaGUID
    # pairs from the legacy per-user TDB files into `replica_mapping`
    # (the table created by migration 2).
    description = 'Replica Id - GUID mapping data from TDB files'

    @classmethod
    def apply(cls, cur, **kwargs):
        # Mimetise what mapistore_interface.c (mapistore_init) does
        # to get the mapping path
        if 'lp' in kwargs:
            mapping_path = kwargs['lp'].private_path("mapistore")
        else:
            # No loadparm supplied by the caller: load the default samba config.
            lp = LoadParm()
            lp.load_default()
            mapping_path = lp.private_path("mapistore")
        if mapping_path is None:
            # Nothing to migrate without a mapistore private directory.
            return

        # All inserts run in one transaction so a failure leaves no
        # partially migrated data behind.
        cur.execute("START TRANSACTION")
        try:
            # Get all mailboxes
            cur.execute("SELECT name FROM mailboxes")
            for row in cur.fetchall():
                username = row[0]
                path = "{0}/{1}/replica_mapping.tdb".format(mapping_path, username)
                try:
                    tdb_file = tdb.Tdb(path, 0, tdb.DEFAULT, os.O_RDONLY)
                    for k in tdb_file.iterkeys():
                        # Check if the key is an integer
                        try:
                            # Replica ids are stored as hexadecimal key strings.
                            repl_id = int(k, base=16)
                            cls._insert_map(cur, username, repl_id, tdb_file[k])
                        except ValueError:
                            # Cannot convert to int, so no repl_id
                            continue
                except IOError:
                    # Cannot read any replica mapping
                    continue
            cur.execute("COMMIT")
        except Exception as e:
            print("Error migrating TDB files into the database {}, rollback".format(e), file=sys.stderr)
            cur.execute("ROLLBACK")
            raise

    @classmethod
    def _insert_map(cls, cur, username, repl_id, repl_guid):
        # Resolve the mailbox id by mailbox name and insert one mapping row.
        cur.execute("""INSERT INTO replica_mapping (mailbox_id, replica_id, replica_guid)
                       SELECT m.id, %s, %s
                       FROM mailboxes m
                       WHERE m.name = %s""", (repl_id, repl_guid, username))

    @classmethod
    def unapply(cls, cur, **kwargs):
        # Remove migrated rows only; the table itself belongs to migration 2.
        cur.execute("DELETE FROM `replica_mapping`")
| gpl-3.0 |
vdloo/raptiformica | vendor/consul-kv/tests/unit/api/test_mapping_to_txn_data.py | 1 | 1819 | from base64 import b64encode
from collections import OrderedDict
from consul_kv.api import _mapping_to_txn_data
from tests.testcase import TestCase
class TestMappingToTxnData(TestCase):
    """Tests for consul_kv.api._mapping_to_txn_data."""

    def setUp(self):
        # Ordered mapping so the base64 fixtures line up with the values.
        self.mapping = OrderedDict([
            ('some/key/1', 'some_value_1'),
            ('some/key/2', 'some_value_2')
        ])
        # base64('some_value_1') and base64('some_value_2')
        self.value_fixture1 = 'c29tZV92YWx1ZV8x'
        self.value_fixture2 = 'c29tZV92YWx1ZV8y'

    @staticmethod
    def _encode(value):
        # Same encoding the API applies to KV values.
        return b64encode(value.encode('utf-8')).decode('utf-8')

    def test_mapping_txn_data_value_fixtures_are_correctly_encoded(self):
        encoded = [self._encode(v) for v in self.mapping.values()]
        self.assertEqual(encoded[0], self.value_fixture1)
        self.assertEqual(encoded[1], self.value_fixture2)

    def test_mapping_to_txn_data_returns_txn_data_list_of_mapping(self):
        expected_txn_data = [
            {'KV': {'Key': 'some/key/1', 'Value': self.value_fixture1, 'Verb': 'set'}},
            {'KV': {'Key': 'some/key/2', 'Value': self.value_fixture2, 'Verb': 'set'}}
        ]
        self.assertCountEqual(_mapping_to_txn_data(self.mapping), expected_txn_data)

    def test_mapping_to_txn_data_can_deal_with_ints(self):
        # Non-string values must not raise.
        self.mapping['some/key/3'] = 123
        _mapping_to_txn_data(self.mapping)

    def test_mapping_to_txn_data_returns_txn_data_list_of_mapping_with_specified_operation(self):
        expected_txn_data = [
            {'KV': {'Key': 'some/key/1', 'Value': self.value_fixture1, 'Verb': 'cas'}},
            {'KV': {'Key': 'some/key/2', 'Value': self.value_fixture2, 'Verb': 'cas'}}
        ]
        self.assertCountEqual(_mapping_to_txn_data(self.mapping, verb='cas'), expected_txn_data)
| mit |
ryanmt/mitro | mitro-core/tools/verifydevice.py | 25 | 1548 | #!/usr/bin/python
import json
import subprocess
import sys
import urllib
def print_validation(type_string, arg_string):
v = json.loads(arg_string)
if type_string == 'new_device_login':
print v[0], v[1], v[2]
print 'https://localhost:8443/mitro-core/user/VerifyDevice?' + urllib.urlencode({
'user': v[0],
'token': v[1],
'token_signature': v[2],
})
elif type_string == 'address_verification':
print 'https://localhost:8443/mitro-core/user/VerifyAccount?' + urllib.urlencode({
'user': v[0],
'code': v[1],
})
elif type_string == 'new_user_invitation':
print 'http://www.mitro.co/install.html#' + urllib.urlencode({
'u': v[1],
'p': v[2],
})
else:
print 'unknown type', type_string
def main():
    # Manual mode: "verifydevice.py <type> <json-args>" prints one URL.
    if len(sys.argv) == 3:
        type_string = sys.argv[1]
        arg_string = sys.argv[2]
        print_validation(type_string, arg_string)
        sys.exit(0)

    # Otherwise read every unsent entry straight from the email queue table
    # (psql with --tuples-only --no-align emits "type|args" lines).
    command = ('psql', 'mitro', '--tuples-only', '--no-align', '-c', 'select type_string,arg_string from email_queue where attempted_time is null order by id;')
    p = subprocess.Popen(command, stdout=subprocess.PIPE)
    lines = p.stdout.read()
    code = p.wait()
    assert code == 0
    for line in lines.split('\n'):
        if line == '':
            continue
        type_string, arg_string = line.split('|')
        print_validation(type_string, arg_string)
        # Blank line between entries (Python 2 print statement).
        print


if __name__ == '__main__':
    main()
| gpl-3.0 |
mw46d/ardupilot | Tools/autotest/param_metadata/param_parse.py | 6 | 9603 | #!/usr/bin/env python
from __future__ import print_function
import glob
import os
import re
import sys
from optparse import OptionParser
from param import (Library, Parameter, Vehicle, known_group_fields,
known_param_fields, required_param_fields, known_units)
from htmlemit import HtmlEmit
from rstemit import RSTEmit
from wikiemit import WikiEmit
from xmlemit import XmlEmit
from mdemit import MDEmit
parser = OptionParser("param_parse.py [options]")
parser.add_option("-v", "--verbose", dest='verbose', action='store_true', default=False, help="show debugging output")
parser.add_option("--vehicle", default='*', help="Vehicle type to generate for")
parser.add_option("--no-emit", dest='emit_params', action='store_false', default=True, help="don't emit parameter documention, just validate")
(opts, args) = parser.parse_args()
# Regular expressions for parsing the parameter metadata
prog_param = re.compile(r"@Param: (\w+).*((?:\n[ \t]*// @(\w+)(?:{([^}]+)})?: (.*))+)(?:\n\n|\n[ \t]+[A-Z])", re.MULTILINE)
# match e.g @Value: 0=Unity, 1=Koala, 17=Liability
prog_param_fields = re.compile(r"[ \t]*// @(\w+): (.*)")
# match e.g @Value{Copter}: 0=Volcano, 1=Peppermint
prog_param_tagged_fields = re.compile(r"[ \t]*// @(\w+){([^}]+)}: (.*)")
prog_groups = re.compile(r"@Group: *(\w+).*((?:\n[ \t]*// @(Path): (\S+))+)", re.MULTILINE)
apm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../')
vehicle_paths = glob.glob(apm_path + "%s/Parameters.cpp" % opts.vehicle)
extension = 'cpp'
if len(vehicle_paths) == 0:
vehicle_paths = glob.glob(apm_path + "%s/Parameters.pde" % opts.vehicle)
extension = 'pde'
vehicle_paths.sort(reverse=True)
vehicles = []
libraries = []
error_count = 0
def debug(str_to_print):
    """Print *str_to_print* only when --verbose was given on the command line."""
    if not opts.verbose:
        return
    print(str_to_print)
def error(str_to_print):
    """Print an error message and bump the global error counter."""
    global error_count
    error_count = error_count + 1
    print(str_to_print)
# Maps vehicle source-directory names to the vehicle names used in
# @Value{Vehicle}/@Field{Vehicle} metadata tags.
truename_map = {
    "APMrover2": "Rover",
    "ArduSub": "Sub",
    "ArduCopter": "Copter",
    "ArduPlane": "Plane",
    "AntennaTracker": "Tracker",
}
# Build a Vehicle object for each Parameters.cpp/.pde found.
for vehicle_path in vehicle_paths:
    name = os.path.basename(os.path.dirname(vehicle_path))
    path = os.path.normpath(os.path.dirname(vehicle_path))
    vehicles.append(Vehicle(name, path, truename_map[name]))
    debug('Found vehicle type %s' % name)

if len(vehicles) > 1:
    print("Single vehicle only, please")
    sys.exit(1)

# Parse @Param and @Group metadata from each vehicle's Parameters file.
for vehicle in vehicles:
    debug("===\n\n\nProcessing %s" % vehicle.name)

    f = open(vehicle.path+'/Parameters.' + extension)
    p_text = f.read()
    f.close()

    param_matches = prog_param.findall(p_text)
    group_matches = prog_groups.findall(p_text)

    debug(group_matches)
    # Each @Group becomes a Library; its fields (e.g. @Path) are attached
    # as attributes for process_library() to use later.
    for group_match in group_matches:
        l = Library(group_match[0])
        fields = prog_param_fields.findall(group_match[1])
        for field in fields:
            if field[0] in known_group_fields:
                setattr(l, field[0], field[1])
            else:
                error("group: unknown parameter metadata field '%s'" % field[0])
        if not any(l.name == parsed_l.name for parsed_l in libraries):
            libraries.append(l)

    # Each @Param block becomes a Parameter attached to the vehicle.
    for param_match in param_matches:
        p = Parameter(vehicle.name+":"+param_match[0])
        debug(p.name + ' ')
        field_text = param_match[1]
        fields = prog_param_fields.findall(field_text)
        field_list = []
        for field in fields:
            field_list.append(field[0])
            if field[0] in known_param_fields:
                # Vehicle parameters have no library prefix to substitute.
                value = re.sub('@PREFIX@', "", field[1])
                setattr(p, field[0], value)
            else:
                error("param: unknown parameter metadata field '%s'" % field[0])
        # Check mandatory metadata (e.g. Description) is present.
        for req_field in required_param_fields:
            if req_field not in field_list:
                error("missing parameter metadata field '%s' in %s" % (req_field, field_text))

        vehicle.params.append(p)

    debug("Processed %u params" % len(vehicle.params))

debug("Found %u documented libraries" % len(libraries))

alllibs = libraries[:]

# Only one vehicle is supported (checked above).
vehicle = vehicles[0]
def process_library(vehicle, library, pathprefix=None):
    '''Parse @Param/@Group metadata from every source file of one library.

    library.Path is a comma-separated list of files; nested @Group entries
    found inside a file are processed recursively (with the enclosing
    library's name prepended) and appended to the global alllibs list.
    Parameters found are appended to library.params; problems are reported
    through error().
    '''
    paths = library.Path.split(',')
    for path in paths:
        path = path.strip()
        debug("\n Processing file '%s'" % path)
        # Resolve the file name: explicit prefix (recursive call), vehicle
        # directory (bare .pde file name) or the shared libraries tree.
        if pathprefix is not None:
            libraryfname = os.path.join(pathprefix, path)
        elif path.find('/') == -1:
            if len(vehicles) != 1:
                print("Unable to handle multiple vehicles with .pde library")
                continue
            libraryfname = os.path.join(vehicles[0].path, path)
        else:
            libraryfname = os.path.normpath(os.path.join(apm_path + '/libraries/' + path))
        if path and os.path.exists(libraryfname):
            f = open(libraryfname)
            p_text = f.read()
            f.close()
        else:
            error("Path %s not found for library %s" % (path, library.name))
            continue

        param_matches = prog_param.findall(p_text)
        debug("Found %u documented parameters" % len(param_matches))
        for param_match in param_matches:
            p = Parameter(library.name+param_match[0])
            debug(p.name + ' ')
            field_text = param_match[1]
            # Plain "// @Field: value" entries.
            fields = prog_param_fields.findall(field_text)
            for field in fields:
                if field[0] in known_param_fields:
                    value = re.sub('@PREFIX@', library.name, field[1])
                    setattr(p, field[0], value)
                else:
                    error("param: unknown parameter metadata field %s" % field[0])

            debug("matching %s" % field_text)
            # Vehicle-tagged "// @Field{Copter,Plane}: value" entries only
            # apply when this vehicle is listed.
            fields = prog_param_tagged_fields.findall(field_text)
            for field in fields:
                only_for_vehicles = field[1].split(",")
                only_for_vehicles = [x.strip() for x in only_for_vehicles]
                delta = set(only_for_vehicles) - set(truename_map.values())
                if len(delta):
                    error("Unknown vehicles (%s)" % delta)
                debug("field[0]=%s vehicle=%s truename=%s field[1]=%s only_for_vehicles=%s\n" % (field[0], vehicle.name,vehicle.truename,field[1], str(only_for_vehicles)))
                if vehicle.truename not in only_for_vehicles:
                    continue
                if field[0] in known_param_fields:
                    value = re.sub('@PREFIX@', library.name, field[2])
                    setattr(p, field[0], value)
                else:
                    error("tagged param: unknown parameter metadata field '%s'" % field[0])

            library.params.append(p)

        # Recurse into @Group entries declared inside this library's files.
        group_matches = prog_groups.findall(p_text)
        debug("Found %u groups" % len(group_matches))
        debug(group_matches)
        for group_match in group_matches:
            group = group_match[0]
            debug("Group: %s" % group)
            lib = Library(group)
            fields = prog_param_fields.findall(group_match[1])
            for field in fields:
                if field[0] in known_group_fields:
                    setattr(lib, field[0], field[1])
                else:
                    error("unknown parameter metadata field '%s'" % field[0])
            if not any(lib.name == parsed_l.name for parsed_l in libraries):
                # Nested groups are namespaced by the enclosing library name.
                lib.name = library.name + lib.name
                debug("Group name: %s" % lib.name)
                process_library(vehicle, lib, os.path.dirname(libraryfname))
                alllibs.append(lib)
# Process each top-level library that declared a @Path; groups without one
# cannot be located on disk and are skipped with an error.
for library in libraries:
    debug("===\n\n\nProcessing library %s" % library.name)

    if hasattr(library, 'Path'):
        process_library(vehicle, library)
    else:
        error("Skipped: no Path found")

    debug("Processed %u documented parameters" % len(library.params))

# sort libraries by name
alllibs = sorted(alllibs, key=lambda x : x.name)

libraries = alllibs
def is_number(numberString):
    """Return True when *numberString* parses as a float, False otherwise."""
    try:
        float(numberString)
    except ValueError:
        return False
    return True
def validate(param):
    """Validate one parameter's metadata, reporting problems via error().

    Checks that @Range is two numeric values and that @Units (when present
    and non-empty) is one of the known unit strings.
    """
    # Validate the Range field: exactly "min max", both numeric.
    if hasattr(param, "Range"):
        range_values = param.__dict__["Range"].split(" ")
        if len(range_values) != 2:
            error("Invalid Range values for %s" % (param.name))
            return
        min_value, max_value = range_values
        if not is_number(min_value):
            error("Min value not number: %s %s" % (param.name, min_value))
            return
        if not is_number(max_value):
            error("Max value not number: %s %s" % (param.name, max_value))
            return
    # Validate the Units field against the known-units whitelist.
    if hasattr(param, "Units"):
        units = param.__dict__["Units"]
        if units != "" and units not in known_units:
            error("unknown units field '%s'" % units)
# Validate every parameter found, both vehicle-level and library-level.
for vehicle in vehicles:
    for param in vehicle.params:
        validate(param)

for library in libraries:
    for param in library.params:
        validate(param)
def do_emit(emit):
    # Emit documentation for the vehicle and every documented library
    # through one emitter backend (XML/Wiki/HTML/RST/MD).
    emit.set_annotate_with_vehicle(len(vehicles) > 1)
    for vehicle in vehicles:
        # NOTE(review): `f` is the module-level file object left over from the
        # parsing loop (already closed) — presumably ignored by the emitters;
        # confirm against the emitter implementations.
        emit.emit(vehicle, f)

    emit.start_libraries()

    for library in libraries:
        if library.params:
            emit.emit(library, f)

    emit.close()
# Emit the documentation in every supported format unless --no-emit was
# given; exit status reflects the number of metadata errors found.
if opts.emit_params:
    do_emit(XmlEmit())
    do_emit(WikiEmit())
    do_emit(HtmlEmit())
    do_emit(RSTEmit())
    do_emit(MDEmit())

sys.exit(error_count)
| gpl-3.0 |
shakamunyi/docker-registry | depends/docker-registry-core/docker_registry/core/exceptions.py | 36 | 2180 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docker_registry.core.exceptions
~~~~~~~~~~~~~~~~~~~~~
Provide docker_registry exceptions to be used consistently in the drivers
and registry.
"""
__all__ = [
"UnspecifiedError",
"UsageError",
"NotImplementedError", "FileNotFoundError", "WrongArgumentsError",
"ConfigError",
"ConnectionError",
"UnreachableError", "MissingError", "BrokenError"
]
class UnspecifiedError(Exception):
    """Base class for all exceptions in docker_registry."""

    def __init__(self, *args, **kwargs):
        # An optional `message` keyword carries extra detail; any remaining
        # args/kwargs are forwarded to Exception unchanged.
        if 'message' in kwargs:
            self.message = kwargs.pop('message')
        else:
            self.message = 'No details'
        super(UnspecifiedError, self).__init__(*args, **kwargs)
class UsageError(UnspecifiedError):
    """Exceptions related to use of the library.

    Missing files, wrong argument type, etc.
    """


# NOTE(review): the next three classes intentionally shadow the Python
# builtins of the same names within this module's namespace.
class NotImplementedError(UsageError):
    """The requested feature is not supported / not implemented."""


class FileNotFoundError(UsageError):
    """The requested (config) file is missing."""


class WrongArgumentsError(UsageError):
    """Expected arguments type not satisfied."""


class ConfigError(UsageError):
    """The provided configuration has problems."""


class ConnectionError(UnspecifiedError):
    """Network communication related errors all inherit this."""


class UnreachableError(ConnectionError):
    """The requested server is not reachable."""


class MissingError(ConnectionError):
    """The requested ressource is not to be found on the server."""


class BrokenError(ConnectionError):
    """Something died on our hands, that the server couldn't digest..."""
anryko/ansible | test/units/module_utils/basic/test__log_invocation.py | 169 | 1797 | # -*- coding: utf-8 -*-
# (c) 2016, James Cammarata <jimi@sngx.net>
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
# Module arguments as they would arrive on stdin.
ARGS = dict(foo=False, bar=[1, 2, 3], bam="bam", baz=u'baz')

# Matching argument spec; `no_log=True` marks values that must be masked
# in the log output, and `password` is masked by name heuristics.
ARGUMENT_SPEC = dict(
    foo=dict(default=True, type='bool'),
    bar=dict(default=[], type='list'),
    bam=dict(default="bam"),
    baz=dict(default=u"baz"),
    password=dict(default=True),
    no_log=dict(default="you shouldn't see me", no_log=True),
)
@pytest.mark.parametrize('am, stdin', [(ARGUMENT_SPEC, ARGS)], indirect=['am', 'stdin'])
def test_module_utils_basic__log_invocation(am, mocker):
    """_log_invocation logs sanitized arguments, masking no_log/password values."""
    am.log = mocker.MagicMock()
    am._log_invocation()

    # Message is generated from a dict so argument order is unknown;
    # verify the length against a reference string and then check each
    # fragment individually instead of comparing the whole message.
    args = am.log.call_args[0]
    assert len(args) == 1
    message = args[0]

    reference = 'Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD'
    assert len(message) == len(reference)
    assert message.startswith('Invoked with ')
    for fragment in (' bam=bam',
                     ' bar=[1, 2, 3]',
                     ' foo=False',
                     ' baz=baz',
                     ' no_log=NOT_LOGGING_PARAMETER',
                     ' password=NOT_LOGGING_PASSWORD'):
        assert fragment in message

    assert am.log.call_args[1] == {
        'log_args': {
            'foo': 'False',
            'bar': '[1, 2, 3]',
            'bam': 'bam',
            'baz': 'baz',
            'password': 'NOT_LOGGING_PASSWORD',
            'no_log': 'NOT_LOGGING_PARAMETER',
        }
    }
asm666/sympy | sympy/functions/elementary/tests/test_integers.py | 39 | 6533 | from sympy import Symbol, floor, nan, oo, E, symbols, ceiling, pi, Rational, \
Float, I, sin, exp, log, factorial, frac
from sympy.utilities.pytest import XFAIL
# Shared symbols: x unrestricted, i imaginary, y real, k/n integers.
x = Symbol('x')
i = Symbol('i', imaginary=True)
y = Symbol('y', real=True)
k, n = symbols('k,n', integer=True)
def test_floor():
    """Spot-check floor() on specials, numerics, complex and symbolic input."""
    # special values pass through
    assert floor(nan) == nan
    assert floor(oo) == oo
    assert floor(-oo) == -oo

    # integers are unchanged
    assert floor(0) == 0
    assert floor(1) == 1
    assert floor(-1) == -1

    # irrational constants
    assert floor(E) == 2
    assert floor(-E) == -3
    assert floor(2*E) == 5
    assert floor(-2*E) == -6
    assert floor(pi) == 3
    assert floor(-pi) == -4

    # rationals and floats
    assert floor(Rational(1, 2)) == 0
    assert floor(-Rational(1, 2)) == -1
    assert floor(Rational(7, 3)) == 2
    assert floor(-Rational(7, 3)) == -3

    assert floor(Float(17.0)) == 17
    assert floor(-Float(17.0)) == -17
    assert floor(Float(7.69)) == 7
    assert floor(-Float(7.69)) == -8

    # imaginary/complex arguments: floor acts componentwise
    assert floor(I) == I
    assert floor(-I) == -I
    e = floor(i)
    assert e.func is floor and e.args[0] == i

    assert floor(oo*I) == oo*I
    assert floor(-oo*I) == -oo*I

    assert floor(2*I) == 2*I
    assert floor(-2*I) == -2*I

    assert floor(I/2) == 0
    assert floor(-I/2) == -I

    assert floor(E + 17) == 19
    assert floor(pi + 2) == 5

    assert floor(E + pi) == floor(E + pi)
    assert floor(I + pi) == floor(I + pi)

    # floor is idempotent
    assert floor(floor(pi)) == 3
    assert floor(floor(y)) == floor(y)
    assert floor(floor(x)) == floor(floor(x))

    # symbolic arguments stay unevaluated unless integrality is known
    assert floor(x) == floor(x)
    assert floor(2*x) == floor(2*x)
    assert floor(k*x) == floor(k*x)

    assert floor(k) == k
    assert floor(2*k) == 2*k
    assert floor(k*n) == k*n

    assert floor(k/2) == floor(k/2)

    assert floor(x + y) == floor(x + y)

    assert floor(x + 3) == floor(x + 3)
    assert floor(x + k) == floor(x + k)

    # integer terms may be pulled out when the rest is real
    assert floor(y + 3) == floor(y) + 3
    assert floor(y + k) == floor(y) + k

    assert floor(3 + I*y + pi) == 6 + floor(y)*I

    assert floor(k + n) == k + n

    assert floor(x*I) == floor(x*I)
    assert floor(k*I) == k*I

    assert floor(Rational(23, 10) - E*I) == 2 - 3*I

    # evaluation through numeric functions
    assert floor(sin(1)) == 0
    assert floor(sin(-1)) == -1

    assert floor(exp(2)) == 7

    # log(8)/log(2) does not auto-simplify to 3; only evalf gets there
    assert floor(log(8)/log(2)) != 2
    assert int(floor(log(8)/log(2)).evalf(chop=True)) == 3

    assert floor(factorial(50)/exp(1)) == \
        11188719610782480504630258070757734324011354208865721592720336800

    # relational behaviour: decidable for real y, symbolic otherwise
    assert (floor(y) <= y) == True
    assert (floor(y) > y) == False
    assert (floor(x) <= x).is_Relational  # x could be non-real
    assert (floor(x) > x).is_Relational
    assert (floor(x) <= y).is_Relational  # arg is not same as rhs
    assert (floor(x) > y).is_Relational
def test_ceiling():
    """Mirror of test_floor for ceiling()."""
    # special values pass through
    assert ceiling(nan) == nan
    assert ceiling(oo) == oo
    assert ceiling(-oo) == -oo

    # integers are unchanged
    assert ceiling(0) == 0
    assert ceiling(1) == 1
    assert ceiling(-1) == -1

    # irrational constants
    assert ceiling(E) == 3
    assert ceiling(-E) == -2
    assert ceiling(2*E) == 6
    assert ceiling(-2*E) == -5
    assert ceiling(pi) == 4
    assert ceiling(-pi) == -3

    # rationals and floats
    assert ceiling(Rational(1, 2)) == 1
    assert ceiling(-Rational(1, 2)) == 0
    assert ceiling(Rational(7, 3)) == 3
    assert ceiling(-Rational(7, 3)) == -2

    assert ceiling(Float(17.0)) == 17
    assert ceiling(-Float(17.0)) == -17
    assert ceiling(Float(7.69)) == 8
    assert ceiling(-Float(7.69)) == -7

    # imaginary/complex arguments: ceiling acts componentwise
    assert ceiling(I) == I
    assert ceiling(-I) == -I
    e = ceiling(i)
    assert e.func is ceiling and e.args[0] == i

    assert ceiling(oo*I) == oo*I
    assert ceiling(-oo*I) == -oo*I

    assert ceiling(2*I) == 2*I
    assert ceiling(-2*I) == -2*I

    assert ceiling(I/2) == I
    assert ceiling(-I/2) == 0

    assert ceiling(E + 17) == 20
    assert ceiling(pi + 2) == 6

    assert ceiling(E + pi) == ceiling(E + pi)
    assert ceiling(I + pi) == ceiling(I + pi)

    # ceiling is idempotent
    assert ceiling(ceiling(pi)) == 4
    assert ceiling(ceiling(y)) == ceiling(y)
    assert ceiling(ceiling(x)) == ceiling(ceiling(x))

    # symbolic arguments stay unevaluated unless integrality is known
    assert ceiling(x) == ceiling(x)
    assert ceiling(2*x) == ceiling(2*x)
    assert ceiling(k*x) == ceiling(k*x)

    assert ceiling(k) == k
    assert ceiling(2*k) == 2*k
    assert ceiling(k*n) == k*n

    assert ceiling(k/2) == ceiling(k/2)

    assert ceiling(x + y) == ceiling(x + y)

    assert ceiling(x + 3) == ceiling(x + 3)
    assert ceiling(x + k) == ceiling(x + k)

    # integer terms may be pulled out when the rest is real
    assert ceiling(y + 3) == ceiling(y) + 3
    assert ceiling(y + k) == ceiling(y) + k

    assert ceiling(3 + pi + y*I) == 7 + ceiling(y)*I

    assert ceiling(k + n) == k + n

    assert ceiling(x*I) == ceiling(x*I)
    assert ceiling(k*I) == k*I

    assert ceiling(Rational(23, 10) - E*I) == 3 - 2*I

    # evaluation through numeric functions
    assert ceiling(sin(1)) == 1
    assert ceiling(sin(-1)) == 0

    assert ceiling(exp(2)) == 8

    # -log(8)/log(2) does not auto-simplify to -3; only evalf gets there
    assert ceiling(-log(8)/log(2)) != -2
    assert int(ceiling(-log(8)/log(2)).evalf(chop=True)) == -3

    assert ceiling(factorial(50)/exp(1)) == \
        11188719610782480504630258070757734324011354208865721592720336801

    # relational behaviour: decidable for real y, symbolic otherwise
    assert (ceiling(y) >= y) == True
    assert (ceiling(y) < y) == False
    assert (ceiling(x) >= x).is_Relational  # x could be non-real
    assert (ceiling(x) < x).is_Relational
    assert (ceiling(x) >= y).is_Relational  # arg is not same as rhs
    assert (ceiling(x) < y).is_Relational
def test_frac():
    """frac() on symbols, rationals and complex arguments."""
    assert isinstance(frac(x), frac)
    assert frac(n) == 0
    assert frac(nan) == nan
    assert frac(Rational(4, 3)) == Rational(1, 3)
    # frac of a negative rational is the positive fractional remainder
    assert frac(-Rational(4, 3)) == Rational(2, 3)

    r = Symbol('r', real=True)
    # frac distributes over real and imaginary parts; integer parts vanish
    assert frac(I*r) == I*frac(r)
    assert frac(1 + I*r) == I*frac(r)
    assert frac(0.5 + I*r) == 0.5 + I*frac(r)
    assert frac(n + I*r) == I*frac(r)
    assert frac(n + I*k) == 0
    assert frac(x + I*x) == frac(x + I*x)
    assert frac(x + I*n) == frac(x)

    assert frac(x).rewrite(floor) == x - floor(x)
def test_series():
    """Series expansion of floor/ceiling around generic and exact points."""
    x, y = symbols('x,y')
    assert floor(x).nseries(x, y, 100) == floor(y)
    assert ceiling(x).nseries(x, y, 100) == ceiling(y)
    assert floor(x).nseries(x, pi, 100) == 3
    assert ceiling(x).nseries(x, pi, 100) == 4
    # At an integer point the one-sided limits differ by 1.
    assert floor(x).nseries(x, 0, 100) == 0
    assert ceiling(x).nseries(x, 0, 100) == 1
    assert floor(-x).nseries(x, 0, 100) == -1
    assert ceiling(-x).nseries(x, 0, 100) == 0
# Known failure: floor should split integer offsets out of complex sums.
@XFAIL
def test_issue_4149():
    assert floor(3 + pi*I + y*I) == 3 + floor(pi + y)*I
    assert floor(3*I + pi*I + y*I) == floor(3 + pi + y)*I
    assert floor(3 + E + pi*I + y*I) == 5 + floor(pi + y)*I
ArcherSys/ArcherSys | MCEDIT/mcedit/stock-brushes/Replace.py | 8 | 1539 | from pymclevel.materials import Block
from editortools.brush import createBrushMask, createTileEntities
from pymclevel import block_fill
# Brush option metadata read by MCEdit's brush-tool plugin loader.
displayName = 'Replace'
mainBlock = 'Block To Replace With'
secondaryBlock = 'Block'
# Options whose block picker allows a wildcard (match any data value).
wildcardBlocks = ['Block']
def createInputs(self):
    # Declare the option widgets shown for the Replace brush.
    # NOTE(review): `materials` and `tool` are not imported in this file —
    # presumably injected into the module namespace by MCEdit's plugin
    # loader; confirm before refactoring.
    self.inputs = (
        {'Hollow': False},
        {'Noise': 100},
        {'W': (3, 1, 4096), 'H': (3, 1, 4096), 'L': (3, 1, 4096)},
        {'Block': materials.blockWithID(1, 0)},
        {'Block To Replace With': materials.blockWithID(1, 0)},
        {'Swap': tool.swap},
        {'Minimum Spacing': 1}
    )
def applyToChunkSlices(self, op, chunk, slices, brushBox, brushBoxThisChunk):
    # Replace matching blocks within this chunk's slice of the brush volume.
    brushMask = createBrushMask(op.tool.getBrushSize(), op.options['Style'], brushBox.origin, brushBoxThisChunk, op.options['Noise'], op.options['Hollow'])

    blocks = chunk.Blocks[slices]
    data = chunk.Data[slices]

    if op.options['Block'].wildcard:
        # Wildcard: match the chosen block ID with any of its 16 data values.
        print "Wildcard replace"
        blocksToReplace = []
        for i in range(16):
            blocksToReplace.append(op.editor.level.materials.blockWithID(op.options['Block'].ID, i))
    else:
        blocksToReplace = [op.options['Block']]

    replaceTable = block_fill.blockReplaceTable(blocksToReplace)
    # Restrict the brush mask to cells whose current (id, data) should be replaced.
    replaceMask = replaceTable[blocks, data]
    brushMask &= replaceMask

    chunk.Blocks[slices][brushMask] = op.options['Block To Replace With'].ID
    chunk.Data[slices][brushMask] = op.options['Block To Replace With'].blockData
    createTileEntities(op.options['Block To Replace With'], brushBoxThisChunk, chunk)
pepeantena4040/MiSitioWeb | meta/lib/oeqa/runtime/connman.py | 5 | 1079 | import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
    # Skip the whole module when the image under test does not ship connman.
    if not oeRuntimeTest.hasPackage("connman"):
        skipModule("No connman package in image")
class ConnmanTest(oeRuntimeTest):
    """Runtime checks that the connman daemon is present and running.

    NOTE: Python 2 module (uses the print statement below).
    """
    def service_status(self, service):
        # Best-effort diagnostics: only systemd images expose service logs.
        if oeRuntimeTest.hasFeature("systemd"):
            (status, output) = self.target.run('systemctl status -l %s' % service)
            return output
        else:
            return "Unable to get status or logs for %s" % service
    @skipUnlessPassed('test_ssh')
    def test_connmand_help(self):
        # Sanity check: the binary exists and exits 0 on --help.
        (status, output) = self.target.run('/usr/sbin/connmand --help')
        self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
    @testcase(221)
    @skipUnlessPassed('test_connmand_help')
    def test_connmand_running(self):
        # '[c]onnmand' keeps the grep process itself out of the ps listing.
        (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand')
        if status != 0:
            print self.service_status("connman")
            self.fail("No connmand process running")
| gpl-2.0 |
huggingface/pytorch-transformers | src/transformers/models/vit/configuration_vit.py | 2 | 5272 | # coding=utf-8
# Copyright 2021 Google AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ViT model configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nielsr/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a :class:`~transformers.ViTModel`. It is used to
    instantiate a ViT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ViT `google/vit-base-patch16-224
    <https://huggingface.co/google/vit-base-patch16-224>`__ architecture.
    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
    outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
    Args:
        hidden_size (:obj:`int`, `optional`, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (:obj:`int`, `optional`, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (:obj:`int`, `optional`, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"` and :obj:`"gelu_new"` are supported.
        hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If True, use gradient checkpointing to save memory at the expense of slower backward pass.
        image_size (:obj:`int`, `optional`, defaults to :obj:`224`):
            The size (resolution) of each image.
        patch_size (:obj:`int`, `optional`, defaults to :obj:`16`):
            The size (resolution) of each patch.
        num_channels (:obj:`int`, `optional`, defaults to :obj:`3`):
            The number of input channels.
    Example::
        >>> from transformers import ViTModel, ViTConfig
        >>> # Initializing a ViT vit-base-patch16-224 style configuration
        >>> configuration = ViTConfig()
        >>> # Initializing a model from the vit-base-patch16-224 style configuration
        >>> model = ViTModel(configuration)
        >>> # Accessing the model configuration
        >>> configuration = model.config
    """
    model_type = "vit"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        # NOTE(review): is_encoder_decoder is accepted but never stored or
        # forwarded below -- confirm it is intentionally ignored.
        is_encoder_decoder=False,
        image_size=224,
        patch_size=16,
        num_channels=3,
        **kwargs
    ):
        # Extra kwargs (e.g. id2label, output_hidden_states) are handled by
        # the PretrainedConfig base class.
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
| apache-2.0 |
phihag/youtube-dl | test/test_iqiyi_sdk_interpreter.py | 36 | 1104 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor import IqiyiIE
class IqiyiIEWithCredentials(IqiyiIE):
    # Test double: supplies fixed dummy credentials instead of reading them
    # from .netrc / downloader options, so _login() always has something to
    # try (and fail) with.
    def _get_login_info(self):
        return 'foo', 'bar'
class WarningLogger(object):
    """Minimal logger stand-in that records warnings and drops all else.

    Messages passed to ``warning`` accumulate in ``self.messages`` (in
    call order); ``debug`` and ``error`` are accepted but discarded.
    """

    def __init__(self):
        self.messages = []

    def warning(self, msg):
        self.messages.append(msg)

    # Non-warning levels are intentionally silent no-ops.
    def debug(self, msg):
        return None

    def error(self, msg):
        return None
class TestIqiyiSDKInterpreter(unittest.TestCase):
    def test_iqiyi_sdk_interpreter(self):
        '''
        Test the functionality of IqiyiSDKInterpreter by trying to log in
        If `sign` is incorrect, /validate call throws an HTTP 556 error
        '''
        # Network-dependent test: performs a real login attempt with dummy
        # credentials and only checks that the failure is reported through
        # the captured logger (first warning message).
        logger = WarningLogger()
        ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
        ie._login()
        self.assertTrue('unable to log in:' in logger.messages[0])
if __name__ == '__main__':
    unittest.main()
| unlicense |
ntt-sic/heat | heat/common/exception.py | 1 | 10232 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Heat exception subclasses"""
import functools
import sys
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
from heat.openstack.common.py3kcompat import urlutils
_FATAL_EXCEPTION_FORMAT_ERRORS = False
logger = logging.getLogger(__name__)
class RedirectException(Exception):
    """Raised to signal that the caller should redirect to ``self.url``."""
    def __init__(self, url):
        # Stored pre-parsed (urlparse result), not as the raw string.
        self.url = urlutils.urlparse(url)
class KeystoneError(Exception):
    """Error reported by the Keystone identity service.

    Carries the service status ``code`` and its textual ``message``.
    """

    def __init__(self, code, message):
        self.code = code
        self.message = message

    def __str__(self):
        detail = (self.code, self.message)
        return "Code: %s, message: %s" % detail
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.

    NOTE: Python 2 module -- the three-argument ``raise`` below preserves
    the original traceback and is not valid Python 3 syntax.
    """
    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
    # to pass it in as a parameter. Otherwise we get a cyclic import of
    # nova.notifier.api -> nova.utils -> nova.exception :(
    # TODO(johannes): Also, it would be nice to use
    # utils.save_and_reraise_exception() without an import loop
    def inner(f):
        def wrapped(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception as e:
                # Save exception since it can be clobbered during processing
                # below before we can re-raise
                exc_info = sys.exc_info()
                if notifier:
                    payload = dict(args=args, exception=e)
                    payload.update(kw)
                    # Use a temp vars so we don't shadow
                    # our outer definitions.
                    temp_level = level
                    if not temp_level:
                        temp_level = notifier.ERROR
                    temp_type = event_type
                    if not temp_type:
                        # If f has multiple decorators, they must use
                        # functools.wraps to ensure the name is
                        # propagated.
                        temp_type = f.__name__
                    notifier.notify(publisher_id, temp_type, temp_level,
                                    payload)
                # re-raise original exception since it may have been clobbered
                raise exc_info[0], exc_info[1], exc_info[2]
        return functools.wraps(f)(wrapped)
    return inner
class HeatException(Exception):
    """Base Heat Exception
    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That msg_fmt will get printf'd
    with the keyword arguments provided to the constructor.
    """
    # Fallback text only; note the base class itself defines 'message' but
    # not 'msg_fmt', so instantiating HeatException directly would raise
    # AttributeError in __init__. NOTE(review): confirm subclass-only use
    # is intended.
    # NOTE: Python 2 module (iteritems/unicode/three-argument raise below).
    message = _("An unknown exception occurred.")
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        try:
            self.message = self.msg_fmt % kwargs
        except KeyError:
            exc_info = sys.exc_info()
            #kwargs doesn't match a variable in the message
            #log the issue and the kwargs
            logger.exception(_('Exception in string format operation'))
            for name, value in kwargs.iteritems():
                logger.error("%s: %s" % (name, value))
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                # Re-raise the original KeyError with its traceback.
                raise exc_info[0], exc_info[1], exc_info[2]
    def __str__(self):
        return str(self.message)
    def __unicode__(self):
        return unicode(self.message)
# Authentication / authorization failures.
class MissingCredentialError(HeatException):
    msg_fmt = _("Missing required credential: %(required)s")
class BadAuthStrategy(HeatException):
    msg_fmt = _("Incorrect auth strategy, expected \"%(expected)s\" but "
                "received \"%(received)s\"")
class AuthBadRequest(HeatException):
    msg_fmt = _("Connect error/bad request to Auth service at URL %(url)s.")
class AuthUrlNotFound(HeatException):
    msg_fmt = _("Auth service at URL %(url)s not found.")
class AuthorizationFailure(HeatException):
    msg_fmt = _("Authorization failed.")
class NotAuthenticated(HeatException):
    msg_fmt = _("You are not authenticated.")
class Forbidden(HeatException):
    msg_fmt = _("You are not authorized to complete this action.")
#NOTE(bcwaldon): here for backwards-compatability, need to deprecate.
class NotAuthorized(Forbidden):
    msg_fmt = _("You are not authorized to complete this action.")
class Invalid(HeatException):
    msg_fmt = _("Data supplied was not valid: %(reason)s")
# HTTP / request-level errors.
class AuthorizationRedirect(HeatException):
    msg_fmt = _("Redirecting to %(uri)s for authorization.")
class RequestUriTooLong(HeatException):
    msg_fmt = _("The URI was too long.")
class MaxRedirectsExceeded(HeatException):
    msg_fmt = _("Maximum redirects (%(redirects)s) was exceeded.")
class InvalidRedirect(HeatException):
    msg_fmt = _("Received invalid HTTP redirect.")
class RegionAmbiguity(HeatException):
    msg_fmt = _("Multiple 'image' service matches for region %(region)s. This "
                "generally means that a region is required and you have not "
                "supplied one.")
# Template / parameter validation errors.
class UserParameterMissing(HeatException):
    msg_fmt = _("The Parameter (%(key)s) was not provided.")
class UnknownUserParameter(HeatException):
    msg_fmt = _("The Parameter (%(key)s) was not defined in template.")
class InvalidTemplateParameter(HeatException):
    msg_fmt = _("The Parameter (%(key)s) has no attributes.")
class InvalidTemplateAttribute(HeatException):
    msg_fmt = _("The Referenced Attribute (%(resource)s %(key)s)"
                " is incorrect.")
class InvalidTemplateReference(HeatException):
    msg_fmt = _("The specified reference \"%(resource)s\" (in %(key)s)"
                " is incorrect.")
# Missing-entity errors.
class UserKeyPairMissing(HeatException):
    msg_fmt = _("The Key (%(key_name)s) could not be found.")
class FlavorMissing(HeatException):
    msg_fmt = _("The Flavor ID (%(flavor_id)s) could not be found.")
class ImageNotFound(HeatException):
    msg_fmt = _("The Image (%(image_name)s) could not be found.")
class PhysicalResourceNameAmbiguity(HeatException):
    msg_fmt = _(
        "Multiple physical resources were found with name (%(name)s).")
class InvalidTenant(HeatException):
    msg_fmt = _("Searching Tenant %(target)s "
                "from Tenant %(actual)s forbidden.")
class StackNotFound(HeatException):
    msg_fmt = _("The Stack (%(stack_name)s) could not be found.")
class StackExists(HeatException):
    msg_fmt = _("The Stack (%(stack_name)s) already exists.")
class StackValidationFailed(HeatException):
    msg_fmt = _("%(message)s")
class ResourceNotFound(HeatException):
    msg_fmt = _("The Resource (%(resource_name)s) could not be found "
                "in Stack %(stack_name)s.")
class ResourceTypeNotFound(HeatException):
    msg_fmt = _("The Resource Type (%(type_name)s) could not be found.")
class ResourceNotAvailable(HeatException):
    msg_fmt = _("The Resource (%(resource_name)s) is not available.")
class PhysicalResourceNotFound(HeatException):
    msg_fmt = _("The Resource (%(resource_id)s) could not be found.")
class WatchRuleNotFound(HeatException):
    msg_fmt = _("The Watch Rule (%(watch_name)s) could not be found.")
class ResourceFailure(HeatException):
    msg_fmt = _("%(exc_type)s: %(message)s")
    def __init__(self, exception, resource, action=None):
        # Unwrap nested ResourceFailures so the original root cause is kept.
        if isinstance(exception, ResourceFailure):
            exception = getattr(exception, 'exc', exception)
        self.exc = exception
        self.resource = resource
        self.action = action
        exc_type = type(exception).__name__
        super(ResourceFailure, self).__init__(exc_type=exc_type,
                                              message=str(exception))
class NotSupported(HeatException):
    msg_fmt = _("%(feature)s is not supported.")
class ResourcePropertyConflict(HeatException):
    msg_fmt = _('Cannot define the following properties at the same time: %s.')
    def __init__(self, *args):
        # Interpolates positionally (unlike the keyword-based base class),
        # so msg_fmt is pre-formatted here before calling up.
        self.msg_fmt = self.msg_fmt % ", ".join(args)
        super(ResourcePropertyConflict, self).__init__()
class HTTPExceptionDisguise(Exception):
    """Disguises HTTP exceptions so they can be handled by the webob fault
    application in the wsgi pipeline.
    """
    def __init__(self, exception):
        self.exc = exception
        # Capture the active traceback so it can be re-raised later.
        self.tb = sys.exc_info()[2]
class EgressRuleNotAllowed(HeatException):
    msg_fmt = _("Egress rules are only allowed when "
                "Neutron is used and the 'VpcId' property is set.")
# Generic (non-HeatException) error hierarchy used by a few callers.
class Error(Exception):
    def __init__(self, message=None):
        super(Error, self).__init__(message)
class NotFound(Error):
    pass
class InvalidContentType(HeatException):
    msg_fmt = _("Invalid content type %(content_type)s")
class RequestLimitExceeded(HeatException):
    msg_fmt = _('Request limit exceeded: %(message)s')
class StackResourceLimitExceeded(HeatException):
    msg_fmt = _('Maximum resources per stack exceeded.')
class ActionInProgress(HeatException):
    msg_fmt = _("Stack %(stack_name)s already has an action (%(action)s) "
                "in progress.")
class SoftwareConfigMissing(HeatException):
    msg_fmt = _("The config (%(software_config_id)s) could not be found.")
class StopActionFailed(HeatException):
    msg_fmt = _("Failed to stop stack (%(stack_name)s) on other engine "
                "(%(engine_id)s)")
bwrc/export2hdf5 | export2hdf5/utilities_neurone.py | 1 | 13284 | # This file is part of export2hdf5
#
# Copyright 2017
# Andreas Henelius <andreas.henelius@ttl.fi>,
# Finnish Institute of Occupational Health
#
# This code is released under the MIT License
# http://opensource.org/licenses/mit-license.php
#
# Please see the file LICENSE for details.
"""
This module contains functions for reading data from a
Bittium NeurOne device. This module currently supports
reading of data and events if the data has been recorded
in one session.
"""
import numpy as np
import xml.etree.ElementTree
from os import path
from construct import Struct, Int32sl, Int64ul
from datetime import datetime
def read_neurone_protocol(fpath):
    """
    Read the measurement protocol from an XML file.
    Arguments:
       - fpath : the path to the directory holding the
                 NeurOne measurement (i.e., the
                 directory Protocol.xml and Session.xml
                 files.
    Returns:
       - a dictionary containing (i) the names of the channels
         in the recording and (ii) meta information
         (recording start/stop times, sampling rate).
         {"meta" : <dict with metadata>,
          "channels" : <array with channel names>}
    Raises:
       - ValueError if a timestamp cannot be parsed (see notes below).
    """
    # Define filename
    fname_protocol = path.join(fpath, "Protocol.xml")
    fname_session = path.join(fpath, "Session.xml")
    # --------------------------------------------------
    # Read the protocol data
    # --------------------------------------------------
    # Define the XML namespace as a shorthand
    ns = {'xmlns': 'http://www.megaemg.com/DataSetGeneralProtocol.xsd'}
    # Get channel names and organise them according to their
    # physical order (InputNumber), which is the order
    # in which the channels are being sampled.
    docroot = xml.etree.ElementTree.parse(fname_protocol).getroot()
    channels = docroot.findall("xmlns:TableInput", namespaces=ns)
    channel_names = ['']*len(channels)
    for i,ch in enumerate(channels):
        channel_names[i] = (int(ch.findall("xmlns:PhysicalInputNumber", namespaces=ns)[0].text), ch.findall("xmlns:Name", namespaces=ns)[0].text)
    # Sort by physical input number, then drop the number.
    channel_names = [i for _,i in sorted(channel_names)]
    # Get the sampling rate
    sampling_rate = int(docroot.findall("xmlns:TableProtocol", namespaces=ns)[0].findall("xmlns:ActualSamplingFrequency", namespaces=ns)[0].text)
    # --------------------------------------------------
    # Read the session data
    # --------------------------------------------------
    # Define the XML namespace as a shorthand
    ns2 = {'xmlns': 'http://www.megaemg.com/DataSetGeneralSession.xsd'}
    # Extract the recording start/stop timestamps of the (first) session.
    docroot = xml.etree.ElementTree.parse(fname_session).getroot()
    session = docroot.findall("xmlns:TableSession", namespaces=ns2)
    time_start = session[0].findall("xmlns:StartDateTime", namespaces=ns2)[0].text
    time_stop = session[0].findall("xmlns:StopDateTime", namespaces=ns2)[0].text
    # --------------------------------------------------
    # Package the information
    # --------------------------------------------------
    meta = {}
    # Strip the timezone offset before parsing.
    # NOTE(review): assumes a '+HH:MM' style offset is always present; a
    # negative ('-') offset would raise ValueError here -- confirm.
    tstart = time_start[0:time_start.index('+')]
    tstop = time_stop[0:time_stop.index('+')]
    # Truncate to 26 chars so the fractional seconds fit strptime's %f
    # (at most 6 digits of microseconds).
    if (len(tstart) > 26):
        tstart = tstart[0:26]
    if (len(tstop) > 26):
        tstop = tstop[0:26]
    meta["time_start"] = datetime.strptime(tstart, "%Y-%m-%dT%H:%M:%S.%f")
    meta["time_stop"] = datetime.strptime(tstop, "%Y-%m-%dT%H:%M:%S.%f")
    meta["sampling_rate"] = sampling_rate
    return {'channels' : channel_names, 'meta' : meta}
def read_neurone_data(fpath, session_phase=1, protocol=None):
    """
    Read the NeurOne signal data from a binary file.
    Arguments:
       - fpath : the path to the directory holding the
                 NeurOne measurement (i.e., the
                 directory Protocol.xml and Session.xml
                 files.
       - session_phase :
                 The phase of the measurement. Currently
                 only reading of the first phase (1) is
                 supported.
       - protocol :
                 The dictionary obtained using the function
                 read_neurone_protocol. This argument is optional
                 and if not given, the protocol is automatically read.
    Returns:
       - A numpy ndarray with the data, where each column stores
         the data for one channel.
    """
    fname = path.join(fpath, str(session_phase), str(session_phase) + '.bin')
    # Read the protocol unless provided
    if protocol is None:
        protocol = read_neurone_protocol(fpath)
    # Determine the number of samples: the file is a flat sequence of
    # little-endian int32 samples (4 bytes each), channel-interleaved.
    f_info = path.getsize(fname)
    n_channels = len(protocol['channels'])
    n_samples = int(f_info / 4 / n_channels)
    # Read from the open handle (the previous version opened the file and
    # then passed the *filename* to np.fromfile, opening it a second time
    # and leaving the handle unused).
    with open(fname, mode='rb') as fh:
        data = np.fromfile(fh, dtype='<i4')
    # Reshape in place so each column holds one channel.
    data.shape = (n_samples, n_channels)
    return data
def get_n1_event_format():
    """
    Define the format for the events in a neurone recording.
    Arguments: None.
    Returns:
       - A Struct (from the construct library) describing the
         event format.
    """
    # Define the data format of the events.
    # Each on-disk record is exactly 88 bytes; the RFU* fields are
    # 'reserved for future use' padding defined by the NeurOne file format.
    return Struct(
        "Revision" / Int32sl,
        "RFU1" / Int32sl,
        "Type" / Int32sl,
        "SourcePort" / Int32sl,
        "ChannelNumber" / Int32sl,
        "Code" / Int32sl,
        "StartSampleIndex" / Int64ul,
        "StopSampleIndex" / Int64ul,
        "DescriptionLength" / Int64ul,
        "DescriptionOffset" / Int64ul,
        "DataLength" / Int64ul,
        "DataOffset" / Int64ul,
        "RFU2" / Int32sl,
        "RFU3" / Int32sl,
        "RFU4" / Int32sl,
        "RFU5" / Int32sl)
def read_neurone_events(fpath, session_phase = 1, sampling_rate = None):
    """
    Read the NeurOne events from a binary file.
    Arguments:
       - fpath : the path to the directory holding the
                 NeurOne measurement (i.e., the
                 directory Protocol.xml and Session.xml
                 files.
       - sampling_rate :
                 The sampling rate of the recording.
                 This argument is optional and if not given,
                 the protocol is automatically read.
       - session_phase :
                 The phase of the measurement. Currently
                 only reading of the first phase (1) is
                 supported.
    Returns:
       - A dict containing the events and the data type for the events.
         {"events" : <numpy structured array with the events>,
          "events_dtype" : <array with the numpy dtype for the events>}
    """
    fname = path.join(fpath, str(session_phase), "events.bin")
    # Get the sampling rate unless provided
    if sampling_rate is None:
        protocol = read_neurone_protocol(fpath)
        sampling_rate = protocol['meta']['sampling_rate']
    # Determine number of events (88 bytes per on-disk record)
    f_info = path.getsize(fname)
    n_events = int(f_info / 88)
    events = [''] * n_events
    # Read events in chunks of 88 bytes and unpack
    # also add start / stop time for each event
    # and remove 'reserved for future use' (RFU) fields
    format = get_n1_event_format()
    with open(fname, mode='rb') as file:
        for i in range(n_events):
            events[i] = format.parse(file.read(88))
            events[i]['StartTime'] = events[i]['StartSampleIndex'] / sampling_rate
            events[i]['StopTime'] = events[i]['StopSampleIndex'] / sampling_rate
            for j in range(5):
                del events[i]['RFU' + str(j+1)]
    # Create a numpy structured array from the events.
    # NOTE(review): StartTime/StopTime are computed as floats above but
    # declared int64 here, so sub-second precision is truncated -- confirm
    # this is intentional.
    events_dtype = np.dtype([("Revision" , np.int32),
                             ("Type" , np.int32),
                             ("SourcePort" , np.int32),
                             ("ChannelNumber" , np.int32),
                             ("Code" , np.int32),
                             ("StartSampleIndex" , np.int64),
                             ("StopSampleIndex" , np.int64),
                             ("DescriptionLength" , np.int64),
                             ("DescriptionOffset" , np.int64),
                             ("DataLength" , np.int64),
                             ("DataOffset" , np.int64),
                             ("StartTime" , np.int64),
                             ("StopTime" , np.int64) ])
    # convert array of event dicts to an array of tuples
    # NOTE(review): events[0] raises IndexError for an empty events.bin --
    # confirm empty files cannot occur.
    keylist = [k for k, v in events[0].items()]
    tmp = [tuple([e[k] for k in keylist]) for e in events]
    events = np.array(tmp, dtype = events_dtype)
    return {'events' : events, 'dtype' : events_dtype}
def write_neurone_events(fname, events):
    """
    Write neurone events.
    Arguments:
       - fname : the file to write the events to (will be overwritten)
       - events : an array of dicts, each dict containing one event.
         Each event has the following fields (here with example data):
            Revision = 5
            Type = 4
            SourcePort = 3
            ChannelNumber = 0
            Code = 2
            StartSampleIndex = 224042
            StopSampleIndex = 224042
            DescriptionLength = 0
            DescriptionOffset = 0
            DataLength = 0
            DataOffset = 0
         Note that fields RFU0 to RFU5 are automatically added.
    Returns:
       - nothing
    """
    format = get_n1_event_format()
    with open(fname, mode='wb') as file:
        for e in events:
            # Re-create the reserved padding fields that
            # read_neurone_events stripped. NOTE: this mutates the
            # caller's event dicts in place.
            for j in range(5):
                e['RFU' + str(j+1)] = 0
            file.write(format.build(e))
def read_neurone(fpath):
    """
    Read the neurone data.
    Arguments:
       - fpath : the path to the directory holding the
                 NeurOne measurement (i.e., the
                 directory Protocol.xml and Session.xml
                 files.
    Returns:
       - a dictionary containing the data, events and the
         data type (numpy dtype) for the events.
         {"data" : <list of per-channel {"meta", "data"} dicts,
                    same layout as read_neurone_data_hdf5>,
          "events" : <the events>,
          "events_dtype" : <event data type>}
    """
    # Read the protocol
    protocol = read_neurone_protocol(fpath)
    # Read the signal data
    data = read_neurone_data(fpath, session_phase = 1, protocol = protocol)
    # Read the events
    events = read_neurone_events(fpath, session_phase = 1, sampling_rate = float(protocol['meta']['sampling_rate']))
    # Create a time vector (seconds, shared by all channels)
    timevec = np.array(range(data.shape[0])) / float(protocol['meta']['sampling_rate'])
    out = [0] * len(protocol["channels"])
    for i, label in enumerate(protocol["channels"]):
        data_out = {}
        data_out[label] = data[:,i]
        data_out["time"] = timevec
        out[i] = {"meta" : protocol["meta"], "data" : data_out}
    return {"data" : out, "events" : events['events'], "events_dtype" : events['dtype']}
def read_neurone_data_hdf5(fpath):
    """Read NeurOne signal data as per-channel signal dicts.

    The layout is the one expected by the HDF5 exporter
    (export_hdf5): one entry per channel, each pairing the shared
    recording metadata with a dict holding the channel samples and a
    common time axis.

    Arguments:
       - fpath : directory of the NeurOne measurement (contains
                 Protocol.xml and Session.xml).
    Returns:
       - a list of dicts of the form
         {"meta" : <dict with metadata>,
          "data" : {"time" : [...], "<channelname>" : [...] }}
    """
    protocol = read_neurone_protocol(fpath)
    samples = read_neurone_data(fpath, session_phase=1, protocol=protocol)
    meta = protocol['meta']
    # One shared time axis (in seconds) for every channel.
    timevec = np.array(range(samples.shape[0])) / float(meta['sampling_rate'])
    return [
        {"meta": meta, "data": {label: samples[:, idx], "time": timevec}}
        for idx, label in enumerate(protocol["channels"])
    ]
def read_neurone_events_hdf5(fpath):
    """
    Read the neurone events in a format compatible with the
    HDF5-exporting function export_hdf5.
    Arguments:
       - fpath : the path to the directory holding the
                 NeurOne measurement (i.e., the
                 directory Protocol.xml and Session.xml
                 files.
    Returns:
       - A dict containing the events and the data type for the events.
         {"events" : <numpy structured array with the events>,
          "events_dtype" : <array with the numpy dtype for the events>}
    """
    # Only the sampling rate is needed to convert event sample indices to
    # times, so read just the protocol. (A previous version also read the
    # entire signal binary here and discarded it.)
    protocol = read_neurone_protocol(fpath)
    # Read the events
    events = read_neurone_events(fpath, session_phase=1, sampling_rate=float(protocol['meta']['sampling_rate']))
    return events
| mit |
zhongzw/skia-sdl | platform_tools/android/bin/download_utils.py | 149 | 8464 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A library to assist automatically downloading files.
This library is used by scripts that download tarballs, zipfiles, etc. as part
of the build process.
"""
import hashlib
import http_download
import os.path
import re
import shutil
import sys
import time
import urllib2
SOURCE_STAMP = 'SOURCE_URL'
HASH_STAMP = 'SOURCE_SHA1'
# Designed to handle more general inputs than sys.platform because the platform
# name may come from the command line.
PLATFORM_COLLAPSE = {
'windows': 'windows',
'win32': 'windows',
'cygwin': 'windows',
'linux': 'linux',
'linux2': 'linux',
'linux3': 'linux',
'darwin': 'mac',
'mac': 'mac',
}
ARCH_COLLAPSE = {
'i386' : 'x86',
'i686' : 'x86',
'x86_64': 'x86',
'armv7l': 'arm',
}
class HashError(Exception):
  """Raised when a download's SHA1 digest does not match the expected one."""

  def __init__(self, download_url, expected_hash, actual_hash):
    self.download_url = download_url
    self.expected_hash = expected_hash
    self.actual_hash = actual_hash

  def __str__(self):
    detail = (self.actual_hash, self.expected_hash, self.download_url)
    return 'Got hash "%s" but expected hash "%s" for "%s"' % detail
def PlatformName(name=None):
  """Collapse a platform string to 'windows', 'linux' or 'mac'.

  Defaults to sys.platform; raises KeyError for unrecognised platforms.
  """
  if name is None:
    name = sys.platform
  return PLATFORM_COLLAPSE[name]
def ArchName(name=None):
  """Collapse a machine string to 'x86' or 'arm' (auto-detected by default)."""
  if name is None:
    if PlatformName() == 'windows':
      # TODO(pdox): Figure out how to auto-detect 32-bit vs 64-bit Windows.
      name = 'i386'
    else:
      import platform
      name = platform.machine()
  return ARCH_COLLAPSE[name]
def EnsureFileCanBeWritten(filename):
  """Create the directory that will contain |filename|, if needed.

  A bare filename has an empty dirname; previously that empty string was
  passed to os.makedirs, which raises. Such filenames need no setup, so
  they are now skipped.
  """
  directory = os.path.dirname(filename)
  if directory and not os.path.exists(directory):
    os.makedirs(directory)
def WriteData(filename, data):
  """Write |data| to |filename| in binary mode, creating parent dirs.

  Uses a with-statement so the handle is closed even if the write fails
  (the previous open/close pair leaked the handle on error).
  """
  EnsureFileCanBeWritten(filename)
  with open(filename, 'wb') as f:
    f.write(data)
def WriteDataFromStream(filename, stream, chunk_size, verbose=True):
  """Copy |stream| to |filename| in chunk_size pieces, printing progress dots."""
  EnsureFileCanBeWritten(filename)
  dst = open(filename, 'wb')
  try:
    while True:
      data = stream.read(chunk_size)
      if len(data) == 0:
        break
      dst.write(data)
      if verbose:
        # Indicate that we're still writing.
        sys.stdout.write('.')
        sys.stdout.flush()
  finally:
    if verbose:
      sys.stdout.write('\n')
    dst.close()
def DoesStampMatch(stampfile, expected, index):
  """Check line |index| of a stamp file against |expected|.

  Returns a truthy explanation string when the stamp matches (or starts
  with 'manual', meaning a manual override), and False otherwise --
  callers rely on the truthiness of the result.
  """
  try:
    f = open(stampfile, 'r')
    stamp = f.read()
    f.close()
    if stamp.split('\n')[index] == expected:
      return "already up-to-date."
    elif stamp.startswith('manual'):
      return "manual override."
    return False
  except IOError:
    # Missing/unreadable stamp file means "not current".
    return False
def WriteStamp(stampfile, data):
  """Write |data| to |stampfile| (text mode), creating parent dirs.

  Uses a with-statement so the handle is closed even if the write fails,
  matching WriteData.
  """
  EnsureFileCanBeWritten(stampfile)
  with open(stampfile, 'w') as f:
    f.write(data)
def StampIsCurrent(path, stamp_name, stamp_contents, min_time=None, index=0):
  """Return a truthy reason string if the stamp under |path| is current."""
  stampfile = os.path.join(path, stamp_name)
  # Check if the stampfile is older than the minimum last mod time
  if min_time:
    try:
      stamp_time = os.stat(stampfile).st_mtime
      if stamp_time <= min_time:
        return False
    except OSError:
      # Missing stamp file => not current.
      return False
  return DoesStampMatch(stampfile, stamp_contents, index)
def WriteSourceStamp(path, url):
  """Record the source URL of a download under |path|."""
  stampfile = os.path.join(path, SOURCE_STAMP)
  WriteStamp(stampfile, url)
def WriteHashStamp(path, hash_val):
  """Record the SHA1 of a download under |path|."""
  hash_stampfile = os.path.join(path, HASH_STAMP)
  WriteStamp(hash_stampfile, hash_val)
def Retry(op, *args):
  """Call op(*args), retrying with exponential backoff on Windows."""
  # Windows seems to be prone to having commands that delete files or
  # directories fail. We currently do not have a complete understanding why,
  # and as a workaround we simply retry the command a few times.
  # It appears that file locks are hanging around longer than they should. This
  # may be a secondary effect of processes hanging around longer than they
  # should. This may be because when we kill a browser sel_ldr does not exit
  # immediately, etc.
  # Virus checkers can also accidently prevent files from being deleted, but
  # that shouldn't be a problem on the bots.
  if sys.platform in ('win32', 'cygwin'):
    count = 0
    while True:
      try:
        op(*args)
        break
      except Exception:
        sys.stdout.write("FAILED: %s %s\n" % (op.__name__, repr(args)))
        count += 1
        if count < 5:
          sys.stdout.write("RETRY: %s %s\n" % (op.__name__, repr(args)))
          # Exponential backoff: 2, 4, 8, 16 seconds.
          time.sleep(pow(2, count))
        else:
          # Don't mask the exception.
          raise
  else:
    op(*args)
def MoveDirCleanly(src, dst):
  """Replace |dst| with |src|, removing any existing |dst| first."""
  RemoveDir(dst)
  MoveDir(src, dst)
def MoveDir(src, dst):
  Retry(shutil.move, src, dst)
def RemoveDir(path):
  """Recursively remove |path| if it exists (with retry on Windows)."""
  if os.path.exists(path):
    Retry(shutil.rmtree, path)
def RemoveFile(path):
  """Remove the file |path| if it exists (with retry on Windows)."""
  if os.path.exists(path):
    Retry(os.unlink, path)
def _HashFileHandle(fh):
  """Compute the SHA1 digest of a file-like object.

  Consumes |fh| in 4 KiB chunks and always closes it, even when a read
  fails part-way through.

  Arguments:
    fh: readable file-like object (consumed and closed).
  Returns:
    Hex-encoded SHA1 digest string.
  """
  digest = hashlib.sha1()
  try:
    for chunk in iter(lambda: fh.read(4096), b''):
      digest.update(chunk)
  finally:
    fh.close()
  return digest.hexdigest()
def HashFile(filename):
  """sha1 a file on disk.
  Arguments:
    filename: filename to hash.
  Returns:
    sha1 as a string.
  """
  # _HashFileHandle closes the handle (in its finally block).
  fh = open(filename, 'rb')
  return _HashFileHandle(fh)
def HashUrlByDownloading(url):
  """sha1 the data at an url.
  Arguments:
    url: url to download from.
  Returns:
    sha1 of the data at the url.
  """
  try:
    fh = urllib2.urlopen(url)
  except:
    # Deliberate bare except: log the URL for context, then re-raise.
    sys.stderr.write("Failed fetching URL: %s\n" % url)
    raise
  return _HashFileHandle(fh)
# Attempts to get the SHA1 hash of a file given a URL by looking for
# an adjacent file with a ".sha1hash" suffix. This saves having to
# download a large tarball just to get its hash. Otherwise, we fall
# back to downloading the main file.
def HashUrl(url):
  # NOTE: Python 2 module ('except ..., exn' syntax below).
  hash_url = '%s.sha1hash' % url
  try:
    fh = urllib2.urlopen(hash_url)
    data = fh.read(100)
    fh.close()
  except urllib2.HTTPError, exn:
    if exn.code == 404:
      # No sidecar hash file published; hash the full download instead.
      return HashUrlByDownloading(url)
    raise
  else:
    # Sanity-check that the sidecar really contains a 40-hex-digit SHA1.
    if not re.match('[0-9a-f]{40}\n?$', data):
      raise AssertionError('Bad SHA1 hash file: %r' % data)
    return data.strip()
def SyncURL(url, filename=None, stamp_dir=None, min_time=None,
            hash_val=None, keep=False, verbose=False, stamp_index=0):
  """Synchronize a destination file with a URL

  if the URL does not match the URL stamp, then we must re-download it.

  Arguments:
    url: the url which will to compare against and download
    filename: the file to create on download
    stamp_dir: directory holding the stamp files to check against
    min_time: minimum acceptable stamp time (passed to StampIsCurrent)
    hash_val: if set, the expected hash which must be matched
    keep: if True the tarball is kept around, so it must already exist
          on disk for the stamps to count as up to date
    verbose: prints out status as it runs
    stamp_index: index within the stamp file to check.
  Returns:
    True if the file is replaced
    False if the file is not replaced
  Exception:
    HashError: if the hash does not match
  """
  assert url and filename

  # If we are not keeping the tarball, or we already have it, we can
  # skip downloading it for this reason. If we are keeping it,
  # it must exist.
  if keep:
    tarball_ok = os.path.isfile(filename)
  else:
    tarball_ok = True

  # If we don't need the tarball and the stamp_file matches the url, then
  # we must be up to date.  If the URL differs but the recorded hash matches
  # the one we'll insist the tarball has, then that's good enough too.
  # TODO(mcgrathr): Download the .sha1sum file first to compare with
  # the cached hash, in case --file-hash options weren't used.
  if tarball_ok and stamp_dir is not None:
    if StampIsCurrent(stamp_dir, SOURCE_STAMP, url, min_time):
      if verbose:
        print '%s is already up to date.' % filename
      return False
    if (hash_val is not None and
        StampIsCurrent(stamp_dir, HASH_STAMP, hash_val, min_time, stamp_index)):
      if verbose:
        print '%s is identical to the up to date file.' % filename
      return False

  if verbose:
    print 'Updating %s\n\tfrom %s.' % (filename, url)
  EnsureFileCanBeWritten(filename)
  http_download.HttpDownload(url, filename)

  # Verify the download against the expected hash, if one was supplied.
  if hash_val:
    tar_hash = HashFile(filename)
    if hash_val != tar_hash:
      raise HashError(actual_hash=tar_hash, expected_hash=hash_val,
                      download_url=url)

  return True
| bsd-3-clause |
drawks/ansible | lib/ansible/modules/network/onyx/onyx_ptp_global.py | 59 | 6923 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# NOTE: the priority ranges below are documented as 0-255 to match the
# validate_primary_priority / validate_secondary_priority checks in this
# module (the previous text said 0-225, which contradicted the code).
DOCUMENTATION = """
---
module: onyx_ptp_global
version_added: "2.8"
author: "Anas Badaha (@anasb)"
short_description: Configures PTP Global parameters
description:
  - This module provides declarative management of PTP Global configuration
    on Mellanox ONYX network devices.
notes:
  - Tested on ONYX 3.6.8130
    ptp and ntp protocols cannot be enabled at the same time
options:
  ptp_state:
    description:
      - PTP state.
    choices: ['enabled', 'disabled']
    default: enabled
  ntp_state:
    description:
      - NTP state.
    choices: ['enabled', 'disabled']
  domain:
    description:
      - "set PTP domain number Range 0-127"
  primary_priority:
    description:
      - "set PTP primary priority Range 0-255"
  secondary_priority:
    description:
      - "set PTP secondary priority Range 0-255"
"""

EXAMPLES = """
- name: configure PTP
  onyx_ptp_global:
    ntp_state: enabled
    ptp_state: disabled
    domain: 127
    primary_priority: 128
    secondary_priority: 128
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device.
  returned: always
  type: list
  sample:
    - no ntp enable
    - protocol ptp
    - ptp domain 127
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxPtpGlobalModule(BaseOnyxModule):
    """Reconciles the device's global PTP/NTP state with the desired params.

    Follows the BaseOnyxModule workflow: load current config from ``show``
    commands, compare against the requested params, and emit only the
    configuration commands needed to close the gap.
    """

    def init_module(self):
        """ initialize module
        """
        element_spec = dict(
            ntp_state=dict(choices=['enabled', 'disabled']),
            ptp_state=dict(choices=['enabled', 'disabled'], default='enabled'),
            domain=dict(type=int),
            primary_priority=dict(type=int),
            secondary_priority=dict(type=int)
        )
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True)

    def get_required_config(self):
        # Snapshot the module params and validate them before use.
        module_params = self._module.params
        self._required_config = dict(module_params)
        self._validate_param_values(self._required_config)

    def _validate_param_values(self, obj, param=None):
        # Run the per-field validate_* hooks from the base class, then
        # enforce the cross-field rule: PTP and NTP are mutually exclusive.
        super(OnyxPtpGlobalModule, self).validate_param_values(obj, param)
        if obj['ntp_state'] == 'enabled' and obj['ptp_state'] == 'enabled':
            self._module.fail_json(msg='PTP State and NTP State Can not be enabled at the same time')

    def validate_domain(self, value):
        # NOTE(review): `if value` means a value of 0 skips the range check;
        # harmless here since 0 is within the valid range anyway.
        if value and not 0 <= int(value) <= 127:
            self._module.fail_json(msg='domain must be between 0 and 127')

    def validate_primary_priority(self, value):
        if value and not 0 <= int(value) <= 255:
            self._module.fail_json(msg='Primary Priority must be between 0 and 255')

    def validate_secondary_priority(self, value):
        if value and not 0 <= int(value) <= 255:
            self._module.fail_json(msg='Secondary Priority must be between 0 and 255')

    def _set_ntp_config(self, ntp_config):
        # `show ntp configured` returns a one-element list of dicts.
        ntp_config = ntp_config[0]
        if not ntp_config:
            return
        ntp_state = ntp_config.get('NTP enabled')
        if ntp_state == "yes":
            self._current_config['ntp_state'] = "enabled"
        else:
            self._current_config['ntp_state'] = "disabled"

    def _set_ptp_config(self, ptp_config):
        # A missing `show ptp clock` result means PTP is not running.
        if ptp_config is None:
            self._current_config['ptp_state'] = 'disabled'
        else:
            self._current_config['ptp_state'] = 'enabled'
            self._current_config['domain'] = int(ptp_config['Domain'])
            self._current_config['primary_priority'] = int(ptp_config['Priority1'])
            self._current_config['secondary_priority'] = int(ptp_config['Priority2'])

    def _show_ntp_config(self):
        cmd = "show ntp configured"
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def _show_ptp_config(self):
        cmd = "show ptp clock"
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def load_current_config(self):
        """Populate self._current_config from the device's show output."""
        self._current_config = dict()
        ntp_config = self._show_ntp_config()
        self._set_ntp_config(ntp_config)
        ptp_config = self._show_ptp_config()
        self._set_ptp_config(ptp_config)

    def generate_commands(self):
        """Append to self._commands only the changes the device needs."""
        # NTP is handled before PTP so a `no ntp enable` precedes a
        # conflicting `protocol ptp` in the generated command list.
        ntp_state = self._required_config.get("ntp_state")
        if ntp_state == "enabled":
            self._enable_ntp()
        elif ntp_state == "disabled":
            self._disable_ntp()
        ptp_state = self._required_config.get("ptp_state", "enabled")
        if ptp_state == "enabled":
            self._enable_ptp()
        else:
            self._disable_ptp()
        domain = self._required_config.get("domain")
        if domain is not None:
            curr_domain = self._current_config.get("domain")
            if domain != curr_domain:
                self._commands.append('ptp domain %d' % domain)
        primary_priority = self._required_config.get("primary_priority")
        if primary_priority is not None:
            curr_primary_priority = self._current_config.get("primary_priority")
            if primary_priority != curr_primary_priority:
                self._commands.append('ptp priority1 %d' % primary_priority)
        secondary_priority = self._required_config.get("secondary_priority")
        if secondary_priority is not None:
            curr_secondary_priority = self._current_config.get("secondary_priority")
            if secondary_priority != curr_secondary_priority:
                self._commands.append('ptp priority2 %d' % secondary_priority)

    def _enable_ptp(self):
        curr_ptp_state = self._current_config['ptp_state']
        if curr_ptp_state == 'disabled':
            self._commands.append('protocol ptp')

    def _disable_ptp(self):
        curr_ptp_state = self._current_config['ptp_state']
        if curr_ptp_state == 'enabled':
            self._commands.append('no protocol ptp')

    def _enable_ntp(self):
        curr_ntp_state = self._current_config.get('ntp_state')
        if curr_ntp_state == 'disabled':
            self._commands.append('ntp enable')

    def _disable_ntp(self):
        curr_ntp_state = self._current_config['ntp_state']
        if curr_ntp_state == 'enabled':
            self._commands.append('no ntp enable')
def main():
    """ main entry point for module execution
    """
    # BaseOnyxModule.main drives the full check/apply workflow.
    OnyxPtpGlobalModule.main()
if __name__ == '__main__':
main()
| gpl-3.0 |
spadae22/odoo | addons/website_twitter/models/twitter.py | 376 | 5350 | from urllib2 import urlopen, Request, HTTPError
import base64
import json
import logging
import werkzeug
from openerp.osv import fields, osv
API_ENDPOINT = 'https://api.twitter.com'
API_VERSION = '1.1'
REQUEST_TOKEN_URL = '%s/oauth2/token' % API_ENDPOINT
REQUEST_FAVORITE_LIST_URL = '%s/%s/favorites/list.json' % (API_ENDPOINT, API_VERSION)
URLOPEN_TIMEOUT = 10
_logger = logging.getLogger(__name__)
class TwitterClient(osv.osv):
    """Extends `website` with Twitter credentials and favorite-tweet sync."""
    _inherit = "website"

    _columns = {
        'twitter_api_key': fields.char('Twitter API key', help="Twitter API Key"),
        'twitter_api_secret': fields.char('Twitter API secret', help="Twitter API Secret"),
        'twitter_screen_name': fields.char('Get favorites from this screen name'),
    }

    def _request(self, website, url, params=None):
        """Send an authenticated request to the Twitter API."""
        access_token = self._get_access_token(website)
        if params:
            params = werkzeug.url_encode(params)
            url = url + '?' + params
        try:
            request = Request(url)
            request.add_header('Authorization', 'Bearer %s' % access_token)
            return json.load(urlopen(request, timeout=URLOPEN_TIMEOUT))
        except HTTPError, e:
            # Log the API error details before propagating it.
            _logger.debug("Twitter API request failed with code: %r, msg: %r, content: %r",
                          e.code, e.msg, e.fp.read())
            raise

    def _refresh_favorite_tweets(self, cr, uid, context=None):
        ''' called by cron job '''
        # Only websites with a complete Twitter configuration are refreshed.
        website = self.pool['website']
        ids = self.pool['website'].search(cr, uid, [('twitter_api_key', '!=', False),
                                                    ('twitter_api_secret', '!=', False),
                                                    ('twitter_screen_name', '!=', False)],
                                          context=context)
        _logger.debug("Refreshing tweets for website IDs: %r", ids)
        website.fetch_favorite_tweets(cr, uid, ids, context=context)

    def fetch_favorite_tweets(self, cr, uid, ids, context=None):
        """Fetch new favorite tweets for each website and store them."""
        website_tweets = self.pool['website.twitter.tweet']
        # NOTE(review): this accumulator is reassigned by the per-tweet
        # search() below, so the returned list is not the full set of new
        # tweet ids across websites -- looks unintended; verify callers.
        tweet_ids = []
        for website in self.browse(cr, uid, ids, context=context):
            if not all((website.twitter_api_key, website.twitter_api_secret,
                        website.twitter_screen_name)):
                _logger.debug("Skip fetching favorite tweets for unconfigured website %s",
                              website)
                continue
            params = {'screen_name': website.twitter_screen_name}
            # Only request tweets newer than the most recent one we stored.
            last_tweet = website_tweets.search_read(
                cr, uid, [('website_id', '=', website.id),
                          ('screen_name', '=', website.twitter_screen_name)],
                ['tweet_id'],
                limit=1, order='tweet_id desc', context=context)
            if last_tweet:
                params['since_id'] = int(last_tweet[0]['tweet_id'])
            _logger.debug("Fetching favorite tweets using params %r", params)
            response = self._request(website, REQUEST_FAVORITE_LIST_URL, params=params)
            for tweet_dict in response:
                tweet_id = tweet_dict['id']  # unsigned 64-bit snowflake ID
                tweet_ids = website_tweets.search(cr, uid, [('tweet_id', '=', tweet_id)])
                if not tweet_ids:
                    new_tweet = website_tweets.create(
                        cr, uid,
                        {
                            'website_id': website.id,
                            'tweet': json.dumps(tweet_dict),
                            'tweet_id': tweet_id,  # stored in NUMERIC PG field
                            'screen_name': website.twitter_screen_name,
                        },
                        context=context)
                    _logger.debug("Found new favorite: %r, %r", tweet_id, tweet_dict)
                    tweet_ids.append(new_tweet)
        return tweet_ids

    def _get_access_token(self, website):
        """Obtain a bearer token."""
        # OAuth2 application-only auth: exchange key:secret for a bearer token.
        bearer_token_cred = '%s:%s' % (website.twitter_api_key, website.twitter_api_secret)
        encoded_cred = base64.b64encode(bearer_token_cred)
        request = Request(REQUEST_TOKEN_URL)
        request.add_header('Content-Type',
                           'application/x-www-form-urlencoded;charset=UTF-8')
        request.add_header('Authorization',
                           'Basic %s' % encoded_cred)
        request.add_data('grant_type=client_credentials')
        data = json.load(urlopen(request, timeout=URLOPEN_TIMEOUT))
        access_token = data['access_token']
        return access_token
class WebsiteTwitterTweet(osv.osv):
    """Storage model for the raw JSON of fetched favorite tweets."""
    _name = "website.twitter.tweet"
    _description = "Twitter Tweets"
    _columns = {
        'website_id': fields.many2one('website', string="Website"),
        'screen_name': fields.char("Screen Name"),
        'tweet': fields.text('Tweets'),
        # Twitter IDs are 64-bit unsigned ints, so we need to store them in
        # unlimited precision NUMERIC columns, which can be done with a
        # float field. Used digits=(0,0) to indicate unlimited.
        # Using VARCHAR would work too but would have sorting problems.
        'tweet_id': fields.float("Tweet ID", digits=(0,0)),  # Twitter
    }
| agpl-3.0 |
h2oloopan/easymerge | EasyMerge/tests/scrapy/scrapy/contrib/linkextractors/htmlparser.py | 11 | 2468 | """
HTMLParser-based link extractor
"""
from HTMLParser import HTMLParser
from urlparse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class HtmlParserLinkExtractor(HTMLParser):
    """Link extractor built on the stdlib HTMLParser event callbacks."""

    def __init__(self, tag="a", attr="href", process=None, unique=False):
        HTMLParser.__init__(self)
        # tag/attr may be plain strings or predicate callables; normalize
        # both forms to predicates so the handlers below stay uniform.
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique

    def _extract_links(self, response_text, response_url, response_encoding):
        self.reset()
        self.feed(response_text)
        self.close()

        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links

        ret = []
        # A <base href> in the document overrides the response URL.
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in links:
            if isinstance(link.url, unicode):
                link.url = link.url.encode(response_encoding)
            link.url = urljoin(base_url, link.url)
            link.url = safe_url_string(link.url, response_encoding)
            link.text = link.text.decode(response_encoding)
            ret.append(link)

        return ret

    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        return self._extract_links(response.body, response.url, response.encoding)

    def reset(self):
        HTMLParser.reset(self)

        self.base_url = None
        self.current_link = None
        self.links = []

    def handle_starttag(self, tag, attrs):
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_attr(value)
                    link = Link(url=url)
                    self.links.append(link)
                    self.current_link = link

    def handle_endtag(self, tag):
        # NOTE(review): any matching end tag clears current_link, so text
        # after a nested matching tag is dropped -- confirm this is intended.
        if self.scan_tag(tag):
            self.current_link = None

    def handle_data(self, data):
        # Text may arrive in multiple chunks; accumulate onto the open link.
        if self.current_link:
            self.current_link.text = self.current_link.text + data

    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
| mit |
mattdm/dnf | dnf/transaction.py | 3 | 6891 | # transaction.py
# Managing the transaction to be passed to RPM.
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
from functools import reduce
import operator
# Operation type codes stored in TransactionItem.op_type.
DOWNGRADE = 1
ERASE = 2
INSTALL = 3
REINSTALL = 4
UPGRADE = 5
class TransactionItem(object):
    # :api
    """One operation in a transaction: at most one installed package, at
    most one erased package, plus any packages the install obsoletes."""

    __slots__ = ('op_type', 'installed', 'erased', 'obsoleted', 'reason')

    def __init__(self, op_type, installed=None, erased=None, obsoleted=None,
                 reason='unknown'):
        self.op_type = op_type
        self.installed = installed
        self.erased = erased
        self.obsoleted = list() if obsoleted is None else obsoleted
        self.reason = reason  # reason for it to be in the transaction set

    @property
    def active(self):
        # The package the operation is "about": the incoming one if any,
        # otherwise the outgoing one.
        return self.installed if self.installed is not None else self.erased

    @property
    def active_history_state(self):
        return (self.installed_history_state if self.installed is not None
                else self.erased_history_state)

    @property
    def erased_history_state(self):
        return self._HISTORY_ERASE_STATES[self.op_type]

    # History-DB state labels keyed by op_type. ERASE has no installed
    # label and INSTALL no erased label, by design.
    _HISTORY_INSTALLED_STATES = {
        DOWNGRADE : 'Downgrade',
        INSTALL : 'Install',
        REINSTALL : 'Reinstall',
        UPGRADE : 'Update'
    }

    _HISTORY_ERASE_STATES = {
        DOWNGRADE : 'Downgraded',
        ERASE : 'Erase',
        REINSTALL : 'Reinstalled',
        UPGRADE : 'Updated'
    }

    def history_iterator(self):
        """Yield (package, history_state) pairs for every package touched."""
        if self.installed is not None:
            yield(self.installed, self.installed_history_state)
        if self.erased is not None:
            yield(self.erased, self.erased_history_state)
        if self.obsoleted:
            yield(self.installed, self.obsoleting_history_state)
        for obs in self.obsoleted:
            yield(obs, self.obsoleted_history_state)

    @property
    def installed_history_state(self):
        return self._HISTORY_INSTALLED_STATES[self.op_type]

    def installs(self):
        # :api
        return [] if self.installed is None else [self.installed]

    @property
    def obsoleted_history_state(self):
        return 'Obsoleted'

    @property
    def obsoleting_history_state(self):
        return 'Obsoleting'

    def propagated_reason(self, yumdb, installonlypkgs):
        """Carry the install reason forward from the replaced package."""
        if self.reason == 'user':
            return self.reason
        if self.installed.name in installonlypkgs:
            return 'user'
        if self.op_type in [DOWNGRADE, REINSTALL, UPGRADE]:
            previously = yumdb.get_package(self.erased).get('reason')
            if previously:
                return previously
        return self.reason

    def removes(self):
        # :api
        l = [] if self.erased is None else [self.erased]
        return l + self.obsoleted
class Transaction(object):
    # :api
    """An ordered collection of TransactionItems to hand off to RPM."""

    def __init__(self):
        # :api
        self._tsis = []

    def __iter__(self):
        #: api
        return iter(self._tsis)

    def __len__(self):
        return len(self._tsis)

    def _items2set(self, extracting_fn):
        # Union of extracting_fn(tsi) across all items, as a set.
        lists = map(extracting_fn, self._tsis)
        sets = map(set, lists)
        return reduce(operator.or_, sets, set())

    def add_downgrade(self, new, downgraded, obsoleted):
        # :api
        tsi = TransactionItem(DOWNGRADE, new, downgraded, obsoleted)
        self._tsis.append(tsi)

    def add_erase(self, erased):
        # :api
        tsi = TransactionItem(ERASE, erased=erased)
        self._tsis.append(tsi)

    def add_install(self, new, obsoleted, reason='unknown'):
        # :api
        tsi = TransactionItem(INSTALL, new, obsoleted=obsoleted,
                              reason=reason)
        self._tsis.append(tsi)

    def add_reinstall(self, new, reinstalled, obsoleted):
        # :api
        tsi = TransactionItem(REINSTALL, new, reinstalled, obsoleted)
        self._tsis.append(tsi)

    def add_upgrade(self, upgrade, upgraded, obsoleted):
        # :api
        tsi = TransactionItem(UPGRADE, upgrade, upgraded, obsoleted)
        self._tsis.append(tsi)

    def get_items(self, op_type):
        return [tsi for tsi in self._tsis if tsi.op_type == op_type]

    @property
    def install_set(self):
        # :api
        fn = operator.methodcaller('installs')
        return self._items2set(fn)

    def populate_rpm_ts(self, ts):
        """Populate the RPM transaction set."""
        for tsi in self._tsis:
            if tsi.op_type == DOWNGRADE:
                # RPM has no native downgrade: erase the newer, install the
                # older as a plain install ('i').
                ts.addErase(tsi.erased.idx)
                hdr = tsi.installed.header
                ts.addInstall(hdr, tsi, 'i')
            elif tsi.op_type == ERASE:
                ts.addErase(tsi.erased.idx)
            elif tsi.op_type == INSTALL:
                hdr = tsi.installed.header
                # 'u' lets RPM remove the obsoleted packages itself.
                if tsi.obsoleted:
                    ts.addInstall(hdr, tsi, 'u')
                else:
                    ts.addInstall(hdr, tsi, 'i')
            elif tsi.op_type == REINSTALL:
                # note: in rpm 4.12 there should not be set
                # rpm.RPMPROB_FILTER_REPLACEPKG to work
                ts.addReinstall(tsi.installed.header, tsi)
            elif tsi.op_type == UPGRADE:
                hdr = tsi.installed.header
                ts.addInstall(hdr, tsi, 'u')
        return ts

    @property
    def remove_set(self):
        # :api
        fn = operator.methodcaller('removes')
        return self._items2set(fn)

    def rpm_limitations(self):
        """ Ensures all the members can be passed to rpm as they are to perform
            the transaction.
        """
        src_installs = [pkg for pkg in self.install_set if pkg.arch == 'src']
        if len(src_installs):
            return _("Will not install a source rpm package (%s).") % \
                src_installs[0]
        return None

    def total_package_count(self):
        return len(self.install_set | self.remove_set)
| gpl-2.0 |
whereismyjetpack/ansible | lib/ansible/modules/network/dellos6/dellos6_facts.py | 19 | 14318 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: dellos6_facts
version_added: "2.2"
author: "Abirami N(@abirami-n)"
short_description: Collect facts from remote devices running Dell OS6
description:
- Collects a base set of device facts from a remote device that
is running OS6. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: dellos6
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- dellos6_facts:
gather_subset: all
# Collect only the config and default facts
- dellos6_facts:
gather_subset:
- config
# Do not collect hardware facts
- dellos6_facts:
gather_subset:
- "!interfaces"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: always
type: string
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.network import NetworkModule
import ansible.module_utils.dellos6
class FactsBase(object):
    """Base for fact collectors: subclasses queue commands in commands()
    and parse the results in populate()."""

    def __init__(self, runner):
        self.runner = runner
        self.facts = dict()
        # Register this collector's show commands with the shared runner.
        self.commands()
class Default(FactsBase):
    """Collects version, serial number, model, image and hostname."""

    def commands(self):
        self.runner.add_command('show version')
        self.runner.add_command('show running-config | include hostname')

    def populate(self):
        data = self.runner.get_command('show version')
        self.facts['version'] = self.parse_version(data)
        self.facts['serialnum'] = self.parse_serialnum(data)
        self.facts['model'] = self.parse_model(data)
        self.facts['image'] = self.parse_image(data)
        hdata = self.runner.get_command('show running-config | include hostname')
        self.facts['hostname'] = self.parse_hostname(hdata)

    # The parse_* helpers all return None when the pattern is absent.
    def parse_version(self, data):
        match = re.search(r'HW Version(.+)\s(\d+)', data)
        if match:
            return match.group(2)

    def parse_hostname(self, data):
        match = re.search(r'\S+\s(\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_model(self, data):
        match = re.search(r'System Model ID(.+)\s([A-Z0-9]*)\n', data, re.M)
        if match:
            return match.group(2)

    def parse_image(self, data):
        match = re.search(r'Image File(.+)\s([A-Z0-9a-z_.]*)\n', data)
        if match:
            return match.group(2)

    def parse_serialnum(self, data):
        match = re.search(r'Serial Number(.+)\s([A-Z0-9]*)\n', data)
        if match:
            return match.group(2)
class Hardware(FactsBase):
    """Collects total/free memory (in Mb) from `show memory cpu`."""

    def commands(self):
        self.runner.add_command('show memory cpu')

    def populate(self):
        data = self.runner.get_command('show memory cpu')
        # First two whitespace-delimited numbers are total and free KB.
        match = re.findall('\s(\d+)\s', data)
        if match:
            self.facts['memtotal_mb'] = int(match[0]) / 1024
            self.facts['memfree_mb'] = int(match[1]) / 1024
class Config(FactsBase):
    """Captures the full running configuration as a single string fact."""

    def commands(self):
        self.runner.add_command('show running-config')

    def populate(self):
        self.facts['config'] = self.runner.get_command('show running-config')
class Interfaces(FactsBase):
    """Collects per-interface attributes, IP addresses and LLDP neighbors.

    All parsers split the device's tabular CLI output on its literal
    dashed header separators, so they are tightly coupled to the exact
    column layout of Dell OS6 show commands.
    """

    def commands(self):
        self.runner.add_command('show interfaces')
        self.runner.add_command('show interfaces status')
        self.runner.add_command('show interfaces transceiver properties')
        self.runner.add_command('show ip int')
        self.runner.add_command('show lldp')
        self.runner.add_command('show lldp remote-device all')

    def populate(self):
        vlan_info = dict()
        data = self.runner.get_command('show interfaces')
        interfaces = self.parse_interfaces(data)
        desc = self.runner.get_command('show interfaces status')
        properties = self.runner.get_command('show interfaces transceiver properties')
        vlan = self.runner.get_command('show ip int')
        vlan_info = self.parse_vlan(vlan)
        self.facts['interfaces'] = self.populate_interfaces(interfaces, desc, properties)
        self.facts['interfaces'].update(vlan_info)
        # Neighbors can only be parsed when LLDP is running on the device.
        if 'LLDP is not enabled' not in self.runner.get_command('show lldp'):
            neighbors = self.runner.get_command('show lldp remote-device all')
            self.facts['neighbors'] = self.parse_neighbors(neighbors)

    def parse_vlan(self, vlan):
        """Map interface name -> list of {address, masklen} dicts."""
        facts = dict()
        vlan_info, vlan_info_next = vlan.split('---------- ----- --------------- --------------- -------')
        for en in vlan_info_next.splitlines():
            if en == '':
                continue
            # NOTE(review): these re.search results are used unchecked; a
            # row that doesn't match would raise AttributeError -- confirm
            # the table rows always have at least three columns.
            match = re.search('^(\S+)\s+(\S+)\s+(\S+)', en)
            intf = match.group(1)
            if intf not in facts:
                facts[intf] = list()
            fact = dict()
            matc = re.search('^([\w+\s\d]*)\s+(\S+)\s+(\S+)', en)
            fact['address'] = matc.group(2)
            fact['masklen'] = matc.group(3)
            facts[intf].append(fact)
        return facts

    def populate_interfaces(self, interfaces, desc, properties):
        """Build the per-interface fact dict from the parsed show output."""
        facts = dict()
        for key, value in interfaces.items():
            intf = dict()
            intf['description'] = self.parse_description(key, desc)
            intf['macaddress'] = self.parse_macaddress(value)
            intf['mtu'] = self.parse_mtu(value)
            intf['bandwidth'] = self.parse_bandwidth(value)
            intf['mediatype'] = self.parse_mediatype(key, properties)
            intf['duplex'] = self.parse_duplex(value)
            intf['lineprotocol'] = self.parse_lineprotocol(value)
            intf['operstatus'] = self.parse_operstatus(value)
            intf['type'] = self.parse_type(key, properties)
            facts[key] = intf
        return facts

    def parse_neighbors(self, neighbors):
        """Map local interface -> list of {host, port} LLDP neighbors."""
        facts = dict()
        neighbor, neighbor_next = neighbors.split('--------- ------- ------------------- ----------------- -----------------')
        for en in neighbor_next.splitlines():
            if en == '':
                continue
            intf = self.parse_lldp_intf(en.split()[0])
            if intf not in facts:
                facts[intf] = list()
            fact = dict()
            fact['host'] = self.parse_lldp_host(en.split()[4])
            fact['port'] = self.parse_lldp_port(en.split()[3])
            facts[intf].append(fact)
        return facts

    def parse_interfaces(self, data):
        """Split `show interfaces` into {interface_name: its text block}."""
        parsed = dict()
        for line in data.split('\n'):
            if len(line) == 0:
                continue
            else:
                # A new "Interface Name ..." line starts the next block;
                # other lines are appended to the current interface's text.
                match = re.match(r'Interface Name(.+)\s([A-Za-z0-9/]*)', line)
                if match:
                    key = match.group(2)
                    parsed[key] = line
                else:
                    parsed[key] += '\n%s' % line
        return parsed

    def parse_description(self, key, desc):
        desc, desc_next = desc.split('--------- --------------- ------ ------- ---- ------ ----- -- -------------------')
        # Rows after the 'Oob' marker belong to the out-of-band table.
        desc_val, desc_info = desc_next.split('Oob')
        for en in desc_val.splitlines():
            if key in en:
                match = re.search('^(\S+)\s+(\S+)', en)
                # 'Full'/'N/A' in the column means no description was set.
                if match.group(2) in ['Full', 'N/A']:
                    return "Null"
                else:
                    return match.group(2)

    def parse_macaddress(self, data):
        match = re.search(r'Burned MAC Address(.+)\s([A-Z0-9.]*)\n', data)
        if match:
            return match.group(2)

    def parse_mtu(self, data):
        match = re.search(r'MTU Size(.+)\s(\d+)\n', data)
        if match:
            return int(match.group(2))

    def parse_bandwidth(self, data):
        match = re.search(r'Port Speed(.+)\s(\d+)\n', data)
        if match:
            return int(match.group(2))

    def parse_duplex(self, data):
        match = re.search(r'Port Mode\s([A-Za-z]*)(.+)\s([A-Za-z/]*)\n', data)
        if match:
            return match.group(3)

    def parse_mediatype(self, key, properties):
        mediatype, mediatype_next = properties.split('--------- ------- --------------------- --------------------- --------------')
        flag = 1
        for en in mediatype_next.splitlines():
            if key in en:
                flag = 0
                match = re.search('^(\S+)\s+(\S+)\s+(\S+)', en)
                if match:
                    strval = match.group(3)
                    return match.group(3)
        # No transceiver row found for this interface.
        if flag == 1:
            return "null"

    def parse_type(self, key, properties):
        type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------')
        flag = 1
        for en in type_val_next.splitlines():
            if key in en:
                flag = 0
                match = re.search('^(\S+)\s+(\S+)\s+(\S+)', en)
                if match:
                    strval = match.group(2)
                    return match.group(2)
        if flag == 1:
            return "null"

    def parse_lineprotocol(self, data):
        match = re.search(r'Link Status.*\s(\S+)\s+(\S+)\n', data)
        if match:
            strval = match.group(2)
            return strval.strip('/')

    def parse_operstatus(self, data):
        match = re.search(r'Link Status.*\s(\S+)\s+(\S+)\n', data)
        if match:
            return match.group(1)

    def parse_lldp_intf(self, data):
        match = re.search(r'^([A-Za-z0-9/]*)', data)
        if match:
            return match.group(1)

    def parse_lldp_host(self, data):
        match = re.search(r'^([A-Za-z0-9]*)', data)
        if match:
            return match.group(1)

    def parse_lldp_port(self, data):
        match = re.search(r'^([A-Za-z0-9/]*)', data)
        if match:
            return match.group(1)
# Maps each gather_subset name to the collector class that produces it.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Resolve the requested fact subsets, run the collectors and exit."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    module = NetworkModule(argument_spec=spec, supports_check_mode=True)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    # 'all' expands to every subset; a '!' prefix marks an exclusion.
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    # Pure-exclusion input implies "everything minus the exclusions";
    # 'default' facts are always collected.
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    runner = CommandRunner(module)

    # Instantiating each collector registers its commands with the runner.
    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](runner))

    runner.run()

    try:
        for inst in instances:
            inst.populate()
            facts.update(inst.facts)
    except Exception:
        # NOTE(review): any parsing error is swallowed and the raw command
        # output returned instead -- confirm this fallback is intentional.
        module.exit_json(out=module.from_json(runner.items))

    # Prefix every fact key per Ansible convention.
    ansible_facts = dict()
    for key, value in facts.items():
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    module.exit_json(ansible_facts=ansible_facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
Intel-Corporation/tensorflow | tensorflow/python/data/experimental/kernel_tests/serialization/prefetch_dataset_serialization_test.py | 21 | 1539 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the PrefetchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class PrefetchDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Serialization tests for a prefetched, deterministically shuffled range."""

  def build_dataset(self, seed):
    """Builds range(100) -> prefetch(10) -> stable seeded shuffle."""
    base = dataset_ops.Dataset.range(100).prefetch(10)
    return base.shuffle(
        buffer_size=10, seed=seed, reshuffle_each_iteration=False)

  def testCore(self):
    """Runs the core save/restore tests over the 100-element pipeline."""
    expected_outputs = 100
    self.run_core_tests(lambda: self.build_dataset(10),
                        lambda: self.build_dataset(20),
                        expected_outputs)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
DLu/rosbridge_suite | rosbridge_server/src/tornado/iostream.py | 15 | 58750 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError
from tornado import stack_context
from tornado.util import bytes_type, errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time.  On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
    # Windows (Winsock) uses its own constant for would-block.
    _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)

# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
                    errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
    # Winsock equivalents of the reset/abort/timeout family.
    _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT)

# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
    _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,)
#######################################################
class StreamClosedError(IOError):
    """Raised by `IOStream` operations on a stream that has been closed.

    Because the close callback is scheduled to run *after* other
    callbacks on the stream (so buffered data can be processed), this
    error may be observed before the close callback fires.
    """
class UnsatisfiableReadError(Exception):
    """Raised when a bounded read cannot be satisfied.

    ``read_until`` and ``read_until_regex`` raise this when they were
    given a ``max_bytes`` argument and the limit was reached before a
    match was found.
    """
class StreamBufferFullError(Exception):
    """Raised by `IOStream` methods when a buffer has reached its size limit."""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
             read_chunk_size=None, max_write_buffer_size=None):
    """`BaseIOStream` constructor.

    :arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
    :arg max_buffer_size: Maximum amount of incoming data to buffer;
        defaults to 100MB.
    :arg read_chunk_size: Amount of data to read at one time from the
        underlying transport; defaults to 64KB.
    :arg max_write_buffer_size: Amount of outgoing data to buffer;
        defaults to unlimited.

    .. versionchanged:: 4.0
       Add the ``max_write_buffer_size`` parameter.  Changed default
       ``read_chunk_size`` to 64KB.
    """
    self.io_loop = io_loop or ioloop.IOLoop.current()
    self.max_buffer_size = max_buffer_size or 104857600  # 100MB default
    # A chunk size that is too close to max_buffer_size can cause
    # spurious failures.
    self.read_chunk_size = min(read_chunk_size or 65536,
                               self.max_buffer_size // 2)
    self.max_write_buffer_size = max_write_buffer_size  # None = unlimited
    # Exception that caused the stream to close, if any.
    self.error = None
    # Buffers are deques of byte chunks; byte counts are tracked
    # separately so size checks don't have to walk the deques.
    self._read_buffer = collections.deque()
    self._write_buffer = collections.deque()
    self._read_buffer_size = 0
    self._write_buffer_size = 0
    # When True, _handle_write must not merge chunks (OpenSSL requires
    # resending the identical object after an incomplete send).
    self._write_buffer_frozen = False
    # Read-target state: at most one of delimiter/regex/bytes is
    # active for the current read operation.
    self._read_delimiter = None
    self._read_regex = None
    self._read_max_bytes = None
    self._read_bytes = None
    self._read_partial = False
    self._read_until_close = False
    # Per operation, exactly one of callback or future is used.
    self._read_callback = None
    self._read_future = None
    self._streaming_callback = None
    self._write_callback = None
    self._write_future = None
    self._close_callback = None
    self._connect_callback = None
    self._connect_future = None
    self._connecting = False
    # Bitmask of IOLoop events currently registered, or None when the
    # fd is not registered with the IOLoop at all.
    self._state = None
    self._pending_callbacks = 0
    self._closed = False
def fileno(self):
    """Returns the file descriptor for this stream.

    Abstract; subclasses must override.  May return any object the
    `.IOLoop` accepts as a handler key (e.g. `IOStream` returns the
    socket object itself).
    """
    raise NotImplementedError()
def close_fd(self):
    """Closes the file underlying this stream.

    ``close_fd`` is called by `BaseIOStream` and should not be called
    elsewhere; other users should call `close` instead.
    """
    raise NotImplementedError()  # abstract: the subclass owns the fd
def write_to_fd(self, data):
    """Attempts to write ``data`` to the underlying file.

    Returns the number of bytes written; may be fewer than
    ``len(data)`` (callers handle short writes).
    """
    raise NotImplementedError()  # abstract: see IOStream.write_to_fd
def read_from_fd(self):
    """Attempts to read from the underlying file.

    Returns ``None`` if there was nothing to read (the socket
    returned `~errno.EWOULDBLOCK` or equivalent), otherwise
    returns the data.  When possible, should return no more than
    ``self.read_chunk_size`` bytes at a time.
    """
    raise NotImplementedError()  # abstract: see IOStream.read_from_fd
def get_fd_error(self):
    """Returns information about any error on the underlying file.

    This method is called after the `.IOLoop` has signaled an error on
    the file descriptor, and should return an Exception (such as
    `socket.error` with additional information), or None if no such
    information is available.
    """
    return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
    """Asynchronously read until we have matched the given regex.

    The result includes the data that matches the regex and anything
    that came before it.  If a callback is given, it will be run
    with the data as an argument; if not, this method returns a
    `.Future`.

    If ``max_bytes`` is not None, the connection will be closed
    if more than ``max_bytes`` bytes have been read and the regex is
    not satisfied.

    .. versionchanged:: 4.0
        Added the ``max_bytes`` argument.  The ``callback`` argument is
        now optional and a `.Future` will be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    self._read_regex = re.compile(regex)
    self._read_max_bytes = max_bytes
    try:
        # Try to satisfy the read from already-buffered data first.
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # Handle this the same way as in _handle_events.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=True)
        return future
    return future
def read_until(self, delimiter, callback=None, max_bytes=None):
    """Asynchronously read until we have found the given delimiter.

    The result includes all the data read including the delimiter.
    If a callback is given, it will be run with the data as an argument;
    if not, this method returns a `.Future`.

    If ``max_bytes`` is not None, the connection will be closed
    if more than ``max_bytes`` bytes have been read and the delimiter
    is not found.

    .. versionchanged:: 4.0
        Added the ``max_bytes`` argument.  The ``callback`` argument is
        now optional and a `.Future` will be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    self._read_delimiter = delimiter
    self._read_max_bytes = max_bytes
    try:
        # Try to satisfy the read from already-buffered data first.
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # Handle this the same way as in _handle_events.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=True)
        return future
    return future
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
               partial=False):
    """Asynchronously read a number of bytes.

    If a ``streaming_callback`` is given, it will be called with chunks
    of data as they become available, and the final result will be empty.
    Otherwise, the result is all the data that was read.
    If a callback is given, it will be run with the data as an argument;
    if not, this method returns a `.Future`.

    If ``partial`` is true, the callback is run as soon as we have
    any bytes to return (but never more than ``num_bytes``).

    .. versionchanged:: 4.0
        Added the ``partial`` argument.  The callback argument is now
        optional and a `.Future` will be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    assert isinstance(num_bytes, numbers.Integral)
    self._read_bytes = num_bytes
    self._read_partial = partial
    # Wrap in the caller's StackContext so exceptions propagate there.
    self._streaming_callback = stack_context.wrap(streaming_callback)
    self._try_inline_read()
    return future
def read_until_close(self, callback=None, streaming_callback=None):
    """Asynchronously reads all data from the socket until it is closed.

    If a ``streaming_callback`` is given, it will be called with chunks
    of data as they become available, and the final result will be empty.
    Otherwise, the result is all the data that was read.
    If a callback is given, it will be run with the data as an argument;
    if not, this method returns a `.Future`.

    .. versionchanged:: 4.0
        The callback argument is now optional and a `.Future` will
        be returned if it is omitted.
    """
    future = self._set_read_callback(callback)
    self._streaming_callback = stack_context.wrap(streaming_callback)
    if self.closed():
        # Already closed: deliver whatever is buffered immediately.
        if self._streaming_callback is not None:
            self._run_read_callback(self._read_buffer_size, True)
        self._run_read_callback(self._read_buffer_size, False)
        return future
    self._read_until_close = True
    self._try_inline_read()
    return future
def write(self, data, callback=None):
    """Asynchronously write the given data to this stream.

    If ``callback`` is given, we call it when all of the buffered write
    data has been successfully written to the stream.  If there was
    previously buffered write data and an old write callback, that
    callback is simply overwritten with this new callback.

    If no ``callback`` is given, this method returns a `.Future` that
    resolves (with a result of ``None``) when the write has been
    completed.  If `write` is called again before that `.Future` has
    resolved, the previous future will be orphaned and will never resolve.

    :raises StreamBufferFullError: if buffering ``data`` would exceed
        ``max_write_buffer_size``.

    .. versionchanged:: 4.0
        Now returns a `.Future` if no callback is given.
    """
    assert isinstance(data, bytes_type)
    self._check_closed()
    # We use bool(_write_buffer) as a proxy for write_buffer_size>0,
    # so never put empty strings in the buffer.
    if data:
        if (self.max_write_buffer_size is not None and
                self._write_buffer_size + len(data) > self.max_write_buffer_size):
            # BUG FIX: this limit guards the *write* buffer; the message
            # previously (and incorrectly) said "read buffer".
            raise StreamBufferFullError("Reached maximum write buffer size")
        # Break up large contiguous strings before inserting them in the
        # write buffer, so we don't have to recopy the entire thing
        # as we slice off pieces to send to the socket.
        WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
        for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
            self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
        self._write_buffer_size += len(data)
    if callback is not None:
        self._write_callback = stack_context.wrap(callback)
        future = None
    else:
        future = self._write_future = TracebackFuture()
    if not self._connecting:
        # Flush what we can immediately; register for WRITE events only
        # if data remains buffered after the attempt.
        self._handle_write()
        if self._write_buffer:
            self._add_io_state(self.io_loop.WRITE)
        self._maybe_add_error_listener()
    return future
def set_close_callback(self, callback):
    """Call the given callback when the stream is closed.

    This is not necessary for applications that use the `.Future`
    interface; all outstanding ``Futures`` will resolve with a
    `StreamClosedError` when the stream is closed.
    """
    self._close_callback = stack_context.wrap(callback)
    # Start listening for read events on an idle stream so that a
    # remote close is actually noticed.
    self._maybe_add_error_listener()
def close(self, exc_info=False):
    """Close this stream.

    If ``exc_info`` is true, set the ``error`` attribute to the current
    exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
    use that instead of `sys.exc_info`).
    """
    if not self.closed():
        if exc_info:
            if not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
            if any(exc_info):
                self.error = exc_info[1]
        if self._read_until_close:
            # Deliver any remaining buffered data to a pending
            # read_until_close before tearing the stream down.
            if (self._streaming_callback is not None and
                    self._read_buffer_size):
                self._run_read_callback(self._read_buffer_size, True)
            self._read_until_close = False
            self._run_read_callback(self._read_buffer_size, False)
        if self._state is not None:
            # Unregister from the IOLoop before closing the fd.
            self.io_loop.remove_handler(self.fileno())
            self._state = None
        self.close_fd()
        self._closed = True
    # Runs even if we were already closed: the close callback may have
    # been deferred while callbacks were pending.
    self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
    """Resolve outstanding futures and run the close callback.

    Only acts once the stream is closed and no callbacks are pending,
    so buffered data can still be delivered first.
    """
    # If there are pending callbacks, don't run the close callback
    # until they're done (see _maybe_add_error_handler)
    if self.closed() and self._pending_callbacks == 0:
        futures = []
        if self._read_future is not None:
            futures.append(self._read_future)
            self._read_future = None
        if self._write_future is not None:
            futures.append(self._write_future)
            self._write_future = None
        if self._connect_future is not None:
            futures.append(self._connect_future)
            self._connect_future = None
        for future in futures:
            if (isinstance(self.error, (socket.error, IOError)) and
                    errno_from_exception(self.error) in _ERRNO_CONNRESET):
                # Treat connection resets as closed connections so
                # clients only have to catch one kind of exception
                # to avoid logging.
                future.set_exception(StreamClosedError())
            else:
                future.set_exception(self.error or StreamClosedError())
        if self._close_callback is not None:
            cb = self._close_callback
            self._close_callback = None
            self._run_callback(cb)
        # Delete any unfinished callbacks to break up reference cycles.
        self._read_callback = self._write_callback = None
        # Clear the buffers so they can be cleared immediately even
        # if the IOStream object is kept alive by a reference cycle.
        # TODO: Clear the read buffer too; it currently breaks some tests.
        self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
    """`.IOLoop` handler: dispatch READ/WRITE/ERROR events for ``fd``.

    After dispatching, recomputes the event mask we still need and
    updates the IOLoop registration if it changed.
    """
    if self.closed():
        gen_log.warning("Got events for closed stream %s", fd)
        return
    try:
        if self._connecting:
            # Most IOLoops will report a write failed connect
            # with the WRITE event, but SelectIOLoop reports a
            # READ as well so we must check for connecting before
            # either.
            self._handle_connect()
        if self.closed():
            return
        if events & self.io_loop.READ:
            self._handle_read()
        if self.closed():
            return
        if events & self.io_loop.WRITE:
            self._handle_write()
        if self.closed():
            return
        if events & self.io_loop.ERROR:
            self.error = self.get_fd_error()
            # We may have queued up a user callback in _handle_read or
            # _handle_write, so don't close the IOStream until those
            # callbacks have had a chance to run.
            self.io_loop.add_callback(self.close)
            return
        state = self.io_loop.ERROR
        if self.reading():
            state |= self.io_loop.READ
        if self.writing():
            state |= self.io_loop.WRITE
        if state == self.io_loop.ERROR and self._read_buffer_size == 0:
            # If the connection is idle, listen for reads too so
            # we can tell if the connection is closed.  If there is
            # data in the read buffer we won't run the close callback
            # yet anyway, so we don't need to listen in this case.
            state |= self.io_loop.READ
        if state != self._state:
            assert self._state is not None, \
                "shouldn't happen: _handle_events without self._state"
            self._state = state
            self.io_loop.update_handler(self.fileno(), self._state)
    except UnsatisfiableReadError as e:
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=True)
    except Exception:
        gen_log.error("Uncaught exception, closing connection.",
                      exc_info=True)
        self.close(exc_info=True)
        raise
def _run_callback(self, callback, *args):
    """Schedule ``callback(*args)`` on the next IOLoop iteration.

    The wrapper closes the stream if the callback raises and keeps
    ``_pending_callbacks`` accurate so close/error handling is deferred
    until all scheduled callbacks have run.
    """
    def wrapper():
        self._pending_callbacks -= 1
        try:
            return callback(*args)
        except Exception:
            app_log.error("Uncaught exception, closing connection.",
                          exc_info=True)
            # Close the socket on an uncaught exception from a user callback
            # (It would eventually get closed when the socket object is
            # gc'd, but we don't want to rely on gc happening before we
            # run out of file descriptors)
            self.close(exc_info=True)
            # Re-raise the exception so that IOLoop.handle_callback_exception
            # can see it and log the error
            raise
        finally:
            self._maybe_add_error_listener()
    # We schedule callbacks to be run on the next IOLoop iteration
    # rather than running them directly for several reasons:
    # * Prevents unbounded stack growth when a callback calls an
    #   IOLoop operation that immediately runs another callback
    # * Provides a predictable execution context for e.g.
    #   non-reentrant mutexes
    # * Ensures that the try/except in wrapper() is run outside
    #   of the application's StackContexts
    with stack_context.NullContext():
        # stack_context was already captured in callback, we don't need to
        # capture it again for IOStream's wrapper.  This is especially
        # important if the callback was pre-wrapped before entry to
        # IOStream (as in HTTPConnection._header_callback), as we could
        # capture and leak the wrong context here.
        self._pending_callbacks += 1
        self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
    """Read from the fd until the pending read can be satisfied.

    Returns the position in the read buffer that satisfies the
    current read operation, or None if it cannot be satisfied yet.
    """
    # This method is called from _handle_read and _try_inline_read.
    try:
        if self._read_bytes is not None:
            target_bytes = self._read_bytes
        elif self._read_max_bytes is not None:
            target_bytes = self._read_max_bytes
        elif self.reading():
            # For read_until without max_bytes, or
            # read_until_close, read as much as we can before
            # scanning for the delimiter.
            target_bytes = None
        else:
            target_bytes = 0
        next_find_pos = 0
        # Pretend to have a pending callback so that an EOF in
        # _read_to_buffer doesn't trigger an immediate close
        # callback.  At the end of this method we'll either
        # establish a real pending callback via
        # _read_from_buffer or run the close callback.
        #
        # We need two try statements here so that
        # pending_callbacks is decremented before the `except`
        # clause below (which calls `close` and does need to
        # trigger the callback)
        self._pending_callbacks += 1
        while not self.closed():
            # Read from the socket until we get EWOULDBLOCK or equivalent.
            # SSL sockets do some internal buffering, and if the data is
            # sitting in the SSL object's buffer select() and friends
            # can't see it; the only way to find out if it's there is to
            # try to read it.
            if self._read_to_buffer() == 0:
                break
            self._run_streaming_callback()
            # If we've read all the bytes we can use, break out of
            # this loop.  We can't just call read_from_buffer here
            # because of subtle interactions with the
            # pending_callback and error_listener mechanisms.
            #
            # If we've reached target_bytes, we know we're done.
            if (target_bytes is not None and
                    self._read_buffer_size >= target_bytes):
                break
            # Otherwise, we need to call the more expensive find_read_pos.
            # It's inefficient to do this on every read, so instead
            # do it on the first read and whenever the read buffer
            # size has doubled.
            if self._read_buffer_size >= next_find_pos:
                pos = self._find_read_pos()
                if pos is not None:
                    return pos
                next_find_pos = self._read_buffer_size * 2
        return self._find_read_pos()
    finally:
        self._pending_callbacks -= 1
def _handle_read(self):
    """Read-event handler: fill the buffer and complete a pending read."""
    try:
        pos = self._read_to_buffer_loop()
    except UnsatisfiableReadError:
        # Propagate to _handle_events, which logs and closes.
        raise
    except Exception:
        gen_log.warning("error on read", exc_info=True)
        self.close(exc_info=True)
        return
    if pos is not None:
        self._read_from_buffer(pos)
        return
    else:
        # Read not satisfiable; run the close callback if appropriate.
        self._maybe_run_close_callback()
def _set_read_callback(self, callback):
    """Record the callback or create the future for a starting read.

    Returns the new `.Future`, or None when a callback was supplied.
    Asserts that no other read is already in progress.
    """
    assert self._read_callback is None, "Already reading"
    assert self._read_future is None, "Already reading"
    if callback is not None:
        self._read_callback = stack_context.wrap(callback)
    else:
        self._read_future = TracebackFuture()
    return self._read_future
def _run_read_callback(self, size, streaming):
    """Deliver ``size`` bytes from the read buffer to the pending reader.

    ``streaming`` selects the streaming callback (partial data) instead
    of the final read callback/future.
    """
    if streaming:
        callback = self._streaming_callback
    else:
        callback = self._read_callback
        self._read_callback = self._streaming_callback = None
        if self._read_future is not None:
            assert callback is None
            future = self._read_future
            self._read_future = None
            future.set_result(self._consume(size))
    if callback is not None:
        assert self._read_future is None
        self._run_callback(callback, self._consume(size))
    else:
        # If we scheduled a callback, we will add the error listener
        # afterwards.  If we didn't, we have to do it now.
        self._maybe_add_error_listener()
def _try_inline_read(self):
    """Attempt to complete the current read operation from buffered data.

    If the read can be completed without blocking, schedules the
    read callback on the next IOLoop iteration; otherwise starts
    listening for reads on the socket.
    """
    # See if we've already got the data from a previous read
    self._run_streaming_callback()
    pos = self._find_read_pos()
    if pos is not None:
        self._read_from_buffer(pos)
        return
    self._check_closed()
    try:
        pos = self._read_to_buffer_loop()
    except Exception:
        # If there was an error in _read_to_buffer, we called close() already,
        # but couldn't run the close callback because of _pending_callbacks.
        # Before we escape from this function, run the close callback if
        # applicable.
        self._maybe_run_close_callback()
        raise
    if pos is not None:
        self._read_from_buffer(pos)
        return
    # We couldn't satisfy the read inline, so either close the stream
    # or listen for new data.
    if self.closed():
        self._maybe_run_close_callback()
    else:
        self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
    """Reads from the socket and appends the result to the read buffer.

    Returns the number of bytes read.  Returns 0 if there is nothing
    to read (i.e. the read returns EWOULDBLOCK or equivalent).  On
    error closes the socket and raises an exception.
    """
    try:
        chunk = self.read_from_fd()
    except (socket.error, IOError, OSError) as e:
        # ssl.SSLError is a subclass of socket.error
        if e.args[0] in _ERRNO_CONNRESET:
            # Treat ECONNRESET as a connection close rather than
            # an error to minimize log spam (the exception will
            # be available on self.error for apps that care).
            self.close(exc_info=True)
            # NOTE(review): this returns None, not 0; callers compare
            # the result to 0 and rely on their closed() loop check to
            # terminate -- confirm before changing.
            return
        self.close(exc_info=True)
        raise
    if chunk is None:
        return 0
    self._read_buffer.append(chunk)
    self._read_buffer_size += len(chunk)
    if self._read_buffer_size > self.max_buffer_size:
        gen_log.error("Reached maximum read buffer size")
        self.close()
        raise StreamBufferFullError("Reached maximum read buffer size")
    return len(chunk)
def _run_streaming_callback(self):
    """Deliver buffered data to the streaming callback, if one is set."""
    if self._streaming_callback is not None and self._read_buffer_size:
        bytes_to_consume = self._read_buffer_size
        if self._read_bytes is not None:
            # Never deliver more than the remaining read_bytes target.
            bytes_to_consume = min(self._read_bytes, bytes_to_consume)
            self._read_bytes -= bytes_to_consume
        self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
    """Attempts to complete the currently-pending read from the buffer.

    The argument is either a position in the read buffer or None,
    as returned by _find_read_pos.
    """
    # Clear all read-target state before delivering the result so a
    # new read can be started from the callback.
    self._read_bytes = self._read_delimiter = self._read_regex = None
    self._read_partial = False
    self._run_read_callback(pos, False)
def _find_read_pos(self):
    """Attempts to find a position in the read buffer that satisfies
    the currently-pending read.

    Returns a position in the buffer if the current read can be satisfied,
    or None if it cannot.
    """
    if (self._read_bytes is not None and
            (self._read_buffer_size >= self._read_bytes or
             (self._read_partial and self._read_buffer_size > 0))):
        num_bytes = min(self._read_bytes, self._read_buffer_size)
        return num_bytes
    elif self._read_delimiter is not None:
        # Multi-byte delimiters (e.g. '\r\n') may straddle two
        # chunks in the read buffer, so we can't easily find them
        # without collapsing the buffer.  However, since protocols
        # using delimited reads (as opposed to reads of a known
        # length) tend to be "line" oriented, the delimiter is likely
        # to be in the first few chunks.  Merge the buffer gradually
        # since large merges are relatively expensive and get undone in
        # _consume().
        if self._read_buffer:
            while True:
                loc = self._read_buffer[0].find(self._read_delimiter)
                if loc != -1:
                    delimiter_len = len(self._read_delimiter)
                    self._check_max_bytes(self._read_delimiter,
                                          loc + delimiter_len)
                    return loc + delimiter_len
                if len(self._read_buffer) == 1:
                    break
                _double_prefix(self._read_buffer)
            # No match anywhere in the collapsed buffer; enforce the
            # max_bytes limit on what we have scanned so far.
            self._check_max_bytes(self._read_delimiter,
                                  len(self._read_buffer[0]))
    elif self._read_regex is not None:
        if self._read_buffer:
            while True:
                m = self._read_regex.search(self._read_buffer[0])
                if m is not None:
                    self._check_max_bytes(self._read_regex, m.end())
                    return m.end()
                if len(self._read_buffer) == 1:
                    break
                _double_prefix(self._read_buffer)
            # Same limit enforcement as for the delimiter case above.
            self._check_max_bytes(self._read_regex,
                                  len(self._read_buffer[0]))
    return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
    """Write-event handler: flush as much buffered data as the fd accepts,
    then fire the write callback/future once the buffer drains."""
    while self._write_buffer:
        try:
            if not self._write_buffer_frozen:
                # On windows, socket.send blows up if given a
                # write buffer that's too large, instead of just
                # returning the number of bytes it was able to
                # process.  Therefore we must not call socket.send
                # with more than 128KB at a time.
                _merge_prefix(self._write_buffer, 128 * 1024)
            num_bytes = self.write_to_fd(self._write_buffer[0])
            if num_bytes == 0:
                # With OpenSSL, if we couldn't write the entire buffer,
                # the very same string object must be used on the
                # next call to send.  Therefore we suppress
                # merging the write buffer after an incomplete send.
                # A cleaner solution would be to set
                # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
                # not yet accessible from python
                # (http://bugs.python.org/issue8240)
                self._write_buffer_frozen = True
                break
            self._write_buffer_frozen = False
            _merge_prefix(self._write_buffer, num_bytes)
            self._write_buffer.popleft()
            self._write_buffer_size -= num_bytes
        except (socket.error, IOError, OSError) as e:
            if e.args[0] in _ERRNO_WOULDBLOCK:
                self._write_buffer_frozen = True
                break
            else:
                if e.args[0] not in _ERRNO_CONNRESET:
                    # Broken pipe errors are usually caused by connection
                    # reset, and its better to not log EPIPE errors to
                    # minimize log spam
                    gen_log.warning("Write error on %s: %s",
                                    self.fileno(), e)
                self.close(exc_info=True)
                return
    if not self._write_buffer:
        # Buffer fully drained: complete the pending write.
        if self._write_callback:
            callback = self._write_callback
            self._write_callback = None
            self._run_callback(callback)
        if self._write_future:
            future = self._write_future
            self._write_future = None
            future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
    """Listen for read events on an idle stream so closes are noticed."""
    # This method is part of an optimization: to detect a connection that
    # is closed when we're not actively reading or writing, we must listen
    # for read events.  However, it is inefficient to do this when the
    # connection is first established because we are going to read or write
    # immediately anyway.  Instead, we insert checks at various times to
    # see if the connection is idle and add the read listener then.
    if self._pending_callbacks != 0:
        return
    if self._state is None or self._state == ioloop.IOLoop.ERROR:
        if self.closed():
            self._maybe_run_close_callback()
        elif (self._read_buffer_size == 0 and
                self._close_callback is not None):
            self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
    """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.

    Implementation notes: Reads and writes have a fast path and a
    slow path.  The fast path reads synchronously from socket
    buffers, while the slow path uses `_add_io_state` to schedule
    an IOLoop callback.  Note that in both cases, the callback is
    run asynchronously with `_run_callback`.

    To detect closed connections, we must have called
    `_add_io_state` at some point, but we want to delay this as
    much as possible so we don't have to set an `IOLoop.ERROR`
    listener that will be overwritten by the next slow-path
    operation.  As long as there are callbacks scheduled for
    fast-path ops, those callbacks may do more reads.
    If a sequence of fast-path ops do not end in a slow-path op,
    (e.g. for an @asynchronous long-poll request), we must add
    the error handler.  This is done in `_run_callback` and `write`
    (since the write callback is optional so we can have a
    fast-path write with no `_run_callback`).
    """
    if self.closed():
        # connection has been closed, so there can be no future events
        return
    if self._state is None:
        # First registration: always listen for ERROR as well.
        self._state = ioloop.IOLoop.ERROR | state
        with stack_context.NullContext():
            self.io_loop.add_handler(
                self.fileno(), self._handle_events, self._state)
    elif not self._state & state:
        self._state = self._state | state
        self.io_loop.update_handler(self.fileno(), self._state)
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
print data
stream.close()
tornado.ioloop.IOLoop.instance().stop()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.instance().start()
"""
def __init__(self, socket, *args, **kwargs):
    """Wrap ``socket`` (connected or not) in a non-blocking stream."""
    self.socket = socket
    # All further I/O must go through the IOStream machinery.
    self.socket.setblocking(False)
    super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
If specified, the ``server_hostname`` parameter will be used
in SSL connections for certificate validation (if requested in
the ``ssl_options``) and SNI (if supported; requires
Python 3.2+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either a dictionary
of options or an `ssl.SSLContext`. If a ``server_hostname``
is given, it will be used for certificate verification
(as configured in the ``ssl_options``).
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
ssl_options = {}
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options, server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno not in (errno.EINVAL, errno.ECONNRESET):
raise
class SSLIOStream(IOStream):
    """A utility class to write to and read from a non-blocking SSL socket.
    If the socket passed to the constructor is already connected,
    it should be wrapped with::
        ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
    before constructing the `SSLIOStream`. Unconnected sockets will be
    wrapped when `IOStream.connect` is finished.
    """
    def __init__(self, *args, **kwargs):
        """The ``ssl_options`` keyword argument may either be a dictionary
        of keywords arguments for `ssl.wrap_socket`, or an `ssl.SSLContext`
        object.
        """
        self._ssl_options = kwargs.pop('ssl_options', {})
        super(SSLIOStream, self).__init__(*args, **kwargs)
        # Handshake state: while _ssl_accepting is True, reads/writes are
        # redirected into _do_ssl_handshake instead of the normal paths.
        self._ssl_accepting = True
        self._handshake_reading = False
        self._handshake_writing = False
        self._ssl_connect_callback = None
        self._server_hostname = None
        # If the socket is already connected, attempt to start the handshake.
        try:
            self.socket.getpeername()
        except socket.error:
            pass
        else:
            # Indirectly start the handshake, which will run on the next
            # IOLoop iteration and then the real IO state will be set in
            # _handle_events.
            self._add_io_state(self.io_loop.WRITE)
    def reading(self):
        # A pending handshake read counts as "reading" for the IOLoop.
        return self._handshake_reading or super(SSLIOStream, self).reading()
    def writing(self):
        # A pending handshake write counts as "writing" for the IOLoop.
        return self._handshake_writing or super(SSLIOStream, self).writing()
    def _do_ssl_handshake(self):
        """Advance the SSL handshake one step; retried from read/write events."""
        # Based on code from test_ssl.py in the python stdlib
        try:
            self._handshake_reading = False
            self._handshake_writing = False
            self.socket.do_handshake()
        except ssl.SSLError as err:
            # WANT_READ/WANT_WRITE mean the handshake needs another I/O
            # event; record which direction so reading()/writing() report it.
            if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                self._handshake_reading = True
                return
            elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                self._handshake_writing = True
                return
            elif err.args[0] in (ssl.SSL_ERROR_EOF,
                                 ssl.SSL_ERROR_ZERO_RETURN):
                return self.close(exc_info=True)
            elif err.args[0] == ssl.SSL_ERROR_SSL:
                try:
                    peer = self.socket.getpeername()
                except Exception:
                    peer = '(not connected)'
                gen_log.warning("SSL Error on %s %s: %s",
                                self.socket.fileno(), peer, err)
                return self.close(exc_info=True)
            raise
        except socket.error as err:
            if err.args[0] in _ERRNO_CONNRESET:
                return self.close(exc_info=True)
        except AttributeError:
            # On Linux, if the connection was reset before the call to
            # wrap_socket, do_handshake will fail with an
            # AttributeError.
            return self.close(exc_info=True)
        else:
            # Handshake finished: verify the peer, then fire the pending
            # connect callback (used by connect() and start_tls()).
            self._ssl_accepting = False
            if not self._verify_cert(self.socket.getpeercert()):
                self.close()
                return
            if self._ssl_connect_callback is not None:
                callback = self._ssl_connect_callback
                self._ssl_connect_callback = None
                self._run_callback(callback)
    def _verify_cert(self, peercert):
        """Returns True if peercert is valid according to the configured
        validation mode and hostname.
        The ssl handshake already tested the certificate for a valid
        CA signature; the only thing that remains is to check
        the hostname.
        """
        if isinstance(self._ssl_options, dict):
            verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
        elif isinstance(self._ssl_options, ssl.SSLContext):
            verify_mode = self._ssl_options.verify_mode
        assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
        if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
            return True
        # NOTE(review): the certificate is re-fetched here even though the
        # caller already passed it in as ``peercert``; presumably equivalent
        # to the argument — confirm before relying on any difference.
        cert = self.socket.getpeercert()
        if cert is None and verify_mode == ssl.CERT_REQUIRED:
            gen_log.warning("No SSL certificate given")
            return False
        try:
            ssl_match_hostname(peercert, self._server_hostname)
        except SSLCertificateError:
            gen_log.warning("Invalid SSL certificate", exc_info=True)
            return False
        else:
            return True
    def _handle_read(self):
        # Until the handshake completes, read events drive the handshake.
        if self._ssl_accepting:
            self._do_ssl_handshake()
            return
        super(SSLIOStream, self)._handle_read()
    def _handle_write(self):
        # Until the handshake completes, write events drive the handshake.
        if self._ssl_accepting:
            self._do_ssl_handshake()
            return
        super(SSLIOStream, self)._handle_write()
    def connect(self, address, callback=None, server_hostname=None):
        # Save the user's callback and run it after the ssl handshake
        # has completed.
        self._ssl_connect_callback = stack_context.wrap(callback)
        self._server_hostname = server_hostname
        # Note: Since we don't pass our callback argument along to
        # super.connect(), this will always return a Future.
        # This is harmless, but a bit less efficient than it could be.
        return super(SSLIOStream, self).connect(address, callback=None)
    def _handle_connect(self):
        # Call the superclass method to check for errors.
        super(SSLIOStream, self)._handle_connect()
        if self.closed():
            return
        # When the connection is complete, wrap the socket for SSL
        # traffic. Note that we do this by overriding _handle_connect
        # instead of by passing a callback to super().connect because
        # user callbacks are enqueued asynchronously on the IOLoop,
        # but since _handle_events calls _handle_connect immediately
        # followed by _handle_write we need this to be synchronous.
        #
        # The IOLoop will get confused if we swap out self.socket while the
        # fd is registered, so remove it now and re-register after
        # wrap_socket().
        self.io_loop.remove_handler(self.socket)
        old_state = self._state
        self._state = None
        self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
                                      server_hostname=self._server_hostname,
                                      do_handshake_on_connect=False)
        self._add_io_state(old_state)
    def read_from_fd(self):
        """Attempt a single non-blocking SSL read; None means no data yet."""
        if self._ssl_accepting:
            # If the handshake hasn't finished yet, there can't be anything
            # to read (attempting to read may or may not raise an exception
            # depending on the SSL version)
            return None
        try:
            # SSLSocket objects have both a read() and recv() method,
            # while regular sockets only have recv().
            # The recv() method blocks (at least in python 2.6) if it is
            # called when there is nothing to read, so we have to use
            # read() instead.
            chunk = self.socket.read(self.read_chunk_size)
        except ssl.SSLError as e:
            # SSLError is a subclass of socket.error, so this except
            # block must come first.
            if e.args[0] == ssl.SSL_ERROR_WANT_READ:
                return None
            else:
                raise
        except socket.error as e:
            if e.args[0] in _ERRNO_WOULDBLOCK:
                return None
            else:
                raise
        if not chunk:
            # An empty read means the connection was closed.
            self.close()
            return None
        return chunk
class PipeIOStream(BaseIOStream):
    """Pipe-based `IOStream` implementation.
    The constructor takes an integer file descriptor (such as one returned
    by `os.pipe`) rather than an open file object. Pipes are generally
    one-way, so a `PipeIOStream` can be used for reading or writing but not
    both.
    """
    def __init__(self, fd, *args, **kwargs):
        """Wrap the integer descriptor *fd*, switching it to non-blocking mode."""
        self.fd = fd
        _set_nonblocking(fd)
        super(PipeIOStream, self).__init__(*args, **kwargs)

    def fileno(self):
        """Return the wrapped file descriptor."""
        return self.fd

    def close_fd(self):
        """Close the underlying descriptor."""
        os.close(self.fd)

    def write_to_fd(self, data):
        """Perform one non-blocking write; returns the number of bytes written."""
        return os.write(self.fd, data)

    def read_from_fd(self):
        """Perform one non-blocking read.

        Returns the bytes read, or None if nothing is available (or the
        stream was closed due to EOF/EBADF).
        """
        try:
            data = os.read(self.fd, self.read_chunk_size)
        except (IOError, OSError) as exc:
            code = errno_from_exception(exc)
            if code in _ERRNO_WOULDBLOCK:
                return None
            if code == errno.EBADF:
                # If the writing half of a pipe is closed, select will
                # report it as readable but reads will fail with EBADF.
                self.close(exc_info=True)
                return None
            raise
        if data:
            return data
        # Empty read: the other end of the pipe was closed.
        self.close()
        return None
def _double_prefix(deque):
    """Grow by doubling, but don't split the second chunk just because the
    first one is small.
    """
    head_len = len(deque[0])
    target = max(head_len * 2, head_len + len(deque[1]))
    _merge_prefix(deque, target)
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
    """Return a unittest suite that runs the doctests in this module."""
    import doctest
    return doctest.DocTestSuite()
| bsd-3-clause |
DylannCordel/django-cms | cms/tests/widgets.py | 61 | 1211 | # -*- coding: utf-8 -*-
from cms.api import create_page
from cms.forms.widgets import PageSelectWidget, PageSmartLinkWidget
from cms.test_utils.testcases import CMSTestCase
class WidgetTestCases(CMSTestCase):
    """Smoke tests for the CMS page-selection form widgets."""

    def test_pageselectwidget(self):
        """PageSelectWidget decompresses page pks, renders its sub-fields,
        and detects changed values."""
        first_page = create_page("Test page", "nav_playground.html", "en")
        second_page = create_page("Test page2", "nav_playground.html", "en")
        widget = PageSelectWidget()
        self.assertEqual(widget.decompress(first_page.pk),
                         [1, first_page.pk, first_page.pk])
        self.assertEqual(widget.decompress(second_page.pk),
                         [1, second_page.pk, second_page.pk])
        self.assertIn("page_1", widget.render("page", ''))
        self.assertIn("page_2", widget.render("page", ''))
        # Identical values count as unchanged; anything else is a change.
        self.assertFalse(widget._has_changed([0, 1], [0, 1]))
        self.assertTrue(widget._has_changed('', [0, 1]))
        self.assertTrue(widget._has_changed([0, 1], ''))

    def test_pagesmartwidget(self):
        """PageSmartLinkWidget renders the bound field name."""
        create_page("Test page", "nav_playground.html", "en")
        create_page("Test page2", "nav_playground.html", "en")
        widget = PageSmartLinkWidget(
            ajax_view='admin:cms_page_get_published_pagelist')
        widget.language = 'en'
        self.assertIn('page', widget.render("page", ''))
| bsd-3-clause |
monash-merc/karaage | karaage/migrations/0001_initial.py | 3 | 20407 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import jsonfield.fields
import datetime
class Migration(migrations.Migration):
    """Initial karaage schema: people/accounts, groups, institutes (with
    delegates and quotas), machines/categories, projects and the admin log.
    Auto-generated by Django's makemigrations; do not hand-edit the field
    definitions.
    """
    dependencies = [
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        # Person doubles as the auth user model (password/last_login fields).
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('username', models.CharField(unique=True, max_length=255)),
                ('email', models.EmailField(max_length=75, null=True, db_index=True)),
                ('short_name', models.CharField(max_length=30)),
                ('full_name', models.CharField(max_length=60)),
                ('is_active', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('saml_id', models.CharField(max_length=200, unique=True, null=True, editable=False, blank=True)),
                ('position', models.CharField(max_length=200, null=True, blank=True)),
                ('telephone', models.CharField(max_length=200, null=True, blank=True)),
                ('mobile', models.CharField(max_length=200, null=True, blank=True)),
                ('department', models.CharField(max_length=200, null=True, blank=True)),
                ('supervisor', models.CharField(max_length=100, null=True, blank=True)),
                ('title', models.CharField(blank=True, max_length=10, null=True, choices=[('', ''), ('Mr', 'Mr'), ('Mrs', 'Mrs'), ('Miss', 'Miss'), ('Ms', 'Ms'), ('Dr', 'Dr'), ('Prof', 'Prof'), ('A/Prof', 'A/Prof')])),
                ('address', models.CharField(max_length=200, null=True, blank=True)),
                ('city', models.CharField(max_length=100, null=True, blank=True)),
                ('postcode', models.CharField(max_length=8, null=True, blank=True)),
                ('state', models.CharField(blank=True, max_length=4, null=True, choices=[('', '--------'), ('ACT', 'ACT'), ('NSW', 'New South Wales'), ('NT', 'Northern Territory'), ('QLD', 'Queensland'), ('SA', 'South Australia'), ('TAS', 'Tasmania'), ('VIC', 'Victoria'), ('WA', 'Western Australia')])),
                # ISO 3166-1 alpha-2 country codes, common choices first.
                ('country', models.CharField(blank=True, max_length=2, null=True, choices=[('AU', 'Australia'), ('NZ', 'New Zealand'), ('GB', 'United Kingdom'), ('DE', 'Germany'), ('US', 'United States'), ('', '--------------------------------------'), ('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AF', 'Afghanistan'), ('AG', 'Antigua and Barbuda'), ('AI', 'Anguilla'), ('AL', 'Albania'), ('AM', 'Armenia'), ('AN', 'Netherlands Antilles'), ('AO', 'Angola'), ('AQ', 'Antarctica'), ('AR', 'Argentina'), ('AS', 'American Samoa'), ('AT', 'Austria'), ('AW', 'Aruba'), ('AX', 'Aland Islands'), ('AZ', 'Azerbaijan'), ('BA', 'Bosnia and Herzegovina'), ('BB', 'Barbados'), ('BD', 'Bangladesh'), ('BE', 'Belgium'), ('BF', 'Burkina Faso'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BI', 'Burundi'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BN', 'Brunei Darussalam'), ('BO', 'Bolivia'), ('BR', 'Brazil'), ('BS', 'Bahamas'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CD', 'Congo'), ('CF', 'Central African Republic'), ('CG', 'Congo'), ('CH', 'Switzerland'), ('CI', "Cote d'Ivoire"), ('CK', 'Cook Islands'), ('CL', 'Chile'), ('CM', 'Cameroon'), ('CN', 'China'), ('CO', 'Colombia'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CV', 'Cape Verde'), ('CX', 'Christmas Island'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DJ', 'Djibouti'), ('DK', 'Denmark'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EE', 'Estonia'), ('EG', 'Egypt'), ('EH', 'Western Sahara'), ('ER', 'Eritrea'), ('ES', 'Spain'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands'), ('FM', 'Micronesia'), ('FO', 'Faroe Islands'), ('FR', 'France'), ('GA', 'Gabon'), ('GD', 'Grenada'), ('GE', 'Georgia'), ('GF', 'French Guiana'), ('GG', 'Guernsey'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GL', 'Greenland'), ('GM', 'Gambia'), ('GN', 'Guinea'), ('GP', 'Guadeloupe'), ('GQ', 
'Equatorial Guinea'), ('GR', 'Greece'), ('GS', 'South Georgia and the South Sandwich Islands'), ('GT', 'Guatemala'), ('GU', 'Guam'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard Island and McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IM', 'Isle of Man'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IQ', 'Iraq'), ('IR', 'Iran'), ('IS', 'Iceland'), ('IT', 'Italy'), ('JE', 'Jersey'), ('JM', 'Jamaica'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KM', 'Comoros'), ('KN', 'Saint Kitts and Nevis'), ('KP', 'Korea'), ('KR', 'Korea'), ('KW', 'Kuwait'), ('KY', 'Cayman Islands'), ('KZ', 'Kazakhstan'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LR', 'Liberia'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('LY', 'Libyan Arab Jamahiriya'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova'), ('ME', 'Montenegro'), ('MG', 'Madagascar'), ('MH', 'Marshall Islands'), ('MK', 'Macedonia'), ('ML', 'Mali'), ('MM', 'Myanmar'), ('MN', 'Mongolia'), ('MO', 'Macao'), ('MP', 'Northern Mariana Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MS', 'Montserrat'), ('MT', 'Malta'), ('MU', 'Mauritius'), ('MV', 'Maldives'), ('MW', 'Malawi'), ('MX', 'Mexico'), ('MY', 'Malaysia'), ('MZ', 'Mozambique'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NU', 'Niue'), ('OM', 'Oman'), ('PA', 'Panama'), ('PE', 'Peru'), ('PF', 'French Polynesia'), ('PG', 'Papua New Guinea'), ('PH', 'Philippines'), ('PK', 'Pakistan'), ('PL', 'Poland'), ('PM', 'Saint Pierre and Miquelon'), ('PN', 'Pitcairn'), 
('PR', 'Puerto Rico'), ('PS', 'Palestinian Territory'), ('PT', 'Portugal'), ('PW', 'Palau'), ('PY', 'Paraguay'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RS', 'Serbia'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SB', 'Solomon Islands'), ('SC', 'Seychelles'), ('SD', 'Sudan'), ('SE', 'Sweden'), ('SG', 'Singapore'), ('SH', 'Saint Helena'), ('SI', 'Slovenia'), ('SJ', 'Svalbard and Jan Mayen'), ('SK', 'Slovakia'), ('SL', 'Sierra Leone'), ('SM', 'San Marino'), ('SN', 'Senegal'), ('SO', 'Somalia'), ('SR', 'Suriname'), ('ST', 'Sao Tome and Principe'), ('SV', 'El Salvador'), ('SY', 'Syrian Arab Republic'), ('SZ', 'Swaziland'), ('TC', 'Turks and Caicos Islands'), ('TD', 'Chad'), ('TF', 'French Southern Territories'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TL', 'Timor-Leste'), ('TM', 'Turkmenistan'), ('TN', 'Tunisia'), ('TO', 'Tonga'), ('TR', 'Turkey'), ('TT', 'Trinidad and Tobago'), ('TV', 'Tuvalu'), ('TW', 'Taiwan'), ('TZ', 'Tanzania'), ('UA', 'Ukraine'), ('UG', 'Uganda'), ('UM', 'United States Minor Outlying Islands'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VA', 'Vatican City'), ('VC', 'Saint Vincent and the Grenadines'), ('VE', 'Venezuela'), ('VG', 'Virgin Islands (British)'), ('VI', 'Virgin Islands (US)'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis and Futuna'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('YT', 'Mayotte'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')])),
                ('website', models.URLField(null=True, blank=True)),
                ('fax', models.CharField(max_length=50, null=True, blank=True)),
                ('comment', models.TextField(null=True, blank=True)),
                ('date_approved', models.DateField(null=True, blank=True)),
                ('date_deleted', models.DateField(null=True, blank=True)),
                ('last_usage', models.DateField(null=True, blank=True)),
                ('expires', models.DateField(null=True, blank=True)),
                ('is_systemuser', models.BooleanField(default=False)),
                ('login_enabled', models.BooleanField(default=True)),
                ('legacy_ldap_password', models.CharField(max_length=128, null=True, blank=True)),
                ('approved_by', models.ForeignKey(related_name='user_approver', blank=True, to='karaage.Person', null=True)),
                ('deleted_by', models.ForeignKey(related_name='user_deletor', blank=True, to='karaage.Person', null=True)),
            ],
            options={
                'ordering': ['full_name', 'short_name'],
                'db_table': 'person',
                'verbose_name_plural': 'people',
            },
            bases=(models.Model,),
        ),
        # A Person's account in a particular machine category/datastore.
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('username', models.CharField(max_length=255)),
                ('foreign_id', models.CharField(help_text='The foreign identifier from the datastore.', max_length=255, unique=True, null=True)),
                ('date_created', models.DateField()),
                ('date_deleted', models.DateField(null=True, blank=True)),
                ('disk_quota', models.IntegerField(help_text='In GB', null=True, blank=True)),
                ('shell', models.CharField(max_length=50)),
                ('login_enabled', models.BooleanField(default=True)),
                ('extra_data', jsonfield.fields.JSONField(default={}, help_text='Datastore specific values should be stored in this field.')),
            ],
            options={
                'ordering': ['person'],
                'db_table': 'account',
            },
            bases=(models.Model,),
        ),
        # Named collection of People, mirrored to the external datastore.
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
                ('foreign_id', models.CharField(help_text='The foreign identifier from the datastore.', max_length=255, unique=True, null=True)),
                ('description', models.TextField(null=True, blank=True)),
                ('extra_data', jsonfield.fields.JSONField(default={}, help_text='Datastore specific values should be stored in this field.')),
                ('members', models.ManyToManyField(related_name='groups', to='karaage.Person')),
            ],
            options={
                'ordering': ['name'],
                'db_table': 'people_group',
            },
            bases=(models.Model,),
        ),
        # Institutes own projects; saml_entityid links them to an IdP.
        migrations.CreateModel(
            name='Institute',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
                ('saml_entityid', models.CharField(max_length=200, unique=True, null=True, blank=True)),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'ordering': ['name'],
                'db_table': 'institute',
            },
            bases=(models.Model,),
        ),
        # Through-model linking Institute delegates to People.
        migrations.CreateModel(
            name='InstituteDelegate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('send_email', models.BooleanField()),
                ('institute', models.ForeignKey(to='karaage.Institute')),
                ('person', models.ForeignKey(to='karaage.Person')),
            ],
            options={
                'db_table': 'institutedelegate',
            },
            bases=(models.Model,),
        ),
        # Per-institute resource quota for a machine category.
        migrations.CreateModel(
            name='InstituteQuota',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quota', models.DecimalField(max_digits=5, decimal_places=2)),
                ('cap', models.IntegerField(null=True, blank=True)),
                ('disk_quota', models.IntegerField(null=True, blank=True)),
                ('institute', models.ForeignKey(to='karaage.Institute')),
            ],
            options={
                'db_table': 'institute_quota',
            },
            bases=(models.Model,),
        ),
        # Admin audit trail, keyed to Person instead of auth.User.
        migrations.CreateModel(
            name='LogEntry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('action_time', models.DateTimeField(auto_now_add=True, verbose_name='action time')),
                ('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
                ('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
                ('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
                ('change_message', models.TextField(verbose_name='change message', blank=True)),
                ('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
                ('user', models.ForeignKey(to='karaage.Person', null=True)),
            ],
            options={
                'ordering': ('-action_time', '-pk'),
                'db_table': 'admin_log',
                'verbose_name': 'log entry',
                'verbose_name_plural': 'log entries',
            },
            bases=(models.Model,),
        ),
        # A compute machine; has password/last_login for machine auth.
        migrations.CreateModel(
            name='Machine',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('name', models.CharField(unique=True, max_length=50)),
                ('no_cpus', models.IntegerField()),
                ('no_nodes', models.IntegerField()),
                ('type', models.CharField(max_length=100)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField(null=True, blank=True)),
                ('pbs_server_host', models.CharField(max_length=50, null=True, blank=True)),
                ('mem_per_core', models.IntegerField(help_text='In GB', null=True, blank=True)),
                ('scaling_factor', models.IntegerField(default=1)),
            ],
            options={
                'db_table': 'machine',
            },
            bases=(models.Model,),
        ),
        # Groups machines that share a datastore backend.
        migrations.CreateModel(
            name='MachineCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=100)),
                ('datastore', models.CharField(help_text='Modifying this value on existing categories will affect accounts created under the old datastore', max_length=255, choices=[('dummy', 'dummy'), ('ldap', 'ldap')])),
            ],
            options={
                'db_table': 'machine_category',
                'verbose_name_plural': 'machine categories',
            },
            bases=(models.Model,),
        ),
        # Research projects with leaders, approval workflow and lifecycle dates.
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('pid', models.CharField(unique=True, max_length=255)),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(null=True, blank=True)),
                ('is_approved', models.BooleanField(default=False)),
                ('start_date', models.DateField(default=datetime.datetime.today)),
                ('end_date', models.DateField(null=True, blank=True)),
                ('additional_req', models.TextField(null=True, blank=True)),
                ('is_active', models.BooleanField(default=False)),
                ('date_approved', models.DateField(null=True, editable=False, blank=True)),
                ('date_deleted', models.DateField(null=True, editable=False, blank=True)),
                ('last_usage', models.DateField(null=True, editable=False, blank=True)),
                ('approved_by', models.ForeignKey(related_name='project_approver', blank=True, editable=False, to='karaage.Person', null=True)),
                ('deleted_by', models.ForeignKey(related_name='project_deletor', blank=True, editable=False, to='karaage.Person', null=True)),
                ('group', models.ForeignKey(to='karaage.Group')),
                ('institute', models.ForeignKey(to='karaage.Institute')),
                ('leaders', models.ManyToManyField(related_name='leads', to='karaage.Person')),
            ],
            options={
                'ordering': ['pid'],
                'db_table': 'project',
            },
            bases=(models.Model,),
        ),
        # Per-project usage cap for a machine category.
        migrations.CreateModel(
            name='ProjectQuota',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('cap', models.IntegerField(null=True, blank=True)),
                ('machine_category', models.ForeignKey(to='karaage.MachineCategory')),
                ('project', models.ForeignKey(to='karaage.Project')),
            ],
            options={
                'db_table': 'project_quota',
            },
            bases=(models.Model,),
        ),
        # Cross-model fields and constraints added after all models exist,
        # to break the circular references between them.
        migrations.AlterUniqueTogether(
            name='projectquota',
            unique_together=set([('project', 'machine_category')]),
        ),
        migrations.AddField(
            model_name='machine',
            name='category',
            field=models.ForeignKey(to='karaage.MachineCategory'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='institutequota',
            name='machine_category',
            field=models.ForeignKey(to='karaage.MachineCategory'),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='institutequota',
            unique_together=set([('institute', 'machine_category')]),
        ),
        migrations.AddField(
            model_name='institute',
            name='delegates',
            field=models.ManyToManyField(related_name='delegate_for', null=True, through='karaage.InstituteDelegate', to='karaage.Person', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='institute',
            name='group',
            field=models.ForeignKey(to='karaage.Group'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='account',
            name='default_project',
            field=models.ForeignKey(blank=True, to='karaage.Project', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='account',
            name='machine_category',
            field=models.ForeignKey(to='karaage.MachineCategory'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='account',
            name='person',
            field=models.ForeignKey(to='karaage.Person'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='person',
            name='institute',
            field=models.ForeignKey(to='karaage.Institute'),
            preserve_default=True,
        ),
    ]
| gpl-3.0 |
GNOME/orca | test/keystrokes/gtk-demo/role_combo_box2.py | 1 | 2176 | #!/usr/bin/python
"""Test of labelled combo box output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Printing"))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Right"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Alt>o"))
sequence.append(utils.AssertPresentationAction(
"1. Combo box",
["BRAILLE LINE: 'gtk-demo application Print dialog Page Setup page tab Only print: All sheets combo box'",
" VISIBLE: 'Only print: All sheets combo box', cursor=1",
"SPEECH OUTPUT: 'Only print: All sheets combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"2. Where Am I",
["BRAILLE LINE: 'gtk-demo application Print dialog Page Setup page tab Only print: All sheets combo box'",
" VISIBLE: 'Only print: All sheets combo box', cursor=13",
"SPEECH OUTPUT: 'Only print: combo box.'",
"SPEECH OUTPUT: 'All sheets 1 of 3.'",
"SPEECH OUTPUT: 'Alt+O'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Change selection",
["BRAILLE LINE: 'gtk-demo application Print dialog Page Setup page tab Only print: Even sheets combo box Even sheets'",
" VISIBLE: 'Even sheets', cursor=1",
"SPEECH OUTPUT: 'Even sheets.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"4. Where Am I",
["BRAILLE LINE: 'gtk-demo application Print dialog Page Setup page tab Only print: Even sheets combo box Even sheets'",
" VISIBLE: 'Even sheets', cursor=1",
"SPEECH OUTPUT: 'Print dialog'",
"SPEECH OUTPUT: 'Page Setup page tab.'",
"SPEECH OUTPUT: 'Even sheets.'",
"SPEECH OUTPUT: '2 of 3'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 |
TsubasaK111/MeetingMaster | conference.py | 1 | 20723 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import json
import os
import time
import endpoints
from protorpc import messages, message_types, remote
from google.appengine.api import taskqueue
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from models import Profile, ProfileMiniForm, ProfileForm, TeeShirtSize
from models import Conference, ConferenceForm, ConferenceForms, ConferenceQueryForm, ConferenceQueryForms
from models import BooleanMessage, ConflictException, StringMessage
from utils import getUserId
from settings import WEB_CLIENT_ID, FRONTING_WEB_CLIENT_ID
import pdb, logging
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID

# Memcache key under which _cacheAnnouncement() stores the announcement text.
MEMCACHE_ANNOUNCEMENTS_KEY = "announcements"

# Defaults applied to Conference fields the client omitted.
MEETING_DEFAULTS = { "city": "Default City",
                     "maxAttendees": 0,
                     "seatsAvailable": 0,
                     "topics": [ "Default", "Topic" ], }

# Operator names accepted in ConferenceQueryForm, mapped to datastore operators.
OPERATORS = { 'EQ': '=',
              'GT': '>',
              'GTEQ': '>=',
              'LT': '<',
              'LTEQ': '<=',
              'NE': '!=' }

# Queryable field names accepted from clients, mapped to model properties.
FIELDS = { 'CITY': 'city',
           'TOPIC': 'topics',
           'MONTH': 'month',
           'MAX_ATTENDEES': 'maxAttendees', }

# Request container for endpoints that carry a webSafeKey in the URL path.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    webSafeKey=messages.StringField(1),
)
@endpoints.api( name='conference', version='v1', scopes=[EMAIL_SCOPE],
allowed_client_ids=[ WEB_CLIENT_ID,
FRONTING_WEB_CLIENT_ID,
API_EXPLORER_CLIENT_ID ], )
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile Objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, profile):
    """Copy relevant fields from Profile to ProfileForm."""
    form = ProfileForm()
    for field in form.all_fields():
        if not hasattr(profile, field.name):
            continue
        value = getattr(profile, field.name)
        if field.name == 'teeShirtSize':
            # Stored as a string on the entity; the outbound form
            # expects the corresponding TeeShirtSize enum member.
            setattr(form, field.name, getattr(TeeShirtSize, value))
        else:
            setattr(form, field.name, value)
    form.check_initialized()
    return form
def _getProfileFromUser(self):
    """Return user Profile from datastore, creating a new one if non-existent.

    Raises:
        endpoints.UnauthorizedException: if no user is signed in.
    """
    # step 1: make sure user is authed
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')

    # get user id by calling getUserId(user)
    user_id = getUserId(user)
    # Lazy %-style args: the message is only formatted if DEBUG is enabled.
    logging.debug("user_id is: %s", user_id)

    # create a new key of kind Profile from the id
    profile_key = ndb.Key(Profile, user_id)
    # get entity from datastore by using get() on the key
    profile = profile_key.get()

    # create a new Profile from logged in user data if none exists yet;
    # user.nickname() supplies displayName, user.email() supplies mainEmail
    if not profile:
        profile = Profile(
            userId=None,
            key=profile_key,
            displayName=user.nickname(),
            mainEmail=user.email(),
            teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
        )
        # save new profile to datastore
        returned_profile_key = profile.put()
        # Was a py2-only bare `print`; use the module's logging instead.
        logging.debug("returned_profile_key is: %s", returned_profile_key)

    return profile
def _doProfile(self, save_request=None):
    """Get user Profile and return to user, possibly updating it first.

    Args:
        save_request: optional ProfileMiniForm carrying user-editable
            fields; when given, those fields are written back first.

    Returns:
        ProfileForm for the (possibly updated) profile.
    """
    # get user Profile
    profile = self._getProfileFromUser()

    # if called from saveProfile(), process user-modifiable fields
    if save_request:
        # Was a py2-only bare `print`; use the module's logging instead.
        logging.debug("save_request: %s", save_request)
        for field in ('displayName', 'teeShirtSize'):
            if hasattr(save_request, field):
                value = getattr(save_request, field)
                if value:
                    # str() normalizes enum values (teeShirtSize) to the
                    # string representation stored on the entity.
                    setattr(profile, field, str(value))
        # remember, you have to .put() to finalize any changes made!
        profile.put()

    # return the ProfileForm
    logging.debug("in _doProfile, profile is: %s", profile)
    return self._copyProfileToForm(profile)
# - - - Profile Endpoints - - - - - - - - - - - - - -
# GET /profile — return the signed-in user's ProfileForm (created on demand).
@endpoints.method( message_types.VoidMessage, ProfileForm,
                   path='profile', http_method='GET', name='getProfile' )
def getProfile(self, request):
    """Return user profile."""
    return self._doProfile()
# POST /profile — update the user-editable profile fields, return the result.
@endpoints.method( ProfileMiniForm, ProfileForm,
                   path='profile', http_method='POST', name='saveProfile' )
def saveProfile(self, request):
    """Update & return user profile."""
    # request contains only fields in the ProfileMiniForm.
    # Pass this to _doProfile, which applies the edits and returns
    # profile info from the datastore.
    # Was a py2-only bare `print`; use the module's logging instead.
    logging.debug("saveProfile request: %s", request)
    return self._doProfile(request)
# - - - Conference Objects - - - - - - - - - - - - - -
def _createConferenceObject(self, request):
    """Create or update Conference object, returning ConferenceForm/request."""
    # guard clauses / load prerequisites
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)

    if not request.name:
        raise endpoints.BadRequestException("Conference 'name' field required!")

    # Copy ConferenceForm/ProtoRPC Message into 'data' dict
    data = {
        field.name: getattr(request, field.name) for field in request.all_fields()
    }
    # Drop outbound-only form fields that are not part of the datastore model.
    del data['webSafeKey']
    del data['organizerDisplayName']
    logging.debug( "data was: " )
    logging.debug( data )

    # add default values for those missing (both data model & outbound Message)
    for default in MEETING_DEFAULTS:
        if data[default] in (None, []):
            data[default] = MEETING_DEFAULTS[default]
            setattr(request, default, MEETING_DEFAULTS[default])

    # convert dates from strings to Date objects; set month based on start_date
    # (only the first 10 chars, YYYY-MM-DD, of the incoming string are used)
    if data['startDate']:
        data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
        data['month'] = data['startDate'].month
    else:
        data['month'] = 0
    if data['endDate']:
        data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()

    # set seatsAvailable to be the same as maxAttendees on creation,
    # both for the data model & the outbound Message
    if data['maxAttendees'] > 0:
        data['seatsAvailable'] = data['maxAttendees']
        setattr(request, "seatsAvailable", data["maxAttendees"] )

    # make key from user ID so the conference is a child of its creator
    profile_key = ndb.Key(Profile, user_id)
    # arbitrarily create new, unique id via ndb.model.alloc
    conference_id = Conference.allocate_ids(size=1, parent=profile_key)[0]
    # create a new key of kind Conference from the profile_key
    conference_key = ndb.Key(Conference, conference_id, parent=profile_key)
    data['key'] = conference_key
    data['organizerUserId'] = request.organizerUserId = user_id
    logging.debug( "data is: " )
    logging.debug( data )

    # create Conference & return modified ConferenceForm
    Conference(**data).put()
    # Queue a confirmation email to the organizer; the handler lives at
    # /tasks/send_confirmation_email outside this module.
    taskqueue.add(
        params={
            'email': user.email(),
            'conferenceInfo': repr(request)
        },
        url='/tasks/send_confirmation_email'
    )
    return request
def _copyConferenceToForm(self, conference, displayName):
    """Copy relevant fields from Conference to ConferenceForm"""
    conferenceForm = ConferenceForm()
    for field in conferenceForm.all_fields():
        logging.debug("field name is: "+field.name)
        if hasattr(conference, field.name):
            # convert Date to date string; just copy others
            if field.name.endswith('Date'):
                setattr(conferenceForm, field.name, str(getattr(conference, field.name)))
            else:
                setattr(conferenceForm, field.name, getattr(conference, field.name))
        elif field.name == "webSafeKey":
            # Expose the entity key in URL-safe form for client round-trips.
            setattr(conferenceForm, field.name, conference.key.urlsafe())
    if displayName:
        setattr(conferenceForm, "organizerDisplayName", displayName)
    logging.info( "conferenceForm is: " )
    logging.info( conferenceForm )
    conferenceForm.check_initialized()
    return conferenceForm
# - - - Querying Helper Methods - - - - - - - - - - - - - -
def _getQuery(self, request):
    """Return formatted query from the submitted filters."""
    conferences = Conference.query()
    inequality_filter, filters = self._formatFilters(request.filters)

    # If an inequality filter exists, sort on that property first —
    # the datastore requires the inequality property to be the first
    # sort order; otherwise just order by name.
    if not inequality_filter:
        conferences = conferences.order(Conference.name)
    else:
        conferences = conferences.\
            order(ndb.GenericProperty(inequality_filter)).\
            order(Conference.name)

    for filtre in filters:
        # month/maxAttendees are integer properties; coerce the string value.
        if filtre["field"] in ["month", "maxAttendees"]:
            filtre["value"] = int(filtre["value"])
        formatted_query = ndb.query.FilterNode( filtre["field"],
                                                filtre["operator"],
                                                filtre["value"] )
        conferences = conferences.filter(formatted_query)
    return conferences
def _formatFilters(self, filters):
    """Parse, check validity and format user supplied filters."""
    formatted = []
    inequality_field = None

    for raw_filter in filters:
        item = {field.name: getattr(raw_filter, field.name)
                for field in raw_filter.all_fields()}

        try:
            item["field"] = FIELDS[item["field"]]
            item["operator"] = OPERATORS[item["operator"]]
        except KeyError:
            raise endpoints.BadRequestException("Filter contains invalid field or operator.")

        # The datastore allows inequality filters on at most one property:
        # remember which property uses a non-equality operator and reject
        # a second, different one.
        if item["operator"] != "=":
            if inequality_field is None:
                inequality_field = item["field"]
            elif inequality_field != item["field"]:
                raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")

        formatted.append(item)

    return (inequality_field, formatted)
# - - - Conference Endpoints - - - - - - - - - - - - - -
# POST /conference — create a conference owned by the signed-in user.
@endpoints.method( ConferenceForm, ConferenceForm,
                   path='conference', http_method='POST', name='createConference' )
def createConference(self, request):
    """Create new conference."""
    return self._createConferenceObject(request)
@endpoints.method( ConferenceQueryForms, ConferenceForms,
                   path='queryConferences',
                   http_method='POST',
                   name='queryConferences' )
def queryConferences(self, request):
    """Query for conferences."""
    conferences = self._getQuery(request)

    # fetch organizer displayName from profiles to return full ConferenceForms
    organizers = [ (ndb.Key(Profile, conference.organizerUserId)) \
                   for conference in conferences
    ]
    # batched datastore fetch of all organizer profiles at once
    profiles = ndb.get_multi(organizers)
    names = {}
    for profile in profiles:
        names[profile.key.id()] = profile.displayName

    # return individual ConferenceForm object per Conference
    return ConferenceForms(
        items = [ self._copyConferenceToForm(
                      conference,
                      names[conference.organizerUserId]
                  ) for conference in conferences
        ]
    )
@endpoints.method( message_types.VoidMessage, ConferenceForms,
                   path="getConferencesCreated",
                   http_method="POST",
                   name="getConferencesCreated" )
def getConferencesCreated(self, request):
    """Return conferences created by the signed-in user."""
    # guard clauses / load prerequisites
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)

    # Ancestor query: conferences are stored as descendants of their
    # creator's Profile key (see _createConferenceObject).
    conferencesOfUser = Conference.query(ancestor=ndb.Key(Profile, user_id))

    return ConferenceForms(
        items = [ self._copyConferenceToForm(conference, "") \
                  for conference in conferencesOfUser
        ]
    )
@endpoints.method(message_types.VoidMessage, ConferenceForms,
                  path='conferences/attending',
                  http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
    """Get list of conferences that user has registered for."""
    # The profile stores registered conferences as web-safe key strings;
    # rebuild real ndb keys from them.
    profile = self._getProfileFromUser()
    conference_keys = [ndb.Key(urlsafe=web_safe_key)
                       for web_safe_key in profile.conferenceKeysToAttend]

    # Fetch every conference in one batched datastore call rather than
    # one get() per key.
    conferences = ndb.get_multi(conference_keys)

    # return set of ConferenceForm objects per Conference
    return ConferenceForms(
        items=[self._copyConferenceToForm(conference, "")
               for conference in conferences]
    )
@endpoints.method( message_types.VoidMessage, ConferenceForms,
                   path="filterPlayground",
                   http_method="POST",
                   name="filterPlayground" )
def filterPlayground(self, request):
    """Development endpoint for experimenting with datastore filter queries."""
    ## Simple syntax for a filter query
    filteredConferences = Conference.query(Conference.city == "London")

    ## AND syntax with sortBy (kept as a reference example)
    # filteredConferences = Conference.query(
    #     ndb.AND(
    #         Conference.city == "London",
    #         Conference.topics == "Medical Innovations"
    #     )).order(
    #         Conference.maxAttendees
    #     ).filter(
    #         Conference.month == 6
    #     ).filter(
    #         Conference.maxAttendees > 10
    #     )

    return ConferenceForms(
        items = [ self._copyConferenceToForm( conference, "" ) \
                  for conference in filteredConferences
        ]
    )
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, register=True):
    """Register or unregister user for selected conference.

    Runs as a cross-group (xg) transaction so the seat count on the
    Conference and the key list on the Profile stay consistent.
    """
    returnValue = None
    profile = self._getProfileFromUser() # get user Profile

    # check if conference exists given webSafeConfKey
    # get conference; check that it exists
    conferenceKey = request.webSafeKey
    conference = ndb.Key(urlsafe=conferenceKey).get()
    if not conference:
        raise endpoints.NotFoundException(
            'No conference found with key: %s' % conferenceKey)

    # register
    if register:
        # check if user already registered otherwise add
        if conferenceKey in profile.conferenceKeysToAttend:
            raise ConflictException(
                "You have already registered for this conference")

        # check if seats available
        if conference.seatsAvailable <= 0:
            raise ConflictException(
                "There are no seats available.")

        # register user, take away one seat
        profile.conferenceKeysToAttend.append(conferenceKey)
        conference.seatsAvailable -= 1
        returnValue = True

    # unregister
    else:
        # check if user already registered
        if conferenceKey in profile.conferenceKeysToAttend:
            # unregister user, add back one seat
            profile.conferenceKeysToAttend.remove(conferenceKey)
            conference.seatsAvailable += 1
            returnValue = True
        else:
            # unregistering from a conference never joined is not an
            # error; it just reports False
            returnValue = False

    # write things back to the datastore & return
    profile.put()
    conference.put()
    return BooleanMessage(data=returnValue)
# POST /conference/{webSafeKey}/register — take one seat for the current user.
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                  path='conference/{webSafeKey}/register',
                  http_method='POST', name='registerForConference')
def registerForConference(self, request):
    """Register user for selected conference."""
    return self._conferenceRegistration(request)
# POST /conference/{webSafeKey}/unregister — release the user's seat.
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                  path='conference/{webSafeKey}/unregister',
                  http_method='POST', name='unregisterFromConference')
def unregisterFromConference(self, request):
    """Unregister user from selected registered conference."""
    return self._conferenceRegistration(request, register = False)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
    """Create Announcement & assign to memcache;
    used by memcache cron job & putAnnouncement(). """
    # Projection query: only the conference names are needed here.
    nearSoldOutConferences = Conference.query(ndb.AND(
        Conference.seatsAvailable <= 5,
        Conference.seatsAvailable > 0
    )).fetch(
        projection = [Conference.name]
    )

    if nearSoldOutConferences:
        # format announcement and set it in memcache.
        announcement = """Last chance to attend! The following conferences
are nearly sold out:
{nearSoldOutConferences}""".format(
            nearSoldOutConferences = ", ".join(
                c.name for c in nearSoldOutConferences
            )
        )
        memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
    else:
        # nothing near selling out: delete the memcache announcements entry.
        announcement = ""
        memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
    return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
                  path='conference/announcement/get',
                  http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
    """Return Announcement from memcache."""
    # Return the cached announcement, or an empty string when memcache
    # has no entry (missing, expired, or explicitly deleted).
    cached = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
    return StringMessage(data=cached or "")
# registers API: the WSGI-style endpoints server object that exposes
# ConferenceApi to the App Engine runtime.
api = endpoints.api_server([ConferenceApi])
| apache-2.0 |
harlowja/networkx | networkx/algorithms/tree/recognition.py | 4 | 6598 | #-*- coding: utf-8 -*-
"""
Recognition Tests
=================
A *forest* is an acyclic, undirected graph, and a *tree* is a connected forest.
Depending on the subfield, there are various conventions for generalizing these
definitions to directed graphs.
In one convention, directed variants of forest and tree are defined in an
identical manner, except that the direction of the edges is ignored. In effect,
each directed edge is treated as a single undirected edge. Then, additional
restrictions are imposed to define *branchings* and *arborescences*.
In another convention, directed variants of forest and tree correspond to
the previous convention's branchings and arborescences, respectively. Then two
new terms, *polyforest* and *polytree*, are defined to correspond to the other
convention's forest and tree.
Summarizing::
+-----------------------------+
| Convention A | Convention B |
+=============================+
| forest | polyforest |
| tree | polytree |
| branching | forest |
| arborescence | tree |
+-----------------------------+
Each convention has its reasons. The first convention emphasizes definitional
similarity in that directed forests and trees are only concerned with
acyclicity and do not have an in-degree constraint, just as their undirected
counterparts do not. The second convention emphasizes functional similarity
in the sense that the directed analog of a spanning tree is a spanning
arborescence. That is, take any spanning tree and choose one node as the root.
Then every edge is assigned a direction such there is a directed path from the
root to every other node. The result is a spanning arborescence.
NetworkX follows convention "A". Explicitly, these are:
undirected forest
An undirected graph with no undirected cycles.
undirected tree
A connected, undirected forest.
directed forest
A directed graph with no undirected cycles. Equivalently, the underlying
graph structure (which ignores edge orientations) is an undirected forest.
In convention B, this is known as a polyforest.
directed tree
A weakly connected, directed forest. Equivalently, the underlying graph
structure (which ignores edge orientations) is an undirected tree. In
convention B, this is known as a polytree.
branching
A directed forest with each node having, at most, one parent. So the maximum
in-degree is equal to 1. In convention B, this is known as a forest.
arborescence
A directed tree with each node having, at most, one parent. So the maximum
in-degree is equal to 1. In convention B, this is known as a tree.
For trees and arborescences, the adjective "spanning" may be added to designate
that the graph, when considered as a forest/branching, consists of a single
tree/arborescence that includes all nodes in the graph. It is true, by
definition, that every tree/arborescence is spanning with respect to the nodes
that define the tree/arborescence and so, it might seem redundant to introduce
the notion of "spanning". However, the nodes may represent a subset of
nodes from a larger graph, and it is in this context that the term "spanning"
becomes a useful notion.
"""
import networkx as nx
__author__ = """\n""".join([
'Ferdinando Papale <ferdinando.papale@gmail.com>',
'chebee7i <chebee7i@gmail.com>',
])
__all__ = ['is_arborescence', 'is_branching', 'is_forest', 'is_tree']
@nx.utils.not_implemented_for('undirected')
def is_arborescence(G):
    """
    Returns ``True`` if ``G`` is an arborescence.

    An arborescence is a directed tree in which every node has at most one
    parent, i.e. the maximum in-degree is 1. In another convention, an
    arborescence is known as a *tree*.

    Parameters
    ----------
    G : graph
        The graph to test.

    Returns
    -------
    b : bool
        A boolean that is ``True`` if ``G`` is an arborescence.

    See Also
    --------
    is_tree

    """
    if not is_tree(G):
        return False
    return max(G.in_degree().values()) <= 1
@nx.utils.not_implemented_for('undirected')
def is_branching(G):
    """
    Returns ``True`` if ``G`` is a branching.

    A branching is a directed forest in which every node has at most one
    parent, i.e. the maximum in-degree is 1. In another convention, a
    branching is known as a *forest*.

    Parameters
    ----------
    G : directed graph
        The directed graph to test.

    Returns
    -------
    b : bool
        A boolean that is ``True`` if ``G`` is a branching.

    See Also
    --------
    is_forest

    """
    if not is_forest(G):
        return False
    return max(G.in_degree().values()) <= 1
def is_forest(G):
    """
    Returns ``True`` if ``G`` is a forest.

    A forest is a graph with no undirected cycles. For directed graphs,
    ``G`` is a forest if the underlying graph — obtained by treating each
    directed edge as a single undirected edge in a multigraph — is a
    forest. In another convention, a directed forest is known as a
    *polyforest* and *forest* then corresponds to a *branching*.

    Parameters
    ----------
    G : graph
        The graph to test.

    Returns
    -------
    b : bool
        A boolean that is ``True`` if ``G`` is a forest.

    Raises
    ------
    NetworkXPointlessConcept
        If ``G`` has no nodes.

    See Also
    --------
    is_branching

    """
    if len(G) == 0:
        raise nx.exception.NetworkXPointlessConcept('G has no nodes.')

    if G.is_directed():
        component_subgraphs = nx.weakly_connected_component_subgraphs
    else:
        component_subgraphs = nx.connected_component_subgraphs

    # Each connected component with n nodes is a tree iff it has exactly
    # n - 1 edges; a forest is a graph whose components are all trees.
    for component in component_subgraphs(G):
        if component.number_of_edges() != len(component) - 1:
            return False
    return True
def is_tree(G):
    """
    Returns ``True`` if ``G`` is a tree.

    A tree is a connected graph with no undirected cycles. For directed
    graphs, ``G`` is a tree if the underlying graph — obtained by treating
    each directed edge as a single undirected edge in a multigraph — is a
    tree. In another convention, a directed tree is known as a *polytree*
    and *tree* then corresponds to an *arborescence*.

    Parameters
    ----------
    G : graph
        The graph to test.

    Returns
    -------
    b : bool
        A boolean that is ``True`` if ``G`` is a tree.

    Raises
    ------
    NetworkXPointlessConcept
        If ``G`` has no nodes.

    See Also
    --------
    is_arborescence

    """
    if len(G) == 0:
        raise nx.exception.NetworkXPointlessConcept('G has no nodes.')

    if G.is_directed():
        connected = nx.is_weakly_connected
    else:
        connected = nx.is_connected

    # A connected graph on n nodes is a tree iff it has exactly n - 1
    # edges; check the cheap edge count before the connectivity test.
    if G.number_of_edges() != len(G) - 1:
        return False
    return connected(G)
| bsd-3-clause |
prakxys/flask | Work/Trivia - Module 5/env/Lib/site-packages/setuptools/command/bdist_wininst.py | 325 | 2283 | from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
import os, sys
class bdist_wininst(_bdist_wininst):
    """Setuptools override of distutils' bdist_wininst command.

    Tracks the (command, pyversion, filename) tuples recorded for upload
    so the pyversion can be corrected when --target-version is used.
    """

    # Upload-record bookkeeping, filled in by create_exe():
    # _good_upload is the tuple that should appear in dist_files,
    # _bad_upload the incorrect one stock bdist_wininst may have recorded.
    _good_upload = _bad_upload = None

    def create_exe(self, arcname, fullname, bitmap=None):
        # Build the installer, then remember the upload tuples so
        # _fix_upload_names() can correct distribution.dist_files later.
        _bdist_wininst.create_exe(self, arcname, fullname, bitmap)
        installer_name = self.get_installer_filename(fullname)
        if self.target_version:
            pyversion = self.target_version
            # fix 2.5+ bdist_wininst ignoring --target-version spec
            self._bad_upload = ('bdist_wininst', 'any', installer_name)
        else:
            pyversion = 'any'
        self._good_upload = ('bdist_wininst', pyversion, installer_name)

    def _fix_upload_names(self):
        # Swap the wrongly-recorded dist_files entry for the correct one.
        good, bad = self._good_upload, self._bad_upload
        dist_files = getattr(self.distribution, 'dist_files', [])
        if bad in dist_files:
            dist_files.remove(bad)
        if good not in dist_files:
            dist_files.append(good)

    def reinitialize_command (self, command, reinit_subcommands=0):
        cmd = self.distribution.reinitialize_command(
            command, reinit_subcommands)
        if command in ('install', 'install_lib'):
            cmd.install_lib = None  # work around distutils bug
        return cmd

    def run(self):
        # Flag used so other code can tell a build is in progress;
        # cleared even if the build raises.
        self._is_running = True
        try:
            _bdist_wininst.run(self)
            self._fix_upload_names()
        finally:
            self._is_running = False

    # Older distutils versions lack this hook; define it only when missing.
    if not hasattr(_bdist_wininst, 'get_installer_filename'):
        def get_installer_filename(self, fullname):
            # Factored out to allow overriding in subclasses
            if self.target_version:
                # if we create an installer for a specific python version,
                # it's better to include this in the name
                installer_name = os.path.join(self.dist_dir,
                                              "%s.win32-py%s.exe" %
                                              (fullname, self.target_version))
            else:
                installer_name = os.path.join(self.dist_dir,
                                              "%s.win32.exe" % fullname)
            return installer_name
        # get_installer_filename()
| apache-2.0 |
j-haj/interview-prep | src/algorithms/binary-search/main.py | 1 | 1165 | import math
import unittest
class TestSearch(unittest.TestCase):
    """Unit tests for bsearch() over sorted integer lists."""

    def test_small_search(self):
        # Smallest interesting case: target at index 0 of a 3-element list.
        l = [i for i in range(3)]
        self.assertEqual(0, bsearch(l, 0))

    def test_end_range(self):
        # Target is the last element.
        l = [i for i in range(100)]
        self.assertEqual(99, bsearch(l, 99))

    def test_successful_search(self):
        # Target somewhere inside the list.
        l = [i for i in range(100)]
        self.assertEqual(31, bsearch(l, 31))

    def test_failed_search(self):
        # Target larger than every element must report -1.
        l = [i for i in range(10)]
        self.assertEqual(-1, bsearch(l, 15))
def bsearch(l, x):
    ''' Binary search

    Precondition: l should be sorted in ascending order

    Args:
        l: input list
        x: element to find

    Returns: an index i such that l[i] == x, or -1 if x is not in l
    '''
    # Classic lo/hi binary search. The previous step-halving walk had two
    # defects: it returned index 0 (not -1) whenever x was smaller than
    # every element, because the loop exit at cur_idx == 0 skipped the
    # final membership check; and its minimum step of 1 degraded to a
    # linear scan. This version is O(log n) and always verifies the hit.
    lo, hi = 0, len(l) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if l[mid] == x:
            return mid
        elif l[mid] < x:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# Script entry point: run the unittest suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| mit |
kustodian/ansible | lib/ansible/module_utils/facts/sysctl.py | 135 | 1093 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
def get_sysctl(module, prefixes):
    """Return sysctl values for the given key prefixes as a dict.

    Args:
        module: AnsibleModule-like object providing get_bin_path() and
            run_command().
        prefixes: list of sysctl key prefixes to query (e.g. ['kern', 'vm']).

    Returns:
        dict mapping each reported sysctl key to its stripped string value;
        an empty dict if the sysctl command fails.
    """
    sysctl_cmd = module.get_bin_path('sysctl')
    cmd = [sysctl_cmd]
    cmd.extend(prefixes)

    rc, out, err = module.run_command(cmd)
    if rc != 0:
        return dict()

    sysctl = dict()
    for line in out.splitlines():
        if not line:
            continue
        # Platforms report either 'key = value' or 'key: value'; split on
        # the first separator only so values containing '=' survive intact.
        parts = re.split(r'\s?=\s?|: ', line, maxsplit=1)
        if len(parts) != 2:
            # Skip lines without a key/value separator (e.g. continuation
            # lines of multi-line values or permission-error noise) instead
            # of crashing on tuple unpacking.
            continue
        key, value = parts
        sysctl[key] = value.strip()
    return sysctl
| gpl-3.0 |
anryko/ansible | lib/ansible/modules/network/aci/mso_schema_site_anp_epg.py | 13 | 6426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by ansible-doc and the Ansible plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_site_anp_epg
short_description: Manage site-local Endpoint Groups (EPGs) in schema template
description:
- Manage site-local EPGs in schema template on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
site:
description:
- The name of the site.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
epg:
description:
- The name of the EPG to manage.
type: str
aliases: [ name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_site_anp
- module: mso_schema_site_anp_epg_subnet
- module: mso_schema_template_anp_epg
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new site EPG
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
state: present
delegate_to: localhost
- name: Remove a site EPG
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
state: absent
delegate_to: localhost
- name: Query a specific site EPGs
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
state: query
delegate_to: localhost
register: query_result
- name: Query all site EPGs
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, issubset
def main():
    """Ansible module entry point: manage site-local EPGs in an MSO schema.

    Supports three states:
      * present -- ensure the site-level EPG reference exists
      * absent  -- remove the site-level EPG reference
      * query   -- return one EPG (epg given) or all EPGs (epg omitted)
    """
    argument_spec = mso_argument_spec()
    argument_spec.update(
        schema=dict(type='str', required=True),
        site=dict(type='str', required=True),
        template=dict(type='str', required=True),
        anp=dict(type='str', required=True),
        epg=dict(type='str', aliases=['name']),  # This parameter is not required for querying all objects
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['epg']],
            ['state', 'present', ['epg']],
        ],
    )
    schema = module.params.get('schema')
    site = module.params.get('site')
    template = module.params.get('template')
    anp = module.params.get('anp')
    epg = module.params.get('epg')
    state = module.params.get('state')
    mso = MSOModule(module)
    # Resolve schema name to its API object; fail early if it does not exist.
    schema_obj = mso.get_obj('schemas', displayName=schema)
    if not schema_obj:
        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
    schema_path = 'schemas/{id}'.format(**schema_obj)
    schema_id = schema_obj.get('id')
    # Validate the site/template pair is attached to this schema.
    site_id = mso.lookup_site(site)
    sites = [(s.get('siteId'), s.get('templateName')) for s in schema_obj.get('sites')]
    if (site_id, template) not in sites:
        # BUGFIX: 'sites' holds (siteId, templateName) tuples; str.join()
        # requires strings, so the original ', '.join(sites) raised a
        # TypeError instead of reporting the helpful error message.
        existing = ', '.join('{0}-{1}'.format(s, t) for s, t in sites)
        mso.fail_json(msg="Provided site/template '{0}-{1}' does not exist. Existing sites/templates: {2}".format(site, template, existing))
    # Schema-access uses indexes
    site_idx = sites.index((site_id, template))
    # Path-based access uses site_id-template
    site_template = '{0}-{1}'.format(site_id, template)
    # Validate the ANP exists at the site level.
    anp_ref = mso.anp_ref(schema_id=schema_id, template=template, anp=anp)
    anps = [a.get('anpRef') for a in schema_obj.get('sites')[site_idx]['anps']]
    if anp_ref not in anps:
        mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
    anp_idx = anps.index(anp_ref)
    # Locate the EPG (if present) under the site-level ANP.
    epg_ref = mso.epg_ref(schema_id=schema_id, template=template, anp=anp, epg=epg)
    epgs = [e.get('epgRef') for e in schema_obj.get('sites')[site_idx]['anps'][anp_idx]['epgs']]
    if epg is not None and epg_ref in epgs:
        epg_idx = epgs.index(epg_ref)
        epg_path = '/sites/{0}/anps/{1}/epgs/{2}'.format(site_template, anp, epg)
        mso.existing = schema_obj.get('sites')[site_idx]['anps'][anp_idx]['epgs'][epg_idx]
    if state == 'query':
        # Without an epg name, return every EPG under this ANP.
        if epg is None:
            mso.existing = schema_obj.get('sites')[site_idx]['anps'][anp_idx]['epgs']
        elif not mso.existing:
            mso.fail_json(msg="EPG '{epg}' not found".format(epg=epg))
        mso.exit_json()
    epgs_path = '/sites/{0}/anps/{1}/epgs'.format(site_template, anp)
    ops = []
    mso.previous = mso.existing
    if state == 'absent':
        if mso.existing:
            mso.sent = mso.existing = {}
            ops.append(dict(op='remove', path=epg_path))
    elif state == 'present':
        payload = dict(
            epgRef=dict(
                schemaId=schema_id,
                templateName=template,
                anpName=anp,
                epgName=epg,
            ),
        )
        mso.sanitize(payload, collate=True)
        # Only add when absent; an existing reference needs no patch.
        if not mso.existing:
            ops.append(dict(op='add', path=epgs_path + '/-', value=mso.sent))
        mso.existing = mso.proposed
    # Apply the accumulated JSON-patch operations unless in check mode.
    if not module.check_mode:
        mso.request(schema_path, method='PATCH', data=ops)
    mso.exit_json()
| gpl-3.0 |
amyvmiwei/chromium | third_party/scons/scons-local/SCons/compat/_scons_subprocess.py | 15 | 44648 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the childs point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import string
import types
import traceback
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised by check_call() when a command exits with a non-zero status.

    The failing command is available via the .cmd attribute and its exit
    status via the .returncode attribute.
    """

    def __init__(self, returncode, cmd):
        self.returncode = returncode
        self.cmd = cmd

    def __str__(self):
        template = "Command '%s' returned non-zero exit status %d"
        return template % (self.cmd, self.returncode)
if mswindows:
try:
import threading
except ImportError:
# SCons: the threading module is only used by the communicate()
# method, which we don't actually use, so don't worry if we
# can't import it.
pass
import msvcrt
if 0: # <-- change this to use pywin32 instead of the _subprocess driver
import pywintypes
from win32api import GetStdHandle, STD_INPUT_HANDLE, \
STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
from win32api import GetCurrentProcess, DuplicateHandle, \
GetModuleFileName, GetVersion
from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
from win32pipe import CreatePipe
from win32process import CreateProcess, STARTUPINFO, \
GetExitCodeProcess, STARTF_USESTDHANDLES, \
STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
else:
# SCons: don't die on Python versions that don't have _subprocess.
try:
from _subprocess import *
except ImportError:
pass
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
import errno
import fcntl
import pickle
try:
fcntl.F_GETFD
except AttributeError:
fcntl.F_GETFD = 1
try:
fcntl.F_SETFD
except AttributeError:
fcntl.F_SETFD = 2
# Public API of this compatibility module.
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
# Upper bound used by _close_fds() when closing inherited descriptors.
# os.sysconf() may be missing or not know SC_OPEN_MAX on some platforms,
# so fall back to a conservative default.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except KeyboardInterrupt:
    raise # SCons: don't swallow keyboard interrupts
except:
    MAXFD = 256
# True/False does not exist on 2.2.0
try:
    False
except NameError:
    # Pre-2.2.1 Python: define integer stand-ins for the boolean constants.
    False = 0
    True = 1
# Portable integer checks: on very old Pythons isinstance() cannot take a
# type argument, so fall back to direct type() comparison there.
try:
    isinstance(1, int)
except TypeError:
    def is_int(obj):
        # True only for plain ints.
        return type(obj) == type(1)
    def is_int_or_long(obj):
        # True for plain ints and longs (Python 2 'long' literal 1L).
        return type(obj) in (type(1), type(1L))
else:
    def is_int(obj):
        # True only for plain ints.
        return isinstance(obj, int)
    def is_int_or_long(obj):
        # NOTE: 'long' only exists on Python 2; this module targets 2.x.
        return isinstance(obj, (int, long))
# Portable string check: ensure types.StringTypes exists (added in
# Python 2.2), then define is_string() in terms of it.
try:
    types.StringTypes
except AttributeError:
    try:
        types.StringTypes = (types.StringType, types.UnicodeType)
    except AttributeError:
        # Interpreter built without Unicode support: only plain strings.
        types.StringTypes = (types.StringType,)
    def is_string(obj):
        # type() comparison for Pythons whose isinstance() is limited.
        return type(obj) in types.StringTypes
else:
    def is_string(obj):
        return isinstance(obj, types.StringTypes)
# Popen instances whose child may still be running when the instance is
# garbage-collected; _cleanup() polls and reaps them on the next Popen()
# construction (see Popen.__del__, which parks instances here).
_active = []
def _cleanup():
    # Iterate over a copy: _active may shrink while we remove entries.
    for inst in _active[:]:
        # _deadstate=sys.maxint guarantees poll() returns a non-None code
        # even if waitpid() fails, so reaped instances always leave the list.
        if inst.poll(_deadstate=sys.maxint) >= 0:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
# Special sentinel values for the stdin/stdout/stderr arguments:
# PIPE requests a new pipe; STDOUT redirects stderr into stdout.
PIPE = -1
STDOUT = -2
def call(*popenargs, **kwargs):
    """Run a command, wait for it to complete, and return its returncode.

    All arguments are forwarded verbatim to the Popen constructor.
    Example:
        retcode = call(["ls", "-l"])
    """
    process = Popen(*popenargs, **kwargs)
    return process.wait()
def check_call(*popenargs, **kwargs):
    """Run a command, wait for completion, and verify that it succeeded.

    Arguments are forwarded verbatim to the Popen constructor.  Returns
    the (zero) exit status on success; otherwise raises
    CalledProcessError, which carries the exit status in its returncode
    attribute.
    Example:
        check_call(["ls", "-l"])
    """
    retcode = call(*popenargs, **kwargs)
    # Prefer an explicit args= keyword for the error report; otherwise
    # fall back to the first positional argument.
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    if retcode:
        raise CalledProcessError(retcode, cmd)
    return retcode
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """
    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        bs_buf = []
        # Add a space to separate this argument from the others
        if result:
            result.append(' ')
        # BUGFIX: an empty argument must still be emitted as "" -- the
        # original test dropped empty strings from the command line
        # entirely (the same fix was later applied to the standard
        # library's subprocess.list2cmdline).
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')
        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double preceding backslashes, then escape the quote
                # (rules 3 and 5).
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char: flush any pending literal backslashes.
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)
        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)
        if needquote:
            # Trailing backslashes must be doubled so they do not escape
            # the closing quote we are about to append (rule 5).
            result.extend(bs_buf)
            result.append('"')
    # str.join (available since Python 2.0) instead of the deprecated
    # string.join module function.
    return ''.join(result)
try:
object
except NameError:
class object:
pass
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not is_int_or_long(bufsize):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds:
raise ValueError("close_fds is not supported on Windows "
"platforms")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if p2cwrite:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self.poll(_deadstate=sys.maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
elif self.stderr:
stderr = self.stderr.read()
self.wait()
return (stdout, stderr)
return self._communicate(input)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
elif stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
# Detach and turn into fd
p2cwrite = p2cwrite.Detach()
p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
elif is_int(stdin):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
elif stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
# Detach and turn into fd
c2pread = c2pread.Detach()
c2pread = msvcrt.open_osfhandle(c2pread, 0)
elif is_int(stdout):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
elif stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
# Detach and turn into fd
errread = errread.Detach()
errread = msvcrt.open_osfhandle(errread, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return DuplicateHandle(GetCurrentProcess(), handle,
GetCurrentProcess(), 0, 1,
DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C wont
# kill children.
creationflags = creationflags | CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
# must inherit handles to pass std
# handles
1,
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or simliar), but
# how can this be done from Python?
raise apply(WindowsError, e.args)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
obj = WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif is_int(stdin):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif is_int(stdout):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if is_string(args):
args = [args]
if shell:
args = ["/bin/sh", "-c"] + args
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
self._set_cloexec_flag(errpipe_write)
self.pid = os.fork()
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite:
os.close(p2cwrite)
if c2pread:
os.close(c2pread)
if errread:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
if p2cread:
os.dup2(p2cread, 0)
if c2pwrite:
os.dup2(c2pwrite, 1)
if errwrite:
os.dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the same
# fd more than once, or standard fds.
try:
set
except NameError:
# Fall-back for earlier Python versions, so epydoc
# can use this module directly to execute things.
if p2cread:
os.close(p2cread)
if c2pwrite and c2pwrite not in (p2cread,):
os.close(c2pwrite)
if errwrite and errwrite not in (p2cread, c2pwrite):
os.close(errwrite)
else:
for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)):
if fd: os.close(fd)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
apply(preexec_fn)
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = string.join(exc_lines, '')
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
os.close(errpipe_write)
if p2cread and p2cwrite:
os.close(p2cread)
if c2pwrite and c2pread:
os.close(c2pwrite)
if errwrite and errread:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
os.close(errpipe_read)
if data != "":
os.waitpid(self.pid, 0)
child_exception = pickle.loads(data)
raise child_exception
def _handle_exitstatus(self, sts):
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
elif os.WIFEXITED(sts):
self.returncode = os.WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except os.error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
pid, sts = os.waitpid(self.pid, 0)
self._handle_exitstatus(sts)
return self.returncode
    def _communicate(self, input):
        """Feed *input* to stdin while draining stdout/stderr via select().

        Returns a (stdout, stderr) tuple; each element is None if the
        corresponding pipe was not requested, otherwise the complete
        output joined into a single string.  Blocks until the child
        terminates (self.wait() at the end).

        NOTE(review): this is Python 2 code -- it relies on buffer()
        and string.join(); os.read() returning "" is the EOF test.
        """
        read_set = []
        write_set = []
        stdout = None # Return
        stderr = None # Return
        if self.stdin:
            # Flush stdio buffer. This might block, if the user has
            # been writing to .stdin in an uncontrolled fashion.
            self.stdin.flush()
            if input:
                write_set.append(self.stdin)
            else:
                # No data to send: close stdin so the child sees EOF.
                self.stdin.close()
        if self.stdout:
            read_set.append(self.stdout)
            stdout = []
        if self.stderr:
            read_set.append(self.stderr)
            stderr = []
        input_offset = 0
        # Multiplex all pipes until every one of them has been closed.
        while read_set or write_set:
            rlist, wlist, xlist = select.select(read_set, write_set, [])
            if self.stdin in wlist:
                # When select has indicated that the file is writable,
                # we can write up to PIPE_BUF bytes without risk
                # blocking. POSIX defines PIPE_BUF >= 512
                bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
                input_offset = input_offset + bytes_written
                if input_offset >= len(input):
                    self.stdin.close()
                    write_set.remove(self.stdin)
            if self.stdout in rlist:
                data = os.read(self.stdout.fileno(), 1024)
                if data == "":
                    # EOF on stdout: stop selecting on it.
                    self.stdout.close()
                    read_set.remove(self.stdout)
                stdout.append(data)
            if self.stderr in rlist:
                data = os.read(self.stderr.fileno(), 1024)
                if data == "":
                    # EOF on stderr: stop selecting on it.
                    self.stderr.close()
                    read_set.remove(self.stderr)
                stderr.append(data)
        # All data exchanged. Translate lists into strings.
        if stdout is not None:
            stdout = string.join(stdout, '')
        if stderr is not None:
            stderr = string.join(stderr, '')
        # Translate newlines, if requested. We cannot let the file
        # object do the translation: It is based on stdio, which is
        # impossible to combine with select (unless forcing no
        # buffering).
        if self.universal_newlines and hasattr(file, 'newlines'):
            if stdout:
                stdout = self._translate_newlines(stdout)
            if stderr:
                stderr = self._translate_newlines(stderr)
        self.wait()
        return (stdout, stderr)
def _demo_posix():
    """Demonstrate Popen usage on POSIX systems (Python 2 print syntax)."""
    #
    # Example 1: Simple redirection: Get process list
    #
    plist = Popen(["ps"], stdout=PIPE).communicate()[0]
    print "Process list:"
    print plist
    #
    # Example 2: Change uid before executing child
    #
    if os.getuid() == 0:
        p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
        p.wait()
    #
    # Example 3: Connecting several subprocesses
    #
    print "Looking for 'hda'..."
    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 4: Catch execution error
    #
    print
    print "Trying a weird file..."
    try:
        print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError, e:
        # child_traceback is attached by _execute_child when exec fails.
        if e.errno == errno.ENOENT:
            print "The file didn't exist.  I thought so..."
            print "Child traceback:"
            print e.child_traceback
        else:
            print "Error", e.errno
    else:
        sys.stderr.write( "Gosh.  No error.\n" )
def _demo_windows():
    """Demonstrate Popen usage on Windows (Python 2 print syntax)."""
    #
    # Example 1: Connecting several subprocesses
    #
    print "Looking for 'PROMPT' in set output..."
    p1 = Popen("set", stdout=PIPE, shell=True)
    p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 2: Simple execution of program
    #
    print "Executing calc..."
    p = Popen("calc")
    p.wait()
if __name__ == "__main__":
    # Dispatch to the demo routine for the current platform.
    demo = _demo_windows if mswindows else _demo_posix
    demo()
| bsd-3-clause |
Designist/audacity | lib-src/lv2/serd/waflib/Tools/javaw.py | 266 | 10142 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,tempfile,shutil
from waflib import TaskGen,Task,Utils,Options,Build,Errors,Node,Logs
from waflib.Configure import conf
from waflib.TaskGen import feature,before_method,after_method
from waflib.Tools import ccroot
ccroot.USELIB_VARS['javac']=set(['CLASSPATH','JAVACFLAGS'])
SOURCE_RE='**/*.java'
JAR_RE='**/*'
class_check_source='''
public class Test {
public static void main(String[] argv) {
Class lib;
if (argv.length < 1) {
System.err.println("Missing argument");
System.exit(77);
}
try {
lib = Class.forName(argv[0]);
} catch (ClassNotFoundException e) {
System.err.println("ClassNotFoundException");
System.exit(1);
}
lib = null;
System.exit(0);
}
}
'''
@feature('javac')
@before_method('process_source')
def apply_java(self):
	"""Create the *javac* task and configure its output dir, srcdir and sourcepath."""
	Utils.def_attrs(self,jarname='',classpath='',sourcepath='.',srcdir='.',jar_mf_attributes={},jar_mf_classpath=[])
	nodes_lst=[]
	# Resolve the output directory; it may be given as a string or a Node.
	outdir=getattr(self,'outdir',None)
	if outdir:
		if not isinstance(outdir,Node.Node):
			outdir=self.path.get_bld().make_node(self.outdir)
	else:
		outdir=self.path.get_bld()
	outdir.mkdir()
	self.outdir=outdir
	self.env['OUTDIR']=outdir.abspath()
	self.javac_task=tsk=self.create_task('javac')
	tmp=[]
	# Normalize srcdir (string, Node, or list of either) into directory nodes.
	srcdir=getattr(self,'srcdir','')
	if isinstance(srcdir,Node.Node):
		srcdir=[srcdir]
	for x in Utils.to_list(srcdir):
		if isinstance(x,Node.Node):
			y=x
		else:
			y=self.path.find_dir(x)
		if not y:
			self.bld.fatal('Could not find the folder %s from %s'%(x,self.path))
		tmp.append(y)
	tsk.srcdir=tmp
	if getattr(self,'compat',None):
		# Emit bytecode compatible with an older java release.
		tsk.env.append_value('JAVACFLAGS',['-source',self.compat])
	if hasattr(self,'sourcepath'):
		fold=[isinstance(x,Node.Node)and x or self.path.find_dir(x)for x in self.to_list(self.sourcepath)]
		names=os.pathsep.join([x.srcpath()for x in fold])
	else:
		names=[x.srcpath()for x in tsk.srcdir]
	if names:
		tsk.env.append_value('JAVACFLAGS',['-sourcepath',names])
@feature('javac')
@after_method('apply_java')
def use_javac_files(self):
    """Add the jar outputs of the task generators named in *use* to CLASSPATH."""
    jars = []
    self.uselib = self.to_list(getattr(self, 'uselib', []))
    for dep_name in self.to_list(getattr(self, 'use', [])):
        try:
            dep = self.bld.get_tgen_by_name(dep_name)
        except Exception:
            # Not a task generator: treat the name as a plain uselib entry.
            self.uselib.append(dep_name)
        else:
            dep.post()
            jars.append(dep.jar_task.outputs[0].abspath())
            self.javac_task.set_run_after(dep.jar_task)
    if jars:
        self.env.append_value('CLASSPATH', jars)
@feature('javac')
@after_method('apply_java', 'propagate_uselib_vars', 'use_javac_files')
def set_classpath(self):
    """Flatten env.CLASSPATH into the path-separated string each task uses."""
    self.env.append_value('CLASSPATH', getattr(self, 'classpath', []))
    for task in self.tasks:
        task.env.CLASSPATH = os.pathsep.join(self.env.CLASSPATH) + os.pathsep
@feature('jar')
@after_method('apply_java','use_javac_files')
@before_method('process_source')
def jar_files(self):
	"""Create the *jar_create* task: resolve basedir, manifest, destfile and jar options."""
	destfile=getattr(self,'destfile','test.jar')
	jaropts=getattr(self,'jaropts',[])
	manifest=getattr(self,'manifest',None)
	basedir=getattr(self,'basedir',None)
	if basedir:
		if not isinstance(self.basedir,Node.Node):
			basedir=self.path.get_bld().make_node(basedir)
	else:
		basedir=self.path.get_bld()
	if not basedir:
		self.bld.fatal('Could not find the basedir %r for %r'%(self.basedir,self))
	self.jar_task=tsk=self.create_task('jar_create')
	if manifest:
		# 'm' flag: a manifest file is supplied; it must be the first option.
		jarcreate=getattr(self,'jarcreate','cfm')
		node=self.path.find_node(manifest)
		tsk.dep_nodes.append(node)
		jaropts.insert(0,node.abspath())
	else:
		jarcreate=getattr(self,'jarcreate','cf')
	if not isinstance(destfile,Node.Node):
		destfile=self.path.find_or_declare(destfile)
	if not destfile:
		self.bld.fatal('invalid destfile %r for %r'%(destfile,self))
	tsk.set_outputs(destfile)
	tsk.basedir=basedir
	# jar -C <basedir> . : archive everything below basedir.
	jaropts.append('-C')
	jaropts.append(basedir.bldpath())
	jaropts.append('.')
	tsk.env['JAROPTS']=jaropts
	tsk.env['JARCREATE']=jarcreate
	if getattr(self,'javac_task',None):
		# Archive only after compilation has finished.
		tsk.set_run_after(self.javac_task)
@feature('jar')
@after_method('jar_files')
def use_jar_files(self):
    """Make the jar task run after every task of the generators named in *use*."""
    self.uselib = self.to_list(getattr(self, 'uselib', []))
    for dep_name in self.to_list(getattr(self, 'use', [])):
        try:
            dep = self.bld.get_tgen_by_name(dep_name)
        except Exception:
            # Unknown generator name: keep it as a plain uselib entry.
            self.uselib.append(dep_name)
        else:
            dep.post()
            self.jar_task.run_after.update(dep.tasks)
class jar_create(Task.Task):
	"""Task that creates a jar file; its inputs are globbed lazily from *basedir*."""
	color='GREEN'
	run_str='${JAR} ${JARCREATE} ${TGT} ${JAROPTS}'
	def runnable_status(self):
		"""Wait for predecessor tasks, then collect the files to archive."""
		for t in self.run_after:
			if not t.hasrun:
				return Task.ASK_LATER
		if not self.inputs:
			global JAR_RE
			try:
				# Everything below basedir except the output jar itself.
				self.inputs=[x for x in self.basedir.ant_glob(JAR_RE,remove=False)if id(x)!=id(self.outputs[0])]
			except Exception:
				raise Errors.WafError('Could not find the basedir %r for %r'%(self.basedir,self))
		return super(jar_create,self).runnable_status()
class javac(Task.Task):
    """Task that compiles .java sources with javac.

    Inputs are globbed lazily from the srcdir list set by apply_java();
    very long command lines are routed through an @response file.
    """
    color = 'BLUE'
    nocache = True
    vars = ['CLASSPATH', 'JAVACFLAGS', 'JAVAC', 'OUTDIR']

    def runnable_status(self):
        """Wait for predecessor tasks, then collect the source files."""
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER
        if not self.inputs:
            self.inputs = []
            for x in self.srcdir:
                self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False))
        return super(javac, self).runnable_status()

    def run(self):
        """Build and execute the javac command line; returns its exit status."""
        env = self.env
        gen = self.generator
        bld = gen.bld
        wd = bld.bldnode.abspath()

        def to_list(xx):
            # env values may be a bare string or a list of strings.
            return [xx] if isinstance(xx, str) else xx

        cmd = []
        cmd.extend(to_list(env['JAVAC']))
        cmd.extend(['-classpath'])
        cmd.extend(to_list(env['CLASSPATH']))
        cmd.extend(['-d'])
        cmd.extend(to_list(env['OUTDIR']))
        cmd.extend(to_list(env['JAVACFLAGS']))
        files = [a.path_from(bld.bldnode) for a in self.inputs]
        tmp = None
        try:
            if len(str(files)) + len(str(cmd)) > 8192:
                # Command line too long: pass the file list via @file.
                (fd, tmp) = tempfile.mkstemp(dir=bld.bldnode.abspath())
                try:
                    # BUGFIX: os.write() requires bytes on Python 3; the
                    # original wrote a str, which raises TypeError there.
                    os.write(fd, '\n'.join(files).encode())
                finally:
                    if tmp:
                        os.close(fd)
                if Logs.verbose:
                    Logs.debug('runner: %r' % (cmd + files))
                cmd.append('@' + tmp)
            else:
                cmd += files
            ret = self.exec_command(cmd, cwd=wd, env=env.env or None)
        finally:
            if tmp:
                os.remove(tmp)
        return ret

    def post_run(self):
        """Record signatures of the produced .class files for the build cache."""
        for n in self.generator.outdir.ant_glob('**/*.class'):
            n.sig = Utils.h_file(n.abspath())
        self.generator.bld.task_sigs[self.uid()] = self.cache_sig
@feature('javadoc')
@after_method('process_rule')
def create_javadoc(self):
    """Set up a *javadoc* task and normalize the generator's attributes."""
    task = self.create_task('javadoc')
    task.classpath = getattr(self, 'classpath', [])
    self.javadoc_package = Utils.to_list(self.javadoc_package)
    if not isinstance(self.javadoc_output, Node.Node):
        # Accept a plain path string and turn it into a build node.
        self.javadoc_output = self.bld.path.find_or_declare(self.javadoc_output)
class javadoc(Task.Task):
	"""Task that generates javadoc for the configured packages."""
	color='BLUE'
	def __str__(self):
		return'%s: %s -> %s\n'%(self.__class__.__name__,self.generator.srcdir,self.generator.javadoc_output)
	def run(self):
		"""Invoke the javadoc tool with sourcepath, classpath and the package list."""
		env=self.env
		bld=self.generator.bld
		wd=bld.bldnode.abspath()
		# Look for sources in both the source tree and the build tree.
		srcpath=self.generator.path.abspath()+os.sep+self.generator.srcdir
		srcpath+=os.pathsep
		srcpath+=self.generator.path.get_bld().abspath()+os.sep+self.generator.srcdir
		classpath=env.CLASSPATH
		classpath+=os.pathsep
		classpath+=os.pathsep.join(self.classpath)
		# NOTE(review): join on a str is a no-op here -- presumably a guard
		# in case CLASSPATH were a list; confirm before removing.
		classpath="".join(classpath)
		self.last_cmd=lst=[]
		lst.extend(Utils.to_list(env['JAVADOC']))
		lst.extend(['-d',self.generator.javadoc_output.abspath()])
		lst.extend(['-sourcepath',srcpath])
		lst.extend(['-classpath',classpath])
		lst.extend(['-subpackages'])
		lst.extend(self.generator.javadoc_package)
		# Drop empty arguments before executing.
		lst=[x for x in lst if x]
		self.generator.bld.cmd_and_log(lst,cwd=wd,env=env.env or None,quiet=0)
	def post_run(self):
		"""Record signatures of every generated file."""
		nodes=self.generator.javadoc_output.ant_glob('**')
		for x in nodes:
			x.sig=Utils.h_file(x.abspath())
		self.generator.bld.task_sigs[self.uid()]=self.cache_sig
def configure(self):
	"""Detect the java tools (javac, java, jar, javadoc) and set default flags."""
	java_path=self.environ['PATH'].split(os.pathsep)
	v=self.env
	if'JAVA_HOME'in self.environ:
		# Prefer the JDK that JAVA_HOME points to over the PATH lookup.
		java_path=[os.path.join(self.environ['JAVA_HOME'],'bin')]+java_path
		self.env['JAVA_HOME']=[self.environ['JAVA_HOME']]
	for x in'javac java jar javadoc'.split():
		self.find_program(x,var=x.upper(),path_list=java_path)
		self.env[x.upper()]=self.cmd_to_list(self.env[x.upper()])
	if'CLASSPATH'in self.environ:
		v['CLASSPATH']=self.environ['CLASSPATH']
	if not v['JAR']:self.fatal('jar is required for making java packages')
	if not v['JAVAC']:self.fatal('javac is required for compiling java classes')
	v['JARCREATE']='cf'
	v['JAVACFLAGS']=[]
@conf
def check_java_class(self,classname,with_classpath=None):
	"""Compile a probe class and try to load *classname* with it.

	Returns the exit status of the probe run (0 when the class was found).
	"""
	javatestdir='.waf-javatest'
	classpath=javatestdir
	if self.env['CLASSPATH']:
		classpath+=os.pathsep+self.env['CLASSPATH']
	if isinstance(with_classpath,str):
		classpath+=os.pathsep+with_classpath
	# Start from a clean scratch directory each time.
	shutil.rmtree(javatestdir,True)
	os.mkdir(javatestdir)
	Utils.writef(os.path.join(javatestdir,'Test.java'),class_check_source)
	# Compile the probe, then run it against the requested class.
	self.exec_command(self.env['JAVAC']+[os.path.join(javatestdir,'Test.java')],shell=False)
	cmd=self.env['JAVA']+['-cp',classpath,'Test',classname]
	self.to_log("%s\n"%str(cmd))
	found=self.exec_command(cmd,shell=False)
	self.msg('Checking for java class %s'%classname,not found)
	shutil.rmtree(javatestdir,True)
	return found
@conf
def check_jni_headers(conf):
	"""Locate the JNI headers and the jvm library under JAVA_HOME.

	Stores the results in the 'JAVA' uselib; fatal error when not found.
	"""
	if not conf.env.CC_NAME and not conf.env.CXX_NAME:
		conf.fatal('load a compiler first (gcc, g++, ..)')
	if not conf.env.JAVA_HOME:
		conf.fatal('set JAVA_HOME in the system environment')
	javaHome=conf.env['JAVA_HOME'][0]
	# jni.h lives in <JAVA_HOME>/include, or ../Headers for OS X frameworks.
	dir=conf.root.find_dir(conf.env.JAVA_HOME[0]+'/include')
	if dir is None:
		dir=conf.root.find_dir(conf.env.JAVA_HOME[0]+'/../Headers')
	if dir is None:
		conf.fatal('JAVA_HOME does not seem to be set properly')
	f=dir.ant_glob('**/(jni|jni_md).h')
	incDirs=[x.parent.abspath()for x in f]
	dir=conf.root.find_dir(conf.env.JAVA_HOME[0])
	f=dir.ant_glob('**/*jvm.(so|dll|dylib)')
	libDirs=[x.parent.abspath()for x in f]or[javaHome]
	# On Windows an import library (jvm.lib) may be required as well.
	f=dir.ant_glob('**/*jvm.(lib)')
	if f:
		libDirs=[[x,y.parent.abspath()]for x in libDirs for y in f]
	# Try each candidate library directory until one compiles and links.
	for d in libDirs:
		try:
			conf.check(header_name='jni.h',define_name='HAVE_JNI_H',lib='jvm',libpath=d,includes=incDirs,uselib_store='JAVA',uselib='JAVA')
		except Exception:
			pass
		else:
			break
	else:
		conf.fatal('could not find lib jvm in %r (see config.log)'%libDirs)
| gpl-2.0 |
wzhuo918/release-1.1.2-MDP | src/contrib/hod/testing/main.py | 182 | 2928 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re
# Flat driver script: discovers test modules named test*.py in the testing
# directory, runs each module's Run<Name>Tests() suite, and exits with the
# total failure count.  (Python 2 exception syntax.)
myPath = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myPath)
testingDir = os.path.join(rootDirectory, "testing")
sys.path.append(rootDirectory)
from testing.lib import printSeparator, printLine
moduleList = []
allList = []
excludes = [
           ]
# Build a module list by scanning through all files in testingDir
for file in os.listdir(testingDir):
  if(re.search(r".py$", file) and re.search(r"^test", file)):
    # All .py files with names starting in 'test'
    module = re.sub(r"^test","",file)
    module = re.sub(r".py$","",module)
    allList.append(module)
    if module not in excludes:
      moduleList.append(module)
printLine("All testcases - %s" % allList)
printLine("Excluding the testcases - %s" % excludes)
printLine("Executing the testcases - %s" % moduleList)
testsResult = 0
# Now import each of these modules and start calling the corresponding
#testSuite methods
for moduleBaseName in moduleList:
  try:
    module = "testing.test" + moduleBaseName
    suiteCaller = "Run" + moduleBaseName + "Tests"
    printSeparator()
    printLine("Running %s" % suiteCaller)
    # Import the corresponding test cases module
    imported_module = __import__(module , fromlist=[suiteCaller] )
    # Call the corresponding suite method now
    testRes = getattr(imported_module, suiteCaller)()
    testsResult = testsResult + testRes
    printLine("Finished %s. TestSuite Result : %s\n" % \
      (suiteCaller, testRes))
  except ImportError, i:
    # Failed to import a test module
    printLine(i)
    testsResult = testsResult + 1
    pass
  except AttributeError, n:
    # Failed to get suiteCaller from a test module
    printLine(n)
    testsResult = testsResult + 1
    pass
  except Exception, e:
    # Test module suiteCaller threw some exception
    printLine("%s failed. \nReason : %s" % (suiteCaller, e))
    printLine("Skipping %s" % suiteCaller)
    testsResult = testsResult + 1
    pass
# Non-zero exit status signals the number of failed/erroring suites.
if testsResult != 0:
  printSeparator()
  printLine("Total testcases with failure or error : %s" % testsResult)
sys.exit(testsResult)
| apache-2.0 |
garlandkr/ansible | test/units/TestFilters.py | 13 | 3426 | '''
Test bundled filters
'''
import os.path
import unittest, tempfile, shutil
from ansible import playbook, inventory, callbacks
import ansible.runner.filter_plugins.core
INVENTORY = inventory.Inventory(['localhost'])
BOOK = '''
- hosts: localhost
vars:
var: { a: [1,2,3] }
tasks:
- template: src=%s dest=%s
'''
SRC = '''
-
{{ var|to_json }}
-
{{ var|to_nice_json }}
-
{{ var|to_yaml }}
-
{{ var|to_nice_yaml }}
'''
DEST = '''
-
{"a": [1, 2, 3]}
-
{
"a": [
1,
2,
3
]
}
-
a: [1, 2, 3]
-
a:
- 1
- 2
- 3
'''
class TestFilters(unittest.TestCase):
    """Exercise the bundled template filter plugins.

    Uses unittest assertion methods instead of bare ``assert``
    statements: bare asserts are stripped under ``python -O`` and
    produce no diagnostic message on failure.
    """

    def setUp(self):
        # Scratch directory for any files rendered during a test.
        self.tmpdir = tempfile.mkdtemp(dir='/tmp')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def temp(self, name, data=''):
        '''write a temporary file and return the name'''
        name = self.tmpdir + '/' + name
        with open(name, 'w') as f:
            f.write(data)
        return name

    def test_bool_none(self):
        # bool() passes None through unchanged rather than coercing it.
        a = ansible.runner.filter_plugins.core.bool(None)
        self.assertEqual(a, None)

    def test_bool_true(self):
        a = ansible.runner.filter_plugins.core.bool(True)
        self.assertEqual(a, True)

    def test_bool_yes(self):
        a = ansible.runner.filter_plugins.core.bool('Yes')
        self.assertEqual(a, True)

    def test_bool_no(self):
        a = ansible.runner.filter_plugins.core.bool('Foo')
        self.assertEqual(a, False)

    def test_quotes(self):
        a = ansible.runner.filter_plugins.core.quote('ls | wc -l')
        self.assertEqual(a, "'ls | wc -l'")

    def test_fileglob(self):
        pathname = os.path.join(os.path.dirname(__file__), '*')
        a = ansible.runner.filter_plugins.core.fileglob(pathname)
        self.assertIn(__file__, a)

    def test_regex(self):
        a = ansible.runner.filter_plugins.core.regex('ansible', 'ansible',
                                                     match_type='findall')
        self.assertEqual(a, True)

    def test_match_case_sensitive(self):
        a = ansible.runner.filter_plugins.core.match('ansible', 'ansible')
        self.assertEqual(a, True)

    def test_match_case_insensitive(self):
        a = ansible.runner.filter_plugins.core.match('ANSIBLE', 'ansible',
                                                     True)
        self.assertEqual(a, True)

    def test_match_no_match(self):
        a = ansible.runner.filter_plugins.core.match(' ansible', 'ansible')
        self.assertEqual(a, False)

    def test_search_case_sensitive(self):
        a = ansible.runner.filter_plugins.core.search(' ansible ', 'ansible')
        self.assertEqual(a, True)

    def test_search_case_insensitive(self):
        a = ansible.runner.filter_plugins.core.search(' ANSIBLE ', 'ansible',
                                                      True)
        self.assertEqual(a, True)

    #def test_filters(self):

        # this test is pretty low level using a playbook, hence I am disabling it for now -- MPD.
        #return

        #src = self.temp('src.j2', SRC)
        #dest = self.temp('dest.txt')
        #book = self.temp('book', BOOK % (src, dest))

        #playbook.PlayBook(
        #    playbook  = book,
        #    inventory = INVENTORY,
        #    transport = 'local',
        #    callbacks = callbacks.PlaybookCallbacks(),
        #    runner_callbacks = callbacks.DefaultRunnerCallbacks(),
        #    stats  = callbacks.AggregateStats(),
        #).run()

        #out = open(dest).read()
        #self.assertEqual(DEST, out)
| gpl-3.0 |
feliperfranca/FelipeRFranca_site | conteudo/views.py | 1 | 3367 | # Create your views here.
from conteudo.models import Academico, Profissional, Complementar, Competencia, Informacao,\
Timeline
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.views.generic.list import ListView
from layout.models import Tema
from datetime import datetime
def academico(request):
    """Function-based view listing the academic activities."""
    try:
        entries = Academico.objects.all()
    except Academico.DoesNotExist:
        raise Http404
    return render_to_response('conteudo/academico_archive.html', {'atuacoes': entries}, context_instance=RequestContext(request))
class academicoView(ListView):
    """List view for academic activities; applies the section's theme color."""
    context_object_name = 'atuacoes'
    queryset = Academico.objects.all()
    template_name = "conteudo/academico_archive.html"

    def __init__(self, **kwargs):
        super(academicoView, self).__init__(**kwargs)
        theme = Tema.objects.all()[0]
        theme.set_main_color(theme.cor_academico)
class profissionalView(ListView):
    """List view for professional activities; applies the section's theme color."""
    context_object_name = 'atuacoes'
    queryset = Profissional.objects.all()
    template_name = "conteudo/profissional_archive.html"

    def __init__(self, **kwargs):
        super(profissionalView, self).__init__(**kwargs)
        theme = Tema.objects.all()[0]
        theme.set_main_color(theme.cor_profissional)
class complementarView(ListView):
    """List view for complementary education; applies the section's theme color."""
    context_object_name = 'formacoes'
    queryset = Complementar.objects.all()
    template_name = "conteudo/complementar_archive.html"

    def __init__(self, **kwargs):
        super(complementarView, self).__init__(**kwargs)
        theme = Tema.objects.all()[0]
        theme.set_main_color(theme.cor_complementar)
class competenciasView(ListView):
    """List view for competencies; applies the section's theme color."""
    context_object_name = 'competencias'
    queryset = Competencia.objects.all()
    template_name = "conteudo/competencias_archive.html"

    def __init__(self, **kwargs):
        super(competenciasView, self).__init__(**kwargs)
        theme = Tema.objects.all()[0]
        theme.set_main_color(theme.cor_competencias)
class informacoesView(ListView):
    """List view for additional information; applies the section's theme color."""
    context_object_name = 'informacoes'
    queryset = Informacao.objects.all()
    template_name = "conteudo/informacoes_archive.html"

    def __init__(self, **kwargs):
        super(informacoesView, self).__init__(**kwargs)
        theme = Tema.objects.all()[0]
        theme.set_main_color(theme.cor_informacoes)
class timelineView(ListView):
    """Timeline view; bootstraps a default Tema record if none exists yet."""
    context_object_name = 'timeline'
    # Entries from the fixed start date up to the moment the module loads.
    queryset = Timeline.objects.filter(date__range=["1987-10-22", datetime.now()])
    template_name = "conteudo/timeline_archive.html"
    def __init__(self, **kwargs):
        ListView.__init__(self, **kwargs)
        if Tema.objects.all():
            tema = Tema.objects.all()[0]
            tema.set_main_color(tema.cor_home)
        else:
            # First run: create a default theme so set_main_color never fails.
            tema = Tema(cor_principal='#0088cc',\
                        cor_home='#0088cc',\
                        cor_academico='#0088cc',\
                        cor_profissional='#0088cc',\
                        cor_complementar='#0088cc',\
                        cor_competencias='#0088cc',\
                        cor_informacoes='#0088cc',\
                        cor_fonte_padrao='#30353A',\
                        fonte_padrao='"Helvetica Neue", sans-serif',\
                        sideleft_width='250')
            tema.save()
            tema = Tema.objects.all()[0]
            tema.set_main_color(tema.cor_home)
| bsd-3-clause |
DeMille/emailhooks | django_nonrel/django/contrib/auth/hashers.py | 34 | 16332 | from __future__ import unicode_literals
import base64
import functools
import hashlib
from django.dispatch import receiver
from django.conf import settings
from django.test.signals import setting_changed
from django.utils import importlib
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_bytes, force_str
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import (
pbkdf2, constant_time_compare, get_random_string)
from django.utils.translation import ugettext_noop as _
# Module-level registry state for the configured password hashers.
UNUSABLE_PASSWORD = '!'  # This will never be a valid encoded hash
MAXIMUM_PASSWORD_LENGTH = 4096  # The maximum length a password can be to prevent DoS
HASHERS = None  # lazily loaded from PASSWORD_HASHERS
PREFERRED_HASHER = None  # defaults to first item in PASSWORD_HASHERS
@receiver(setting_changed)
def reset_hashers(**kwargs):
    """Drop the cached hasher registry when PASSWORD_HASHERS changes in tests."""
    global HASHERS, PREFERRED_HASHER
    if kwargs['setting'] == 'PASSWORD_HASHERS':
        HASHERS = None
        PREFERRED_HASHER = None
def password_max_length(max_length):
    """Decorator factory rejecting passwords longer than *max_length* bytes.

    Raises ValueError before any hashing work happens, guarding the
    hasher methods against denial-of-service via huge passwords.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def guarded(self, password, *args, **kwargs):
            if len(password) > max_length:
                raise ValueError("Invalid password; Must be less than or equal"
                                 " to %d bytes" % max_length)
            return fn(self, password, *args, **kwargs)
        return guarded
    return decorator
def is_password_usable(encoded):
    """Return True if *encoded* looks like a hash a known hasher can verify.

    None and the UNUSABLE_PASSWORD marker are never usable; otherwise the
    value is usable exactly when identify_hasher() recognizes its format.
    """
    if encoded is None or encoded == UNUSABLE_PASSWORD:
        return False
    try:
        # The return value is irrelevant; only recognition matters.
        identify_hasher(encoded)
    except ValueError:
        return False
    return True
def check_password(password, encoded, setter=None, preferred='default'):
    """
    Returns a boolean of whether the raw password matches the three
    part encoded digest.

    If setter is specified, it'll be called when you need to
    regenerate the password.
    """
    if not password or not is_password_usable(encoded):
        return False

    preferred_hasher = get_hasher(preferred)
    current_hasher = identify_hasher(encoded)
    needs_rehash = current_hasher.algorithm != preferred_hasher.algorithm
    correct = current_hasher.verify(password, encoded)
    if setter and correct and needs_rehash:
        # Upgrade to the preferred algorithm while the cleartext is at hand.
        setter(password)
    return correct
def make_password(password, salt=None, hasher='default'):
    """
    Turn a plain-text password into a hash for database storage

    Same as encode() but generates a new random salt.  If
    password is None or blank then UNUSABLE_PASSWORD will be
    returned which disallows logins.
    """
    if not password:
        return UNUSABLE_PASSWORD
    chosen = get_hasher(hasher)
    return chosen.encode(password, salt or chosen.salt())
def load_hashers(password_hashers=None):
    """Populate the global hasher registry from PASSWORD_HASHERS.

    Each entry is a dotted class path; instantiation failures raise
    ImproperlyConfigured.  The first entry becomes the preferred hasher.
    """
    global HASHERS
    global PREFERRED_HASHER
    if not password_hashers:
        password_hashers = settings.PASSWORD_HASHERS
    loaded = []
    for backend in password_hashers:
        try:
            mod_path, cls_name = backend.rsplit('.', 1)
            mod = importlib.import_module(mod_path)
            hasher_cls = getattr(mod, cls_name)
        except (AttributeError, ImportError, ValueError):
            raise ImproperlyConfigured("hasher not found: %s" % backend)
        hasher = hasher_cls()
        if not getattr(hasher, 'algorithm'):
            raise ImproperlyConfigured("hasher doesn't specify an "
                                       "algorithm name: %s" % backend)
        loaded.append(hasher)
    HASHERS = dict((h.algorithm, h) for h in loaded)
    PREFERRED_HASHER = loaded[0]
def get_hasher(algorithm='default'):
    """
    Returns an instance of a loaded password hasher.

    If algorithm is 'default', the default hasher will be returned.
    This function will also lazy import hashers specified in your
    settings file if needed.
    """
    if hasattr(algorithm, 'algorithm'):
        # Already a hasher instance: hand it back unchanged.
        return algorithm
    if algorithm == 'default':
        if PREFERRED_HASHER is None:
            load_hashers()
        return PREFERRED_HASHER
    if HASHERS is None:
        load_hashers()
    if algorithm not in HASHERS:
        raise ValueError("Unknown password hashing algorithm '%s'. "
                         "Did you specify it in the PASSWORD_HASHERS "
                         "setting?" % algorithm)
    return HASHERS[algorithm]
def identify_hasher(encoded):
    """
    Returns an instance of a loaded password hasher.

    Identifies hasher algorithm by examining encoded hash, and calls
    get_hasher() to return hasher.  Raises ValueError if
    algorithm cannot be identified, or if hasher is not loaded.
    """
    length = len(encoded)
    if (length == 32 and '$' not in encoded) or \
            (length == 37 and encoded.startswith('md5$$')):
        # Ancient Django: plain MD5, or "md5$$" with an empty salt.
        algorithm = 'unsalted_md5'
    elif length == 46 and encoded.startswith('sha1$$'):
        # Ancient Django: SHA1 with an empty salt.
        algorithm = 'unsalted_sha1'
    else:
        algorithm = encoded.split('$', 1)[0]
    return get_hasher(algorithm)
def mask_hash(hash, show=6, char="*"):
    """Return *hash* with all but the first ``show`` characters replaced.

    Replacement uses ``char``; used to build safe, display-only
    representations of stored password hashes.
    """
    return hash[:show] + char * len(hash[show:])
class BasePasswordHasher(object):
    """
    Abstract base class for password hashers

    When creating your own hasher, you need to override algorithm,
    verify(), encode() and safe_summary().

    PasswordHasher objects are immutable.
    """
    # Identifier stored as the first '$'-separated field of encoded hashes.
    algorithm = None
    # Optional third-party module: a dotted path string, or a
    # (display_name, module_path) pair for nicer error messages.
    library = None

    def _load_library(self):
        # Import and return the module named by ``library``; raises
        # ValueError when the import fails or no library is declared.
        if self.library is not None:
            if isinstance(self.library, (tuple, list)):
                name, mod_path = self.library
            else:
                name = mod_path = self.library
            try:
                module = importlib.import_module(mod_path)
            except ImportError:
                raise ValueError("Couldn't load %s password algorithm "
                                 "library" % name)
            return module
        raise ValueError("Hasher '%s' doesn't specify a library attribute" %
                         self.__class__)

    def salt(self):
        """
        Generates a cryptographically secure nonce salt in ascii
        """
        return get_random_string()

    def verify(self, password, encoded):
        """
        Checks if the given password is correct
        """
        raise NotImplementedError()

    def encode(self, password, salt):
        """
        Creates an encoded database value

        The result is normally formatted as "algorithm$salt$hash" and
        must be fewer than 128 characters.
        """
        raise NotImplementedError()

    def safe_summary(self, encoded):
        """
        Returns a summary of safe values

        The result is a dictionary and will be used where the password field
        must be displayed to construct a safe representation of the password.
        """
        raise NotImplementedError()
class PBKDF2PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the PBKDF2 algorithm (recommended)

    Configured to use PBKDF2 + HMAC + SHA256 with 10000 iterations.
    The result is a 64 byte binary string.  Iterations may be changed
    safely but you must rename the algorithm if you change SHA256.
    """
    algorithm = "pbkdf2_sha256"
    iterations = 10000
    digest = hashlib.sha256

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def encode(self, password, salt, iterations=None):
        # Encoded form: "<algorithm>$<iterations>$<salt>$<base64 digest>".
        assert password
        assert salt and '$' not in salt
        if not iterations:
            iterations = self.iterations
        hash = pbkdf2(password, salt, iterations, digest=self.digest)
        hash = base64.b64encode(hash).decode('ascii').strip()
        return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def verify(self, password, encoded):
        # Re-encode with the stored salt/iterations, compare in constant time.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, salt, int(iterations))
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        # Display-safe breakdown: salt and hash are partially masked.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        return SortedDict([
            (_('algorithm'), algorithm),
            (_('iterations'), iterations),
            (_('salt'), mask_hash(salt)),
            (_('hash'), mask_hash(hash)),
        ])
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
    """
    PBKDF2 variant that uses SHA1 as the PRF (the PKCS #5 default).

    Interoperates with other PBKDF2 implementations such as OpenSSL's
    PKCS5_PBKDF2_HMAC_SHA1().
    """
    algorithm = "pbkdf2_sha1"
    digest = hashlib.sha1
class BCryptPasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the bcrypt algorithm (recommended)

    This is considered by many to be the most secure algorithm but you
    must first install the py-bcrypt library.  Please be warned that
    this library depends on native C code and might cause portability
    issues.
    """
    algorithm = "bcrypt"
    library = ("py-bcrypt", "bcrypt")
    # Work factor passed to bcrypt.gensalt().
    rounds = 12

    def salt(self):
        # bcrypt generates its own salt format, embedding the work factor.
        bcrypt = self._load_library()
        return bcrypt.gensalt(self.rounds)

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def encode(self, password, salt):
        bcrypt = self._load_library()
        # Need to reevaluate the force_bytes call once bcrypt is supported on
        # Python 3
        data = bcrypt.hashpw(force_bytes(password), salt)
        return "%s$%s" % (self.algorithm, data)

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def verify(self, password, encoded):
        # hashpw() re-hashes with the salt embedded in ``data``.
        algorithm, data = encoded.split('$', 1)
        assert algorithm == self.algorithm
        bcrypt = self._load_library()
        return constant_time_compare(data, bcrypt.hashpw(force_bytes(password), data))

    def safe_summary(self, encoded):
        # bcrypt layout: $<algostr>$<work factor>$<22-char salt><checksum>.
        algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
        assert algorithm == self.algorithm
        salt, checksum = data[:22], data[22:]
        return SortedDict([
            (_('algorithm'), algorithm),
            (_('work factor'), work_factor),
            (_('salt'), mask_hash(salt)),
            (_('checksum'), mask_hash(checksum)),
        ])
class SHA1PasswordHasher(BasePasswordHasher):
    """
    Salted SHA1 password hashing (legacy; not recommended for new use).
    """
    algorithm = "sha1"

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def encode(self, password, salt):
        assert password
        assert salt and '$' not in salt
        digest = hashlib.sha1(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, digest)

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def verify(self, password, encoded):
        # Re-encode with the stored salt and compare in constant time.
        algorithm, salt, digest = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return constant_time_compare(encoded, self.encode(password, salt))

    def safe_summary(self, encoded):
        algorithm, salt, digest = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return SortedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(digest)),
        ])
class MD5PasswordHasher(BasePasswordHasher):
    """
    Salted MD5 hashing. Kept for legacy passwords only; not recommended.
    """
    algorithm = "md5"

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def encode(self, password, salt):
        """Return ``md5$<salt>$<hexdigest>``; salt must be non-empty and '$'-free."""
        assert password
        assert salt and '$' not in salt
        digest = hashlib.md5(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, digest)

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def verify(self, password, encoded):
        """Constant-time check of *password* against the stored *encoded* value."""
        algorithm, salt, _digest = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return constant_time_compare(encoded, self.encode(password, salt))

    def safe_summary(self, encoded):
        """Return an ordered, partially masked summary suitable for display."""
        algorithm, salt, digest = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return SortedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(digest)),
        ])
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
    """
    Very insecure algorithm that you should *never* use; stores SHA1 hashes
    with an empty salt.

    Implemented only because Django used to accept such password hashes, and
    older installs may still contain them; they must be readable so they can
    be upgraded.
    """
    algorithm = "unsalted_sha1"

    def salt(self):
        """Unsalted scheme: the salt is always the empty string."""
        return ''

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def encode(self, password, salt):
        """Return ``sha1$$<hexdigest>`` (note the deliberately empty salt field)."""
        assert salt == ''
        return 'sha1$$%s' % hashlib.sha1(force_bytes(password)).hexdigest()

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def verify(self, password, encoded):
        """Constant-time check of *password* against the stored *encoded* value."""
        return constant_time_compare(encoded, self.encode(password, ''))

    def safe_summary(self, encoded):
        """Return an ordered, partially masked summary suitable for display."""
        assert encoded.startswith('sha1$$')
        return SortedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(encoded[6:])),
        ])
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
    """
    Incredibly insecure algorithm that you should *never* use; stores unsalted
    MD5 hashes without the algorithm prefix, and also accepts MD5 hashes with
    an empty salt.

    Implemented only because Django used to store passwords this way and older
    installs may still contain such values; they must be readable so they can
    be upgraded.
    """
    algorithm = "unsalted_md5"

    def salt(self):
        """Unsalted scheme: the salt is always the empty string."""
        return ''

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def encode(self, password, salt):
        """Return the bare MD5 hexdigest (no algorithm prefix, no salt)."""
        assert salt == ''
        return hashlib.md5(force_bytes(password)).hexdigest()

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def verify(self, password, encoded):
        """Check *password*, also accepting the legacy ``md5$$<digest>`` form."""
        # 37 == len('md5$$') + 32 hex digits; strip the legacy prefix first.
        if encoded.startswith('md5$$') and len(encoded) == 37:
            encoded = encoded[5:]
        return constant_time_compare(encoded, self.encode(password, ''))

    def safe_summary(self, encoded):
        """Return an ordered, partially masked summary suitable for display."""
        return SortedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(encoded, show=3)),
        ])
class CryptPasswordHasher(BasePasswordHasher):
    """
    Password hashing via UNIX crypt(3) (not recommended).

    The crypt module is not available on all platforms.
    """
    algorithm = "crypt"
    library = "crypt"

    def salt(self):
        """crypt(3) consumes a two-character salt."""
        return get_random_string(2)

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def encode(self, password, salt):
        """Return ``crypt$$<crypt output>`` for the password/salt pair."""
        crypt = self._load_library()
        assert len(salt) == 2
        hashed = crypt.crypt(force_str(password), salt)
        # The salt field is intentionally left empty (the salt is embedded in
        # the crypt output), but Django used to store it this way.
        return "%s$%s$%s" % (self.algorithm, '', hashed)

    @password_max_length(MAXIMUM_PASSWORD_LENGTH)
    def verify(self, password, encoded):
        """Constant-time check of *password* against the stored *encoded* value."""
        crypt = self._load_library()
        algorithm, _salt, hashed = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return constant_time_compare(hashed, crypt.crypt(force_str(password), hashed))

    def safe_summary(self, encoded):
        """Return an ordered, partially masked summary suitable for display."""
        algorithm, salt, hashed = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return SortedDict([
            (_('algorithm'), algorithm),
            (_('salt'), salt),
            (_('hash'), mask_hash(hashed, show=3)),
        ])
| mit |
ulif/pulp | bindings/pulp/bindings/exceptions.py | 15 | 3895 | """
Defines exception classes to handle server connection and request exceptions
"""
from gettext import gettext as _
class RequestException(Exception):
    """
    Base exception class for all exceptions that originate by the Pulp server. These
    exceptions coming from the server use the standard exception structure and can be parsed
    accordingly.
    """

    def __init__(self, response_body):
        """
        :param response_body: parsed body of the server's error response; the
            well-known keys are popped off, and whatever remains is retained
            as ``extra_data``.
        :type response_body: dict
        """
        Exception.__init__(self)
        self.href = response_body.pop('_href', None)
        self.http_request_method = response_body.pop('http_request_method', None)
        self.http_status = response_body.pop('http_status', None)
        self.error_message = response_body.pop('error_message', None)
        self.exception = response_body.pop('exception', None)
        self.traceback = response_body.pop('traceback', None)
        # Anything not explicitly removed above represents extra data to further
        # classify the exception.
        self.extra_data = response_body

    def __str__(self):
        message_data = {'m': self.http_request_method,
                        'h': self.href,
                        's': self.http_status,
                        'g': self.error_message}
        # Look up the translation of the *template* first, then interpolate.
        # The original interpolated before calling _(), which produces a
        # different message for every request and therefore can never match a
        # catalog entry; output is unchanged when no translation is installed.
        return _('RequestException: %(m)s request on %(h)s failed with %(s)s - %(g)s') % message_data
# Response code = 400
class BadRequestException(RequestException):
    """Raised when the server responds with HTTP 400 (bad request)."""
# Response code = 401
class PermissionsException(RequestException):
    """Raised when the server responds with HTTP 401 (unauthorized)."""
# Response code = 404
class NotFoundException(RequestException):
    """Raised when the server responds with HTTP 404 (not found)."""
# Response code = 409
class ConflictException(RequestException):
    """Raised when the server responds with HTTP 409 (conflict)."""
# Response code >= 500
class PulpServerException(RequestException):
    """Raised for HTTP 5xx responses that use the standard Pulp error format."""
# Response code >= 500 and not a Pulp formatted error
class ApacheServerException(Exception):
    """
    Raised for 5xx errors produced by Apache itself, which are not in the
    standard Pulp format; therefore this does not subclass RequestException
    and simply stores the string Apache returned.

    The stored response body is an HTML page that basically says stuff broke,
    so it is of limited use: the user will still likely need to go to the
    server to figure out what went wrong.
    """

    def __init__(self, message):
        """
        @param message: the response body apache returns with the error
        @type message: str
        """
        super(ApacheServerException, self).__init__()
        self.message = message
class ClientSSLException(Exception):
    """
    Raised in the event the client-side libraries refuse to communicate with
    the server.
    """
class ClientCertificateExpiredException(ClientSSLException):
    """
    Raised when the client certificate has expired; the client-side libraries
    check for this before initiating the request.
    """

    def __init__(self, cert_filename):
        """
        :param cert_filename: path to the expired certificate file
        """
        super(ClientCertificateExpiredException, self).__init__()
        self.cert_filename = cert_filename
class CertificateVerificationException(ClientSSLException):
    """
    Raised when the client does not trust the authority that signed the
    server's SSL certificate. Possible causes include a man-in-the-middle
    attack, a self-signed certificate, or a certificate signed by an untrusted
    certificate authority.
    """
class MissingCAPathException(ClientSSLException):
    """
    Raised when the bindings are handed a ca_path that either does not exist,
    or whose existence cannot be determined due to permissions.
    """
class ConnectionException(Exception):
    """
    Indicates a less than favorable response from the server.

    Arguments:
      [0] the response status as an integer
      [1] the response message as a dict if it could be decoded from json,
          otherwise a str
      [2] a traceback if the server response was a python error, else None
    """
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.