| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
yahoo/pulsar | dashboard/django/stats/models.py | 4 | 8654 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db.models import *
from django.urls import reverse
# Used to store the latest timestamp
class LatestTimestamp(Model):
name = CharField(max_length=10, unique=True)
timestamp = BigIntegerField(default=0)
@python_2_unicode_compatible
class Cluster(Model):
name = CharField(max_length=200, unique=True)
serviceUrl = URLField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Broker(Model):
url = URLField(db_index=True)
cluster = ForeignKey(Cluster, on_delete=SET_NULL, db_index=True, null=True)
def __str__(self):
return self.url
@python_2_unicode_compatible
class ActiveBroker(Model):
broker = ForeignKey(Broker, on_delete=SET_NULL, db_index=True, null=True)
timestamp = BigIntegerField(db_index=True)
def __str__(self):
return self.broker.url
@python_2_unicode_compatible
class Property(Model):
name = CharField(max_length=200, unique=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'properties'
@python_2_unicode_compatible
class Namespace(Model):
name = CharField(max_length=200)
property = ForeignKey(Property, on_delete=SET_NULL, db_index=True, null=True)
clusters = ManyToManyField(Cluster)
timestamp = BigIntegerField(db_index=True)
deleted = BooleanField(default=False)
def is_global(self):
return self.name.split('/', 2)[1] == 'global'
def is_v2(self):
return len(self.name.split('/', 2)) == 2
def __str__(self):
return self.name
class Meta:
index_together = ('name', 'timestamp', 'deleted')
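# Illustrative note (added, not in the original source): a v1 name such as
# 'prop/global/ns' splits into ['prop', 'global', 'ns'], so is_global() is
# True; a v2 name such as 'prop/ns' has only two parts, so is_v2() is True
# and is_global() is False.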
@python_2_unicode_compatible
class Bundle(Model):
timestamp = BigIntegerField(db_index=True)
broker = ForeignKey(Broker, on_delete=SET_NULL, db_index=True, null=True)
namespace = ForeignKey(Namespace, on_delete=SET_NULL, db_index=True, null=True)
cluster = ForeignKey(Cluster, on_delete=SET_NULL, db_index=True, null=True)
range = CharField(max_length=200)
def __str__(self):
return str(self.pk) + '--' + self.namespace.name + '/' + self.range
@python_2_unicode_compatible
class Topic(Model):
name = CharField(max_length=1024, db_index=True)
active_broker = ForeignKey(ActiveBroker, on_delete=SET_NULL, db_index=True, null=True)
broker = ForeignKey(Broker, on_delete=SET_NULL, db_index=True, null=True)
namespace = ForeignKey(Namespace, on_delete=SET_NULL, db_index=True, null=True)
cluster = ForeignKey(Cluster, on_delete=SET_NULL, db_index=True, null=True)
bundle = ForeignKey(Bundle, on_delete=SET_NULL, db_index=True, null=True)
timestamp = BigIntegerField(db_index=True)
deleted = BooleanField(default=False)
averageMsgSize = IntegerField(default=0)
msgRateIn = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgRateOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgThroughputIn = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgThroughputOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
pendingAddEntriesCount = DecimalField(max_digits = 12, decimal_places=1, default=0)
producerCount = IntegerField(default=0)
subscriptionCount = IntegerField(default=0)
consumerCount = IntegerField(default=0)
storageSize = BigIntegerField(default=0)
backlog = BigIntegerField(default=0)
localRateIn = DecimalField(max_digits = 12, decimal_places=1, default=0)
localRateOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
localThroughputIn = DecimalField(max_digits = 12, decimal_places=1, default=0)
localThroughputOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
replicationRateIn = DecimalField(max_digits = 12, decimal_places=1, default=0)
replicationRateOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
replicationThroughputIn = DecimalField(max_digits = 12, decimal_places=1, default=0)
replicationThroughputOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
replicationBacklog = BigIntegerField(default=0)
def short_name(self):
return self.name.split('/', 5)[-1]
def is_global(self):
return self.namespace.is_global()
def is_v2(self):
return self.namespace.is_v2()
def url_name(self):
return '/'.join(self.name.split('://', 1))
def get_absolute_url(self):
url = reverse('topic', args=[self.url_name()])
if self.namespace.is_global():
url += '?cluster=' + self.cluster.name
return url
class Meta:
index_together = ('name', 'cluster', 'timestamp', 'deleted')
def __str__(self):
return self.name
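# Illustrative note (added): for a topic named
# 'persistent://prop/cluster/ns/topic', short_name() returns 'topic' and
# url_name() returns 'persistent/prop/cluster/ns/topic', which is the form
# reverse('topic', ...) above receives as its URL argument.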
@python_2_unicode_compatible
class Subscription(Model):
name = CharField(max_length=200)
topic = ForeignKey(Topic, on_delete=SET_NULL, null=True)
namespace = ForeignKey(Namespace, on_delete=SET_NULL, null=True, db_index=True)
timestamp = BigIntegerField(db_index=True)
deleted = BooleanField(default=False)
msgBacklog = BigIntegerField(default=0)
msgRateExpired = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgRateOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgRateRedeliver = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgThroughputOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
SUBSCRIPTION_TYPES = (
('N', 'Not connected'),
('E', 'Exclusive'),
('S', 'Shared'),
('F', 'Failover'),
)
subscriptionType = CharField(max_length=1, choices=SUBSCRIPTION_TYPES, default='N')
unackedMessages = BigIntegerField(default=0)
class Meta:
unique_together = ('name', 'topic', 'timestamp')
def __str__(self):
return self.name
class Consumer(Model):
timestamp = BigIntegerField(db_index=True)
subscription = ForeignKey(Subscription, on_delete=SET_NULL, db_index=True, null=True)
address = CharField(max_length=64, null=True)
availablePermits = IntegerField(default=0)
connectedSince = DateTimeField(null=True)
consumerName = CharField(max_length=256, null=True)
msgRateOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgRateRedeliver = DecimalField(max_digits = 12, decimal_places=1, default=0)
msgThroughputOut = DecimalField(max_digits = 12, decimal_places=1, default=0)
unackedMessages = BigIntegerField(default=0)
blockedConsumerOnUnackedMsgs = BooleanField(default=False)
class Replication(Model):
timestamp = BigIntegerField(db_index=True)
topic = ForeignKey(Topic, on_delete=SET_NULL, null=True)
local_cluster = ForeignKey(Cluster, on_delete=SET_NULL, null=True)
remote_cluster = ForeignKey(Cluster, on_delete=SET_NULL, null=True, related_name='remote_cluster')
msgRateIn = DecimalField(max_digits = 12, decimal_places=1)
msgThroughputIn = DecimalField(max_digits = 12, decimal_places=1)
msgRateOut = DecimalField(max_digits = 12, decimal_places=1)
msgThroughputOut = DecimalField(max_digits = 12, decimal_places=1)
msgRateExpired = DecimalField(max_digits = 12, decimal_places=1)
replicationBacklog = BigIntegerField(default=0)
connected = BooleanField(default=False)
replicationDelayInSeconds = IntegerField(default=0)
inboundConnectedSince = DateTimeField(null=True)
outboundConnectedSince = DateTimeField(null=True)
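# Minimal usage sketch (assumed, not part of the original module): a consumer
# of these models would typically read the newest snapshot along these lines:
#   ts = LatestTimestamp.objects.get(name='latest').timestamp
#   topics = Topic.objects.filter(timestamp=ts, deleted=False)
# The 'latest' key name here is an assumption for illustration only.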
| apache-2.0 |
yiqingj/work | tests/python_tests/sqlite_test.py | 3 | 17828 | #!/usr/bin/env python
from nose.tools import *
from utilities import execution_path, run_all
import os, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'sqlite' in mapnik.DatasourceCache.plugin_names():
def test_attachdb_with_relative_file():
# The point table and index are in the qgis_spatiallite.sqlite
# database. If either is not found, then this test fails.
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='point',
attachdb='scratch@qgis_spatiallite.sqlite'
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['pkuid'],1)
def test_attachdb_with_multiple_files():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
attachdb='scratch1@:memory:,scratch2@:memory:',
initdb='''
create table scratch1.attachedtest (the_geom);
create virtual table scratch2.idx_attachedtest_the_geom using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch2.idx_attachedtest_the_geom values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
# the above should not throw but will result in no features
eq_(feature,None)
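# Clarifying note (added): attachdb takes a comma-separated list of
# 'alias@file' entries (':memory:' attaches a scratch in-memory database),
# and initdb is SQL executed against the attached databases before the
# datasource is queried, as the tests above and below demonstrate.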
def test_attachdb_with_absolute_file():
# The point table and index are in the qgis_spatiallite.sqlite
# database. If either is not found, then this test fails.
ds = mapnik.SQLite(file=os.getcwd() + '/../data/sqlite/world.sqlite',
table='point',
attachdb='scratch@qgis_spatiallite.sqlite'
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['pkuid'],1)
def test_attachdb_with_index():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
attachdb='scratch@:memory:',
initdb='''
create table scratch.attachedtest (the_geom);
create virtual table scratch.idx_attachedtest_the_geom using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch.idx_attachedtest_the_geom values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
def test_attachdb_with_explicit_index():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
index_table='myindex',
attachdb='scratch@:memory:',
initdb='''
create table scratch.attachedtest (the_geom);
create virtual table scratch.myindex using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch.myindex values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
def test_attachdb_with_sql_join():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3 limit 100)',
attachdb='busines@business.sqlite'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
fs = ds.featureset()
feature = fs.next()
eq_(feature.id(),1)
expected = {
1995:0,
1996:0,
1997:0,
1998:0,
1999:0,
2000:0,
2001:0,
2002:0,
2003:0,
2004:0,
2005:0,
2006:0,
2007:0,
2008:0,
2009:0,
2010:0,
# this appears to be sqlite's way of
# automatically handling clashing column names
'ISO3:1':'ATG',
'OGC_FID':1,
'area':44,
'fips':u'AC',
'iso2':u'AG',
'iso3':u'ATG',
'lat':17.078,
'lon':-61.783,
'name':u'Antigua and Barbuda',
'pop2005':83039,
'region':19,
'subregion':29,
'un':28
}
for k,v in expected.items():
try:
eq_(feature[str(k)],v)
except:
#import pdb;pdb.set_trace()
print 'invalid key/v %s/%s for: %s' % (k,v,feature)
def test_attachdb_with_sql_join_count():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3 limit 100)',
attachdb='busines@business.sqlite'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),100)
def test_attachdb_with_sql_join_count2():
'''
sqlite3 world.sqlite
attach database 'business.sqlite' as business;
select count(*) from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='busines@business.sqlite'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),192)
def test_attachdb_with_sql_join_count3():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects!) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='busines@business.sqlite'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),192)
def test_attachdb_with_sql_join_count4():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects! limit 1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='busines@business.sqlite'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),1)
def test_attachdb_with_sql_join_count5():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects! and 1=2) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='busines@business.sqlite'
)
# nothing is able to join to business so we don't pick up business schema
eq_(len(ds.fields()),12)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float'])
eq_(len(ds.all_features()),0)
def test_subqueries():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='world_merc',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
eq_(feature['iso2'],u'AG')
eq_(feature['iso3'],u'ATG')
eq_(feature['un'],28)
eq_(feature['name'],u'Antigua and Barbuda')
eq_(feature['area'],44)
eq_(feature['pop2005'],83039)
eq_(feature['region'],19)
eq_(feature['subregion'],29)
eq_(feature['lon'],-61.783)
eq_(feature['lat'],17.078)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
eq_(feature['iso2'],u'AG')
eq_(feature['iso3'],u'ATG')
eq_(feature['un'],28)
eq_(feature['name'],u'Antigua and Barbuda')
eq_(feature['area'],44)
eq_(feature['pop2005'],83039)
eq_(feature['region'],19)
eq_(feature['subregion'],29)
eq_(feature['lon'],-61.783)
eq_(feature['lat'],17.078)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select OGC_FID,GEOMETRY from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(len(feature),1)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select GEOMETRY,OGC_FID,fips from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
# same as above, except with alias like postgres requires
# TODO - should we try to make this work?
#ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
# table='(select GEOMETRY,rowid as aliased_id,fips from world_merc) as table',
# key_field='aliased_id'
# )
#fs = ds.featureset()
#feature = fs.next()
#eq_(feature['aliased_id'],1)
#eq_(feature['fips'],u'AC')
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select GEOMETRY,OGC_FID,OGC_FID as rowid,fips from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['rowid'],1)
eq_(feature['fips'],u'AC')
def test_empty_db():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='empty',
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
@raises(RuntimeError)
def test_that_nonexistant_query_field_throws(**kwargs):
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='empty',
)
eq_(len(ds.fields()),25)
eq_(ds.fields(),['OGC_FID', 'scalerank', 'labelrank', 'featurecla', 'sovereignt', 'sov_a3', 'adm0_dif', 'level', 'type', 'admin', 'adm0_a3', 'geou_dif', 'name', 'abbrev', 'postal', 'name_forma', 'terr_', 'name_sort', 'map_color', 'pop_est', 'gdp_md_est', 'fips_10_', 'iso_a2', 'iso_a3', 'iso_n3'])
eq_(ds.field_types(),['int', 'int', 'int', 'str', 'str', 'str', 'float', 'float', 'str', 'str', 'str', 'float', 'str', 'str', 'str', 'str', 'str', 'str', 'float', 'float', 'float', 'float', 'str', 'str', 'float'])
query = mapnik.Query(ds.envelope())
for fld in ds.fields():
query.add_property_name(fld)
# also add an invalid one, triggering throw
query.add_property_name('bogus')
fs = ds.features(query)
def test_intersects_token1():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where !intersects!)',
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
def test_intersects_token2():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where "a"!="b" and !intersects!)',
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
def test_intersects_token3():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where "a"!="b" and !intersects!)',
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
# https://github.com/mapnik/mapnik/issues/1537
# this works because key_field is manually set
def test_db_with_one_text_column():
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (alias TEXT,geometry BLOB);
insert into test1 values ("test",x'%s');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='alias'
)
eq_(len(ds.fields()),1)
eq_(ds.fields(),['alias'])
eq_(ds.field_types(),['str'])
fs = ds.all_features()
eq_(len(fs),1)
feat = fs[0]
#eq_(feat.id(),1)
eq_(feat['alias'],'test')
eq_(len(feat.geometries()),1)
eq_(feat.geometries()[0].to_wkt(),'Point(0 0)')
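# Reference note (added): the wkb hex above is well-known binary for
# POINT(0 0): byte 01 = little-endian byte order, 01000000 = geometry type 1
# (Point), followed by two little-endian doubles of 0.0 for x and y, which
# matches the to_wkt() assertion.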
def test_that_64bit_int_fields_work():
ds = mapnik.SQLite(file='../data/sqlite/64bit_int.sqlite',
table='int_table',
use_spatial_index=False
)
eq_(len(ds.fields()),3)
eq_(ds.fields(),['OGC_FID','id','bigint'])
eq_(ds.field_types(),['int','int','int'])
fs = ds.featureset()
feat = fs.next()
eq_(feat.id(),1)
eq_(feat['OGC_FID'],1)
eq_(feat['bigint'],2147483648)
feat = fs.next()
eq_(feat.id(),2)
eq_(feat['OGC_FID'],2)
eq_(feat['bigint'],922337203685477580)
def test_null_id_field():
# silence null key warning: https://github.com/mapnik/mapnik/issues/1889
default_logging_severity = mapnik.logger.get_severity()
mapnik.logger.set_severity(mapnik.severity_type.None)
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
# note: the osm_id should be declared INTEGER PRIMARY KEY
# but in this case we intentionally do not make this a valid pkey
# otherwise sqlite would turn the null into a valid, serial id
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (osm_id INTEGER,geometry BLOB);
insert into test1 values (null,x'%s');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='osm_id'
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
mapnik.logger.set_severity(default_logging_severity)
if __name__ == "__main__":
setup()
run_all(eval(x) for x in dir() if x.startswith("test_"))
| lgpl-2.1 |
jianghuaw/nova | nova/conductor/manager.py | 1 | 56574 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import contextlib
import copy
import functools
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import versionutils
import six
from nova import availability_zones
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import context as nova_context
from nova.db import base
from nova import exception
from nova.i18n import _
from nova import image
from nova import manager
from nova import network
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova import profiler
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def targets_cell(fn):
"""Wrap a method and automatically target the instance's cell.
This decorates a method with signature func(self, context, instance, ...)
and automatically targets the context with the instance's cell
mapping. It does this by looking up the InstanceMapping.
"""
@functools.wraps(fn)
def wrapper(self, context, *args, **kwargs):
instance = kwargs.get('instance') or args[0]
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
LOG.error('InstanceMapping not found, unable to target cell',
instance=instance)
im = None
else:
LOG.debug('Targeting cell %(cell)s for conductor method %(meth)s',
{'cell': im.cell_mapping.identity,
'meth': fn.__name__})
# NOTE(danms): Target our context to the cell for the rest of
# this request, so that none of the subsequent code needs to
# care about it.
nova_context.set_target_cell(context, im.cell_mapping)
return fn(self, context, *args, **kwargs)
return wrapper
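# Illustrative use (added comment; the decorator is applied to methods such
# as migrate_server() further below):
#
#     @targets_cell
#     def migrate_server(self, context, instance, ...):
#         # 'context' is now targeted at the instance's cell mapping
#         ...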
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='3.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.compute_task_mgr = ComputeTaskManager()
self.additional_endpoints.append(self.compute_task_mgr)
# NOTE(hanlind): This can be removed in version 4.0 of the RPC API
def provider_fw_rule_get_all(self, context):
# NOTE(hanlind): Simulate an empty db result for compat reasons.
return []
def _object_dispatch(self, target, method, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(*args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objclass = nova_object.NovaObject.obj_class_from_name(
objname, object_versions[objname])
args = tuple([context] + list(args))
result = self._object_dispatch(objclass, objmethod, args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
# NOTE(hanlind): Do not convert older than requested objects,
# see bug #1596119.
if isinstance(result, nova_object.NovaObject):
target_version = object_versions[objname]
requested_version = versionutils.convert_version_to_tuple(
target_version)
actual_version = versionutils.convert_version_to_tuple(
result.VERSION)
do_backport = requested_version < actual_version
other_major_version = requested_version[0] != actual_version[0]
if do_backport or other_major_version:
result = result.obj_to_primitive(
target_version=target_version,
version_manifest=object_versions)
return result
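# Worked example (added): if a client pins Instance to version 2.0 while the
# conductor holds Instance 2.3, requested_version (2, 0) < actual_version
# (2, 3), so obj_to_primitive(target_version='2.0') backports the result to
# a primitive the older client can deserialize. The version numbers are
# hypothetical, chosen only to illustrate the tuple comparison above.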
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport_versions(self, context, objinst, object_versions):
target = object_versions[objinst.obj_name()]
LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s',
{'obj': objinst.obj_name(),
'ver': target,
'manifest': ','.join(
['%s=%s' % (name, ver)
for name, ver in object_versions.items()])})
return objinst.obj_to_primitive(target_version=target,
version_manifest=object_versions)
def reset(self):
objects.Service.clear_min_version_cache()
@contextlib.contextmanager
def try_target_cell(context, cell):
"""If cell is not None call func with context.target_cell.
This is a method to help during the transition period. Currently
various mappings may not exist if a deployment has not migrated to
cellsv2. If there is no mapping call the func as normal, otherwise
call it in a target_cell context.
"""
if cell:
with nova_context.target_cell(context, cell) as cell_context:
yield cell_context
else:
yield context
@contextlib.contextmanager
def obj_target_cell(obj, cell):
"""Run with object's context set to a specific cell"""
with try_target_cell(obj._context, cell) as target:
with obj.obj_alternate_context(target):
yield target
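# Usage sketch (added): this is how an object is written into a specific
# cell's database, e.g. as schedule_and_build_instances() does further below:
#
#     with obj_target_cell(instance, cell):
#         instance.create()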
@profiler.trace_cls("rpc")
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.17')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.network_api = network.API()
self.servicegroup_api = servicegroup.API()
self.scheduler_client = scheduler_client.SchedulerClient()
self.notifier = rpc.get_notifier('compute', CONF.host)
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
# TODO(tdurakov): remove `live` parameter here on compute task api RPC
# version bump to 2.x
@messaging.expected_exceptions(
exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.ComputeHostNotFound,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.UnsupportedPolicyException)
@targets_cell
@wrap_instance_event(prefix='conductor')
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True, request_spec=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE: Remove this when we drop support for v1 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations, clean_shutdown, request_spec)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations, clean_shutdown, request_spec):
image = utils.get_image_from_system_metadata(
instance.system_metadata)
# NOTE(sbauza): If a reschedule occurs when prep_resize(), then
# it only provides filter_properties legacy dict back to the
# conductor with no RequestSpec part of the payload.
if not request_spec:
# Make sure we hydrate a new RequestSpec object with the new flavor
# and not the nested one from the instance
request_spec = objects.RequestSpec.from_components(
context, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone)
else:
# NOTE(sbauza): Resizes means new flavor, so we need to update the
# original RequestSpec object for make sure the scheduler verifies
# the right one and not the original flavor
request_spec.flavor = flavor
task = self._build_cold_migrate_task(context, instance, flavor,
request_spec,
reservations, clean_shutdown)
# TODO(sbauza): Provide directly the RequestSpec object once
# _set_vm_state_and_notify() accepts it
legacy_spec = request_spec.to_legacy_request_spec_dict()
try:
task.execute()
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
# if the flavor IDs match, it's migrate; otherwise resize
if flavor.id == instance.instance_type_id:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
# NOTE(sbauza): Make sure we persist the new flavor in case we had
# a successful scheduler call if and only if nothing bad happened
if request_spec.obj_what_changed():
request_spec.save()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec)
def _cleanup_allocated_networks(
self, context, instance, requested_networks):
try:
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if not (requested_networks and requested_networks.no_allocate):
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
except Exception:
LOG.exception('Failed to deallocate networks', instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE: It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@wrap_instance_event(prefix='conductor')
def live_migrate_instance(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance.uuid, },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec)
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
migration.status = 'accepted'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.migration_type = 'live-migration'
if instance.obj_attr_is_set('flavor'):
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance.flavor.id
else:
migration.old_instance_type_id = instance.instance_type_id
migration.new_instance_type_id = instance.instance_type_id
migration.create()
task = self._build_live_migrate_task(context, instance, destination,
block_migration, disk_over_commit,
migration, request_spec)
try:
task.execute()
except (exception.NoValidHost,
exception.ComputeHostNotFound,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.MigrationSchedulerRPCError) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
_set_vm_state(context, instance, ex, instance.vm_state)
migration.status = 'error'
migration.save()
except Exception as ex:
LOG.error('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.',
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
# Reset the task state to None to indicate completion of
# the operation as it is done in case of known exceptions.
_set_vm_state(context, instance, ex, vm_states.ERROR,
task_state=None)
migration.status = 'error'
migration.save()
raise exception.MigrationError(reason=six.text_type(ex))
def _build_live_migrate_task(self, context, instance, destination,
block_migration, disk_over_commit, migration,
request_spec=None):
return live_migrate.LiveMigrationTask(context, instance,
destination, block_migration,
disk_over_commit, migration,
self.compute_rpcapi,
self.servicegroup_api,
self.scheduler_client,
request_spec)
def _build_cold_migrate_task(self, context, instance, flavor,
request_spec, reservations,
clean_shutdown):
return migrate.MigrationTask(context, instance, flavor,
request_spec,
reservations, clean_shutdown,
self.compute_rpcapi,
self.scheduler_client)
def _destroy_build_request(self, context, instance):
# The BuildRequest needs to be stored until the instance is mapped to
# an instance table. At that point it will never be used again and
# should be deleted.
build_request = objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
# TODO(alaski): Sync API updates of the build_request to the
# instance before it is destroyed. Right now only locked_by can
# be updated before this is destroyed.
build_request.destroy()
def _populate_instance_mapping(self, context, instance, host):
try:
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# NOTE(alaski): If nova-api is up to date this exception should
# never be hit. But during an upgrade it's possible that an old
# nova-api didn't create an instance_mapping during this boot
# request.
LOG.debug('Instance was not mapped to a cell, likely due '
'to an older nova-api service running.',
instance=instance)
return None
else:
try:
host_mapping = objects.HostMapping.get_by_host(context,
host['host'])
except exception.HostMappingNotFound:
# NOTE(alaski): For now this exception means that a
# deployment has not migrated to cellsv2 and we should
# remove the instance_mapping that has been created.
# Eventually this will indicate a failure to properly map a
# host to a cell and we may want to reschedule.
inst_mapping.destroy()
return None
else:
inst_mapping.cell_mapping = host_mapping.cell_mapping
inst_mapping.save()
return inst_mapping
# NOTE(danms): This is never cell-targeted because it is only used for
# cellsv1 (which does not target cells directly) and n-cpu reschedules
# (which go to the cell conductor and thus are always cell-specific).
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
request_spec = {}
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
# TODO(sbauza): Provide directly the RequestSpec object
# when _set_vm_state_and_notify() and populate_retry()
# accept it
request_spec = scheduler_utils.build_request_spec(
context, image, instances)
scheduler_utils.populate_retry(
filter_properties, instances[0].uuid)
instance_uuids = [instance.uuid for instance in instances]
spec_obj = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
hosts = self._schedule_instances(
context, spec_obj, instance_uuids)
except Exception as exc:
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates,
exc, request_spec)
try:
# If the BuildRequest stays around then instance show/lists
# will pull from it rather than the errored instance.
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
pass
self._cleanup_allocated_networks(
context, instance, requested_networks)
return
for (instance, host) in six.moves.zip(instances, hosts):
instance.availability_zone = (
availability_zones.get_host_availability_zone(context,
host['host']))
try:
# NOTE(danms): This saves the az change above, refreshes our
# instance, and tells us if it has been deleted underneath us
instance.save()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# This is populated in scheduler_utils.populate_retry
num_attempts = local_filter_props.get('retry',
{}).get('num_attempts', 1)
if num_attempts <= 1:
# If this is a reschedule the instance is already mapped to
# this cell and the BuildRequest is already deleted so ignore
# the logic below.
inst_mapping = self._populate_instance_mapping(context,
instance,
host)
try:
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
# This indicates an instance delete has been requested in
# the API. Stop the build, cleanup the instance_mapping and
# potentially the block_device_mappings
# TODO(alaski): Handle block_device_mapping cleanup
if inst_mapping:
inst_mapping.destroy()
return
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _schedule_instances(self, context, request_spec,
instance_uuids=None):
scheduler_utils.setup_instance_group(context, request_spec)
hosts = self.scheduler_client.select_destinations(context,
request_spec, instance_uuids)
return hosts
@targets_cell
def unshelve_instance(self, context, instance, request_spec=None):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id, show_deleted=False)
else:
raise exception.ImageNotFound(image_id='')
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for image if image_id is None as
# "shelved_image_id" key is not set for volume backed
# instance during the shelve process
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is
# old. We need to mock one up the old way
filter_properties = {}
request_spec = scheduler_utils.build_request_spec(
context, image, [instance])
else:
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when populate_filter_properties and populate_retry()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
request_spec = request_spec.\
to_legacy_request_spec_dict()
scheduler_utils.populate_retry(filter_properties,
instance.uuid)
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
instance_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
LOG.debug('Requesting cell %(cell)s while unshelving',
{'cell': instance_mapping.cell_mapping.identity},
instance=instance)
if ('requested_destination' in request_spec and
request_spec.requested_destination):
request_spec.requested_destination.cell = (
instance_mapping.cell_mapping)
else:
request_spec.requested_destination = (
objects.Destination(
cell=instance_mapping.cell_mapping))
hosts = self._schedule_instances(context, request_spec,
[instance.uuid])
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
LOG.warning("No valid host found for unshelve instance",
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
LOG.error("Unshelve attempted but an error "
"has occurred", instance=instance)
else:
LOG.error('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED', instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
@targets_cell
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
request_spec=None):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
node = limits = None
if not host:
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is old
# We need to mock one up the old way
# TODO(sbauza): Provide directly the RequestSpec object
# when _set_vm_state_and_notify() accepts it
filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(
context, image_ref, [instance])
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
else:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host for avoiding the scheduler to pick it
request_spec.ignore_hosts = request_spec.ignore_hosts or []
request_spec.ignore_hosts.append(instance.host)
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
try:
hosts = self._schedule_instances(context, request_spec,
[instance.uuid])
host_dict = hosts.pop(0)
host, node, limits = (host_dict['host'],
host_dict['nodename'],
host_dict['limits'])
except exception.NoValidHost as ex:
request_spec = request_spec.to_legacy_request_spec_dict()
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning("No valid host found for rebuild",
instance=instance)
except exception.UnsupportedPolicyException as ex:
request_spec = request_spec.to_legacy_request_spec_dict()
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning("Server with unsupported policy "
"cannot be rebuilt", instance=instance)
try:
migration = objects.Migration.get_by_instance_and_status(
context, instance.uuid, 'accepted')
except exception.MigrationNotFoundByStatus:
LOG.debug("No migration record for the rebuild/evacuate "
"request.", instance=instance)
migration = None
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "rebuild.scheduled")
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
migration=migration,
host=host, node=node, limits=limits)
# TODO(avolkov): move method to bdm
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
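# Worked example (added): for a 'blank'/'local' BDM with no explicit
# volume_size and guest_format 'swap', the size is inherited from the
# flavor's 'swap' value; any other blank local disk falls back to the
# flavor's 'ephemeral_gb'. Explicit volume_size values are returned as-is.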
def _create_block_device_mapping(self, cell, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance_uuid
with obj_target_cell(bdm, cell):
bdm.update_or_create()
return instance_block_device_mapping
def _create_tags(self, context, instance_uuid, tags):
"""Create the Tags objects in the db."""
if tags:
tag_list = [tag.tag for tag in tags]
instance_tags = objects.TagList.create(
context, instance_uuid, tag_list)
return instance_tags
else:
return tags
def _bury_in_cell0(self, context, request_spec, exc,
build_requests=None, instances=None):
"""Ensure all provided build_requests and instances end up in cell0.
Cell0 is the fake cell we schedule dead instances to when we can't
schedule them somewhere real. Requests that don't yet have instances
will get a new instance, created in cell0. Instances that have not yet
been created will be created in cell0. All build requests are destroyed
after we're done. Failure to delete a build request will trigger the
instance deletion, just like the happy path in
schedule_and_build_instances() below.
"""
try:
cell0 = objects.CellMapping.get_by_uuid(
context, objects.CellMapping.CELL0_UUID)
except exception.CellMappingNotFound:
# Not yet setup for cellsv2. Instances will need to be written
# to the configured database. This will become a deployment
# error in Ocata.
LOG.error('No cell mapping found for cell0 while '
'trying to record scheduling failure. '
'Setup is incomplete.')
return
build_requests = build_requests or []
instances = instances or []
instances_by_uuid = {inst.uuid: inst for inst in instances}
for build_request in build_requests:
if build_request.instance_uuid not in instances_by_uuid:
# This is an instance object with no matching db entry.
instance = build_request.get_new_instance(context)
instances_by_uuid[instance.uuid] = instance
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
legacy_spec = request_spec.to_legacy_request_spec_dict()
for instance in instances_by_uuid.values():
with obj_target_cell(instance, cell0) as cctxt:
instance.create()
# Use the context targeted to cell0 here since the instance is
# now in cell0.
self._set_vm_state_and_notify(
cctxt, instance.uuid, 'build_instances', updates,
exc, legacy_spec)
try:
# We don't need the cell0-targeted context here because the
# instance mapping is in the API DB.
inst_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell0
inst_mapping.save()
except exception.InstanceMappingNotFound:
pass
for build_request in build_requests:
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# Instance was deleted before we finished scheduling
inst = instances_by_uuid[build_request.instance_uuid]
with obj_target_cell(inst, cell0):
inst.destroy()
def schedule_and_build_instances(self, context, build_requests,
request_specs, image,
admin_password, injected_files,
requested_networks, block_device_mapping,
tags=None):
# Add all the UUIDs for the instances
instance_uuids = [spec.instance_uuid for spec in request_specs]
try:
hosts = self._schedule_instances(context, request_specs[0],
instance_uuids)
except Exception as exc:
LOG.exception('Failed to schedule instances')
self._bury_in_cell0(context, request_specs[0], exc,
build_requests=build_requests)
return
host_mapping_cache = {}
instances = []
for (build_request, request_spec, host) in six.moves.zip(
build_requests, request_specs, hosts):
instance = build_request.get_new_instance(context)
# Convert host from the scheduler into a cell record
if host['host'] not in host_mapping_cache:
try:
host_mapping = objects.HostMapping.get_by_host(
context, host['host'])
host_mapping_cache[host['host']] = host_mapping
except exception.HostMappingNotFound as exc:
LOG.error('No host-to-cell mapping found for selected '
'host %(host)s. Setup is incomplete.',
{'host': host['host']})
self._bury_in_cell0(context, request_spec, exc,
build_requests=[build_request],
instances=[instance])
# This is a placeholder in case the quota recheck fails.
instances.append(None)
continue
else:
host_mapping = host_mapping_cache[host['host']]
cell = host_mapping.cell_mapping
# Before we create the instance, let's make one final check that
# the build request is still around and wasn't deleted by the user
# already.
try:
objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
except exception.BuildRequestNotFound:
# the build request is gone so we're done for this instance
LOG.debug('While scheduling instance, the build request '
'was already deleted.', instance=instance)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
continue
else:
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host['host']))
with obj_target_cell(instance, cell):
instance.create()
instances.append(instance)
# NOTE(melwitt): We recheck the quota after creating the
# objects to prevent users from allocating more resources
# than their allowed quota in the event of a race. This is
# configurable because it can be expensive if strict quota
# limits are not required in a deployment.
if CONF.quota.recheck_quota:
try:
compute_utils.check_num_instances_quota(
context, instance.flavor, 0, 0,
orig_num_req=len(build_requests))
except exception.TooManyInstances as exc:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(context, exc, instances,
build_requests,
request_specs)
for (build_request, request_spec, host, instance) in six.moves.zip(
build_requests, request_specs, hosts, instances):
            if instance is None:
                # Skip placeholders that were buried in cell0 or had their
                # build requests deleted by the user before instance create.
                continue
            # Re-derive the cell for this instance's host; reusing the
            # 'cell' variable left over from the loop above would pin every
            # instance to the last cell that loop happened to resolve.
            cell = host_mapping_cache[host['host']].cell_mapping
            filter_props = request_spec.to_legacy_filter_properties_dict()
scheduler_utils.populate_retry(filter_props, instance.uuid)
scheduler_utils.populate_filter_properties(filter_props,
host)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="conductor")
with obj_target_cell(instance, cell) as cctxt:
objects.InstanceAction.action_start(
cctxt, instance.uuid, instance_actions.CREATE,
want_result=False)
instance_bdms = self._create_block_device_mapping(
cell, instance.flavor, instance.uuid, block_device_mapping)
instance_tags = self._create_tags(cctxt, instance.uuid, tags)
# TODO(Kevin Zheng): clean this up once instance.create() handles
# tags; we do this so the instance.create notification in
# build_and_run_instance in nova-compute doesn't lazy-load tags
instance.tags = instance_tags if instance_tags \
else objects.TagList()
# Update mapping for instance. Normally this check is guarded by
# a try/except but if we're here we know that a newer nova-api
# handled the build process and would have created the mapping
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell
inst_mapping.save()
if not self._delete_build_request(
context, build_request, instance, cell, instance_bdms,
instance_tags):
# The build request was deleted before/during scheduling so
# the instance is gone and we don't have anything to build for
# this one.
continue
# NOTE(danms): Compute RPC expects security group names or ids
# not objects, so convert this to a list of names until we can
# pass the objects.
legacy_secgroups = [s.identifier
for s in request_spec.security_groups]
with obj_target_cell(instance, cell) as cctxt:
self.compute_rpcapi.build_and_run_instance(
cctxt, instance=instance, image=image,
request_spec=request_spec,
filter_properties=filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=legacy_secgroups,
block_device_mapping=instance_bdms,
host=host['host'], node=host['nodename'],
limits=host['limits'])
def _cleanup_build_artifacts(self, context, exc, instances, build_requests,
request_specs):
for (instance, build_request, request_spec) in six.moves.zip(
instances, build_requests, request_specs):
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
if instance is None:
continue
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
legacy_spec = request_spec.to_legacy_request_spec_dict()
self._set_vm_state_and_notify(context, instance.uuid,
'build_instances', updates, exc,
legacy_spec)
# Be paranoid about artifacts being deleted underneath us.
try:
build_request.destroy()
except exception.BuildRequestNotFound:
pass
try:
request_spec.destroy()
except exception.RequestSpecNotFound:
pass
def _delete_build_request(self, context, build_request, instance, cell,
instance_bdms, instance_tags):
"""Delete a build request after creating the instance in the cell.
This method handles cleaning up the instance in case the build request
is already deleted by the time we try to delete it.
:param context: the context of the request being handled
:type context: nova.context.RequestContext
:param build_request: the build request to delete
:type build_request: nova.objects.BuildRequest
:param instance: the instance created from the build_request
:type instance: nova.objects.Instance
:param cell: the cell in which the instance was created
:type cell: nova.objects.CellMapping
:param instance_bdms: list of block device mappings for the instance
:type instance_bdms: nova.objects.BlockDeviceMappingList
:param instance_tags: list of tags for the instance
:type instance_tags: nova.objects.TagList
:returns: True if the build request was successfully deleted, False if
the build request was already deleted and the instance is now gone.
"""
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# This indicates an instance deletion request has been
# processed, and the build should halt here. Clean up the
# bdm, tags and instance record.
with obj_target_cell(instance, cell) as cctxt:
with compute_utils.notify_about_instance_delete(
self.notifier, cctxt, instance):
try:
instance.destroy()
except exception.InstanceNotFound:
pass
except exception.ObjectActionError:
# NOTE(melwitt): Instance became scheduled during
# the destroy, "host changed". Refresh and re-destroy.
try:
instance.refresh()
instance.destroy()
except exception.InstanceNotFound:
pass
for bdm in instance_bdms:
with obj_target_cell(bdm, cell):
try:
bdm.destroy()
except exception.ObjectActionError:
pass
if instance_tags:
with try_target_cell(context, cell) as target_ctxt:
try:
objects.TagList.destroy(target_ctxt, instance.uuid)
except exception.InstanceNotFound:
pass
return False
return True
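    # Callers treat a False return as "the user deleted the instance while
    # it was being scheduled"; schedule_and_build_instances() above simply
    # skips the build_and_run_instance RPC for that instance.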
| apache-2.0 |
eyalfa/spark | examples/src/main/python/mllib/summary_statistics_example.py | 128 | 1550 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
import numpy as np
from pyspark.mllib.stat import Statistics
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="SummaryStatisticsExample") # SparkContext
# $example on$
mat = sc.parallelize(
[np.array([1.0, 10.0, 100.0]), np.array([2.0, 20.0, 200.0]), np.array([3.0, 30.0, 300.0])]
) # an RDD of Vectors
# Compute column summary statistics.
summary = Statistics.colStats(mat)
print(summary.mean()) # a dense vector containing the mean value for each column
print(summary.variance()) # column-wise variance
print(summary.numNonzeros()) # number of nonzeros in each column
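    # For the 3x3 matrix above the summary works out to (hand-checked;
    # colStats reports the unbiased, n-1 sample variance):
    #   mean        -> [2.0, 20.0, 200.0]
    #   variance    -> [1.0, 100.0, 10000.0]
    #   numNonzeros -> [3.0, 3.0, 3.0]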
# $example off$
sc.stop()
| apache-2.0 |
leiferikb/bitpop | build/third_party/twisted_10_2/twisted/conch/test/test_mixin.py | 62 | 1110 | # -*- twisted.conch.test.test_mixin -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import time
from twisted.internet import reactor, protocol
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch import mixin
class TestBufferingProto(mixin.BufferingMixin):
scheduled = False
rescheduled = 0
def schedule(self):
self.scheduled = True
return object()
def reschedule(self, token):
self.rescheduled += 1
class BufferingTest(unittest.TestCase):
def testBuffering(self):
p = TestBufferingProto()
t = p.transport = StringTransport()
self.failIf(p.scheduled)
L = ['foo', 'bar', 'baz', 'quux']
p.write('foo')
self.failUnless(p.scheduled)
self.failIf(p.rescheduled)
for s in L:
n = p.rescheduled
p.write(s)
self.assertEquals(p.rescheduled, n + 1)
self.assertEquals(t.value(), '')
p.flush()
self.assertEquals(t.value(), 'foo' + ''.join(L))
| gpl-3.0 |
Jackeagle/kernel_caf | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
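# The helpers imported from Util above are small; a sketch of the two used
# here (assumed signatures, shown only for context, not part of this file):
#
#   def nsecs(secs, nsecs):
#       return secs * 1000000000 + nsecs
#
#   def add_stats(dict, key, value):
#       ...  # tracks the (min, max, avg, count) tuple read in trace_end()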
| gpl-2.0 |
abhiroyg/coala | tests/results/ResultTest.py | 4 | 6085 | import unittest
import json
from os.path import abspath
from coalib.results.Diff import Diff
from coalib.results.Result import RESULT_SEVERITY, Result
from coalib.results.SourceRange import SourceRange
from coalib.output.JSONEncoder import create_json_encoder
class ResultTest(unittest.TestCase):
def test_origin(self):
uut = Result("origin", "msg")
self.assertEqual(uut.origin, "origin")
uut = Result(self, "msg")
self.assertEqual(uut.origin, "ResultTest")
uut = Result(None, "msg")
self.assertEqual(uut.origin, "")
def test_invalid_severity(self):
with self.assertRaises(ValueError):
Result("o", "m", severity=-5)
def test_string_dict(self):
uut = Result(None, "")
output = uut.to_string_dict()
self.assertEqual(output, {"id": str(uut.id),
"origin": "",
"message": "",
"file": "",
"line_nr": "",
"severity": "NORMAL",
"debug_msg": "",
"additional_info": "",
"confidence": "100"})
uut = Result.from_values(origin="origin",
message="msg",
file="file",
line=2,
severity=RESULT_SEVERITY.INFO,
additional_info="hi!",
debug_msg="dbg",
confidence=50)
output = uut.to_string_dict()
self.assertEqual(output, {"id": str(uut.id),
"origin": "origin",
"message": "msg",
"file": abspath("file"),
"line_nr": "2",
"severity": "INFO",
"debug_msg": "dbg",
"additional_info": "hi!",
"confidence": "50"})
uut = Result.from_values(origin="o", message="m", file="f", line=5)
output = uut.to_string_dict()
self.assertEqual(output["line_nr"], "5")
def test_apply(self):
file_dict = {
"f_a": ["1", "2", "3"],
"f_b": ["1", "2", "3"]
}
expected_file_dict = {
"f_a": ["1", "3_changed"],
"f_b": ["1", "2", "3"]
}
diff = Diff(file_dict['f_a'])
diff.delete_line(2)
diff.change_line(3, "3", "3_changed")
uut = Result("origin", "msg", diffs={"f_a": diff})
uut.apply(file_dict)
self.assertEqual(file_dict, expected_file_dict)
def test_add(self):
file_dict = {
"f_a": ["1", "2", "3"],
"f_b": ["1", "2", "3"],
"f_c": ["1", "2", "3"]
}
expected_file_dict = {
"f_a": ["1", "3_changed"],
"f_b": ["1", "2", "3_changed"],
"f_c": ["1", "2", "3"]
}
diff = Diff(file_dict['f_a'])
diff.delete_line(2)
uut1 = Result("origin", "msg", diffs={"f_a": diff})
diff = Diff(file_dict['f_a'])
diff.change_line(3, "3", "3_changed")
uut2 = Result("origin", "msg", diffs={"f_a": diff})
diff = Diff(file_dict['f_b'])
diff.change_line(3, "3", "3_changed")
uut3 = Result("origin", "msg", diffs={"f_b": diff})
uut1 += uut2 + uut3
uut1.apply(file_dict)
self.assertEqual(file_dict, expected_file_dict)
def test_overlaps(self):
overlapping_range = SourceRange.from_values("file1", 1, 1, 2, 2)
nonoverlapping_range = SourceRange.from_values("file2", 1, 1, 2, 2)
uut = Result.from_values("origin",
"message",
file="file1",
line=1,
column=1,
end_line=2,
end_column=2)
self.assertTrue(uut.overlaps(overlapping_range))
self.assertTrue(uut.overlaps([overlapping_range]))
self.assertFalse(uut.overlaps(nonoverlapping_range))
def test_location_repr(self):
result_a = Result(origin="o", message="m")
self.assertEqual(result_a.location_repr(), "the whole project")
result_b = Result.from_values("o", "m", file="e")
self.assertEqual(result_b.location_repr(), "'e'")
affected_code = (SourceRange.from_values('f'),
SourceRange.from_values('g'))
result_c = Result("o", "m", affected_code=affected_code)
self.assertEqual(result_c.location_repr(), "'f', 'g'")
affected_code = (SourceRange.from_values('f'),
SourceRange.from_values('f'))
result_d = Result("o", "m", affected_code=affected_code)
self.assertEqual(result_d.location_repr(), "'f'")
def test_json_diff(self):
file_dict = {
"f_a": ["1", "2", "3"],
"f_b": ["1", "2", "3"]
}
expected_file = {
"f_a": ["1", "3_changed"],
"f_b": ["1", "2", "3"]
}
diff = Diff(file_dict['f_a'])
diff.delete_line(2)
diff.change_line(3, "3", "3_changed")
uut = Result("origin", "msg", diffs={"f_a": diff}).__json__(True)
self.assertEqual(uut["diffs"]['f_a'].__json__(), "--- \n"
"+++ \n"
"@@ -1,3 +1,2 @@\n"
" 1-2-3+3_changed")
JSONEncoder = create_json_encoder(use_relpath=True)
json_dump = json.dumps(diff, cls=JSONEncoder, sort_keys=True)
self.assertEqual(
json_dump, '"--- \\n+++ \\n@@ -1,3 +1,2 @@\\n 1-2-3+3_changed"')
| agpl-3.0 |
landism/pants | contrib/android/tests/python/pants_test/contrib/android/tasks/test_aapt_builder_integration.py | 14 | 3770 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import unittest
from pants_test.contrib.android.android_integration_test import AndroidIntegrationTest
class AaptBuilderIntegrationTest(AndroidIntegrationTest):
"""Integration test for AaptBuilder, which builds an unsigned .apk
The Android SDK is modular, finding an SDK on the PATH is no guarantee that there is
an aapt binary anywhere on disk. The TOOLS are the ones required by the target in the
'test_aapt_bundle' method. If you add a target, you may need to expand the TOOLS list
and perhaps define new BUILD_TOOLS or TARGET_SDK class variables.
"""
TOOLS = [
os.path.join('build-tools', AndroidIntegrationTest.BUILD_TOOLS, 'aapt'),
os.path.join('build-tools', AndroidIntegrationTest.BUILD_TOOLS, 'lib', 'dx.jar'),
os.path.join('platforms', 'android-' + AndroidIntegrationTest.TARGET_SDK, 'android.jar')
]
tools = AndroidIntegrationTest.requirements(TOOLS)
@unittest.skipUnless(tools, reason='Android integration test requires tools {} '
'and ANDROID_HOME set in path.'.format(TOOLS))
def test_aapt_bundle(self):
self.bundle_test(AndroidIntegrationTest.TEST_TARGET)
def bundle_test(self, target):
pants_run = self.run_pants(['apk', target])
self.assert_success(pants_run)
@unittest.skipUnless(tools, reason='Android integration test requires tools {} '
'and ANDROID_HOME set in path.'.format(TOOLS))
def test_android_library_products(self):
# Doing the work under a tempdir gives us a handle for the workdir and guarantees a clean build.
with self.temporary_workdir() as workdir:
spec = 'contrib/android/examples/src/android/hello_with_library:'
pants_run = self.run_pants_with_workdir(['apk', '-ldebug', spec], workdir)
self.assert_success(pants_run)
# Make sure that the unsigned apk was produced for the binary target.
apk_file = 'apk/apk/org.pantsbuild.examples.hello_with_library.unsigned.apk'
self.assertEqual(os.path.isfile(os.path.join(workdir, apk_file)), True)
# Scrape debug statements.
def find_aapt_blocks(lines):
for line in lines:
if re.search(r'Executing: .*?\baapt package -f -M', line):
yield line
aapt_blocks = list(find_aapt_blocks(pants_run.stderr_data.split('\n')))
# Only one apk is built, so only one aapt invocation here, for any number of dependent libs.
self.assertEquals(len(aapt_blocks), 1, 'Expected one invocation of the aapt tool! '
'(was: {})\n{}'.format(len(aapt_blocks),
pants_run.stderr_data))
# Check to make sure the resources are being passed in correct order (apk->libs).
for line in aapt_blocks:
resource_dirs = re.findall(r'-S ([^\s]+)', line)
self.assertEqual(resource_dirs[0], 'contrib/android/examples/src/android/hello_with_library/main/res')
self.assertEqual(resource_dirs[1], 'contrib/android/examples/src/android/example_library/res')
# The other six are google-play-services v21 resource_dirs. Their presence is enough.
self.assertEquals(len(resource_dirs), 8, 'Expected eight resource dirs to be included '
'when calling aapt on hello_with_library apk.'
' (was: {})\n'.format(resource_dirs))
| apache-2.0 |
jeffreyliu3230/osf.io | website/addons/dataverse/views/config.py | 3 | 4673 | # -*- coding: utf-8 -*-
import httplib as http
from flask import request
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from website.project import decorators
from website.util.sanitize import assert_clean
from website.addons.dataverse import client
from website.addons.dataverse.provider import DataverseProvider
from website.addons.dataverse.serializer import DataverseSerializer
from website.oauth.models import ExternalAccount
@must_be_logged_in
def dataverse_get_user_accounts(auth):
""" Returns the list of all of the current user's authorized Dataverse accounts """
return DataverseSerializer(
user_settings=auth.user.get_addon('dataverse')
).serialized_user_settings
@must_be_logged_in
def dataverse_add_user_account(auth, **kwargs):
"""Verifies new external account credentials and adds to user's list"""
user = auth.user
provider = DataverseProvider()
host = request.json.get('host').rstrip('/')
api_token = request.json.get('api_token')
# Verify that credentials are valid
client.connect_or_401(host, api_token)
# Note: `DataverseSerializer` expects display_name to be a URL
try:
provider.account = ExternalAccount(
provider=provider.short_name,
provider_name=provider.name,
display_name=host, # no username; show host
oauth_key=host, # hijacked; now host
oauth_secret=api_token, # hijacked; now api_token
provider_id=api_token, # Change to username if Dataverse allows
)
provider.account.save()
except KeyExistsException:
# ... or get the old one
provider.account = ExternalAccount.find_one(
Q('provider', 'eq', provider.short_name) &
Q('provider_id', 'eq', api_token)
)
assert provider.account is not None
if provider.account not in user.external_accounts:
user.external_accounts.append(provider.account)
user.save()
return {}
@must_be_logged_in
@decorators.must_be_valid_project
@decorators.must_have_addon('dataverse', 'node')
def dataverse_get_config(node_addon, auth, **kwargs):
"""API that returns the serialized node settings."""
result = DataverseSerializer(
user_settings=auth.user.get_addon('dataverse'),
node_settings=node_addon,
).serialized_node_settings
return {'result': result}, http.OK
@decorators.must_have_permission('write')
@decorators.must_have_addon('dataverse', 'user')
@decorators.must_have_addon('dataverse', 'node')
def dataverse_get_datasets(node_addon, **kwargs):
"""Get list of datasets from provided Dataverse alias"""
alias = request.json.get('alias')
connection = client.connect_from_settings(node_addon)
dataverse = client.get_dataverse(connection, alias)
datasets = client.get_datasets(dataverse)
ret = {
'alias': alias, # include alias to verify dataset container
'datasets': [{'title': dataset.title, 'doi': dataset.doi} for dataset in datasets],
}
return ret, http.OK
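# A sketch of the payload returned above (hypothetical values):
#
#   {
#       "alias": "harvard",
#       "datasets": [
#           {"title": "My Study", "doi": "doi:10.5072/FK2/ABCDEF"}
#       ]
#   }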
@decorators.must_have_permission('write')
@decorators.must_have_addon('dataverse', 'user')
@decorators.must_have_addon('dataverse', 'node')
def dataverse_set_config(node_addon, auth, **kwargs):
"""Saves selected Dataverse and dataset to node settings"""
user_settings = node_addon.user_settings
user = auth.user
if user_settings and user_settings.owner != user:
raise HTTPError(http.FORBIDDEN)
try:
assert_clean(request.json)
except AssertionError:
# TODO: Test me!
raise HTTPError(http.NOT_ACCEPTABLE)
alias = request.json.get('dataverse').get('alias')
doi = request.json.get('dataset').get('doi')
if doi is None:
return HTTPError(http.BAD_REQUEST)
connection = client.connect_from_settings(node_addon)
dataverse = client.get_dataverse(connection, alias)
dataset = client.get_dataset(dataverse, doi)
node_addon.dataverse_alias = dataverse.alias
node_addon.dataverse = dataverse.title
node_addon.dataset_doi = dataset.doi
node_addon.dataset_id = dataset.id
node_addon.dataset = dataset.title
node = node_addon.owner
node.add_log(
action='dataverse_dataset_linked',
params={
'project': node.parent_id,
'node': node._primary_key,
'dataset': dataset.title,
},
auth=auth,
)
node_addon.save()
return {'dataverse': dataverse.title, 'dataset': dataset.title}, http.OK
| apache-2.0 |
jakobharlan/avango | avango-blender/blender-addon/avango_panel.py | 3 | 1843 | import bpy
from bpy.props import BoolProperty
class AvangoPanel(bpy.types.Panel):
    '''
    Intended to handle IO of OSC messages.
    '''
bl_idname = "AvangoPanel"
bl_label = "Avango panel"
bl_space_type = 'NODE_EDITOR'
bl_region_type = 'UI'
bl_category = 'FLOW'
bl_options = {'DEFAULT_CLOSED'}
use_pin = True
@classmethod
def poll(cls, context):
try:
return context.space_data.node_tree.bl_idname ==\
'AvangoCustomTreeType'
except:
return False
def draw(self, context):
pass
# ntree = context.space_data.node_tree
# layout = self.layout
# col = layout.column()
# tstr = 'start' if not ntree.avango_state else 'end'
# col.operator('wm.spflow_osc_server', text=tstr).mode = tstr
# # show some controls when server is started
# if tstr == 'end':
# col.operator(
# 'wm.spflow_eval_synthdef',
# text='send',
# ).mode = 'send'
# col.operator(
# 'wm.spflow_eval_synthdef',
# text='trigger',
# ).mode = 'trigger'
# col.operator(
# 'wm.spflow_eval_synthdef',
# text='free',
# ).mode = 'free'
# col.operator(
# 'wm.spflow_eval_synthdef',
# text='freeAll',
# ).mode = 'freeAll'
def register():
bpy.types.AvangoCustomTreeType.avango_state = BoolProperty(
default=False,
description=
'toggle used to indicate state of osc client and hide buttons'
)
bpy.utils.register_class(AvangoPanel)
def unregister():
bpy.utils.unregister_class(AvangoPanel)
del bpy.types.AvangoCustomTreeType.avango_state
| lgpl-3.0 |
E7ernal/quizwhiz | quizard/views/Results.py | 1 | 5042 | # vim: ts=4:sw=4:expandtabs
__author__ = 'zach.mott@gmail.com'
from django.conf import settings
from django.views import generic
from django.contrib import messages
from django.shortcuts import redirect
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from email_utils.tasks import send_mail
from quizard.models.Assignment import Assignment
class Results(generic.DetailView):
model = Assignment
slug_field = 'code'
slug_url_kwarg = 'code'
context_object_name = 'assignment'
template_name = 'quizard/results.html'
def get(self, request, *pos, **kw):
# If the user isn't currently working on an assignment,
# they shouldn't be allowed to access the results page.
if 'assignment_code' not in self.request.session:
messages.info(request, _('You must complete an assignment before visiting the results page.'))
return redirect('index')
# If the assignment is still in progress (i.e., we have a current position),
# send the user back to that position rather than allowing them to view their
# (incomplete) results.
if isinstance(request.session.get('assignment_in_progress', None), basestring):
messages.info(request, _('You must complete this assignment before viewing your results.'))
return redirect(request.session['assignment_in_progress'])
return super(Results, self).get(request, *pos, **kw)
def get_context_data(self, **kw):
context = super(Results, self).get_context_data(**kw)
context.update({
'points_earned': self.object.calculate_score(self.request.session['answers']),
'questions': self.build_question_dicts(
context['assignment'],
self.request.session['answers']
)
})
# Record the user's score on this assignment.
completed_assignments = self.request.session.get('completed_assignments', {})
completed_assignments[self.object.code] = context['points_earned']
self.request.session['completed_assignments'] = completed_assignments
# Clear the user's current assignment.
# del self.request.session['assignment_code']
self.request.session.modified = True
self.send_emails()
return context
def build_question_dicts(self, assignment, answers):
question_list = []
for question in assignment.questions.all():
question_list.append({
'question': question,
'answer': answers[str(question.pk)],
'correct': question.validate_answer(answers[str(question.pk)]),
})
return question_list
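    # Each entry in the returned list looks like (hypothetical values):
    #   {'question': <Question: ...>, 'answer': 'B', 'correct': True}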
def send_emails(self):
self.send_teacher_email(self.object)
self.send_summary_email(self.object)
def send_teacher_email(self, assignment):
"""
Email the assignment creator the results of this particular
quiz-taking session.
"""
self._send_email(
assignment,
assignment.created_by.email,
_("{assignment.code} results -- {assignee_name}").format(
assignment=assignment,
assignee_name=self.request.session['assignee_name'],
),
'quizard/emails/assignment_results.txt'
)
def send_summary_email(self, assignment):
"""
        Send a results receipt to the given third party, if there is one.
"""
if self.request.session.get('assignee_email', None):
self._send_email(
assignment,
self.request.session['assignee_email'],
_("{assignment.code} summary -- {assignee_name}").format(
assignment=assignment,
assignee_name=self.request.session['assignee_name']
),
'quizard/emails/assignment_results_summary.txt'
)
def _send_email(self, assignment, to_address, subject, email_template):
template_instance = get_template(email_template)
context = {
'assignment': assignment,
'points_earned': assignment.calculate_score(self.request.session['answers']),
'questions': self.build_question_dicts(
assignment,
self.request.session['answers'],
),
'assignee_name': self.request.session['assignee_name'],
'DEFAULT_FROM_EMAIL': settings.DEFAULT_FROM_EMAIL,
'BRAND_NAME': settings.BRAND_NAME
}
args = (
subject,
template_instance.render(context),
settings.DEFAULT_FROM_EMAIL,
to_address
)
# Don't try to invoke the task asynchronously in DEBUG mode,
# because it's a dev environment and celery probably isn't configured.
if settings.DEBUG:
return send_mail(*args)
else:
return send_mail.apply_async(args)
| mit |
hj3938/panda3d | direct/src/gui/DirectFrame.py | 8 | 6980 | """Undocumented Module"""
__all__ = ['DirectFrame']
from panda3d.core import *
import DirectGuiGlobals as DGG
from DirectGuiBase import *
from OnscreenImage import OnscreenImage
from OnscreenGeom import OnscreenGeom
import types
class DirectFrame(DirectGuiWidget):
DefDynGroups = ('text', 'geom', 'image')
def __init__(self, parent = None, **kw):
# Inherits from DirectGuiWidget
# A Direct Frame can have:
# - A background texture (pass in path to image, or Texture Card)
# - A midground geometry item (pass in geometry)
# - A foreground text Node (pass in text string or Onscreen Text)
# Each of these has 1 or more states
# The same object can be used for all states or each
# state can have a different text/geom/image (for radio button
# and check button indicators, for example).
optiondefs = (
# Define type of DirectGuiWidget
('pgFunc', PGItem, None),
('numStates', 1, None),
('state', self.inactiveInitState, None),
# Frame can have:
# A background texture
('image', None, self.setImage),
# A midground geometry item
('geom', None, self.setGeom),
# A foreground text node
('text', None, self.setText),
# Change default value of text mayChange flag from 0
# (OnscreenText.py) to 1
('textMayChange', 1, None),
)
# Merge keyword options with default options
self.defineoptions(kw, optiondefs,
dynamicGroups = DirectFrame.DefDynGroups)
# Initialize superclasses
DirectGuiWidget.__init__(self, parent)
# Call option initialization functions
self.initialiseoptions(DirectFrame)
def destroy(self):
DirectGuiWidget.destroy(self)
def setText(self):
# Determine if user passed in single string or a sequence
if self['text'] == None:
textList = (None,) * self['numStates']
elif isinstance(self['text'], types.StringTypes):
# If just passing in a single string, make a tuple out of it
textList = (self['text'],) * self['numStates']
else:
# Otherwise, hope that the user has passed in a tuple/list
textList = self['text']
# Create/destroy components
for i in range(self['numStates']):
component = 'text' + repr(i)
# If fewer items specified than numStates,
# just repeat last item
try:
text = textList[i]
except IndexError:
text = textList[-1]
if self.hascomponent(component):
if text == None:
# Destroy component
self.destroycomponent(component)
else:
self[component + '_text'] = text
else:
if text == None:
return
else:
from OnscreenText import OnscreenText
self.createcomponent(
component, (), 'text',
OnscreenText,
(), parent = self.stateNodePath[i],
text = text, scale = 1, mayChange = self['textMayChange'],
sort = DGG.TEXT_SORT_INDEX,
)
def setGeom(self):
# Determine argument type
geom = self['geom']
if geom == None:
# Passed in None
geomList = (None,) * self['numStates']
elif isinstance(geom, NodePath) or \
isinstance(geom, types.StringTypes):
# Passed in a single node path, make a tuple out of it
geomList = (geom,) * self['numStates']
else:
# Otherwise, hope that the user has passed in a tuple/list
geomList = geom
# Create/destroy components
for i in range(self['numStates']):
component = 'geom' + repr(i)
# If fewer items specified than numStates,
# just repeat last item
try:
geom = geomList[i]
except IndexError:
geom = geomList[-1]
if self.hascomponent(component):
if geom == None:
# Destroy component
self.destroycomponent(component)
else:
self[component + '_geom'] = geom
else:
if geom == None:
return
else:
self.createcomponent(
component, (), 'geom',
OnscreenGeom,
(), parent = self.stateNodePath[i],
geom = geom, scale = 1,
sort = DGG.GEOM_SORT_INDEX)
def setImage(self):
# Determine argument type
arg = self['image']
if arg == None:
# Passed in None
imageList = (None,) * self['numStates']
elif isinstance(arg, NodePath) or \
isinstance(arg, Texture) or \
isinstance(arg, types.StringTypes):
# Passed in a single node path, make a tuple out of it
imageList = (arg,) * self['numStates']
else:
# Otherwise, hope that the user has passed in a tuple/list
if ((len(arg) == 2) and
isinstance(arg[0], types.StringTypes) and
isinstance(arg[1], types.StringTypes)):
# Its a model/node pair of strings
imageList = (arg,) * self['numStates']
else:
# Assume its a list of node paths
imageList = arg
# Create/destroy components
for i in range(self['numStates']):
component = 'image' + repr(i)
# If fewer items specified than numStates,
# just repeat last item
try:
image = imageList[i]
except IndexError:
image = imageList[-1]
if self.hascomponent(component):
if image == None:
# Destroy component
self.destroycomponent(component)
else:
self[component + '_image'] = image
else:
if image == None:
return
else:
self.createcomponent(
component, (), 'image',
OnscreenImage,
(), parent = self.stateNodePath[i],
image = image, scale = 1,
sort = DGG.IMAGE_SORT_INDEX)
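# A minimal usage sketch (assumes a running ShowBase app; the parameter
# values here are illustrative, not exhaustive):
#
#   frame = DirectFrame(text='Hello', frameSize=(-1, 1, -0.5, 0.5))
#   frame['text'] = 'Updated'   # allowed since textMayChange defaults to 1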
| bsd-3-clause |
macosforge/ccs-calendarserver | calendarserver/tools/agent.py | 1 | 10761 | #!/usr/bin/env python
# -*- test-case-name: calendarserver.tools.test.test_agent -*-
##
# Copyright (c) 2013-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
A service spawned on-demand by launchd, meant to handle configuration requests
from Server.app. When a request comes in on the socket specified in the
launchd agent.plist, launchd will run "caldavd -t Agent" which ends up creating
this service. Requests are made using HTTP POSTS to /gateway, and are
authenticated by OpenDirectory.
"""
from __future__ import print_function
__all__ = [
"makeAgentService",
]
import cStringIO
from plistlib import readPlistFromString, writePlistToString
import socket
from twext.python.launchd import launchActivateSocket
from twext.python.log import Logger
from twext.who.checker import HTTPDigestCredentialChecker
from twext.who.opendirectory import (
DirectoryService as OpenDirectoryDirectoryService,
NoQOPDigestCredentialFactory
)
from twisted.application.internet import StreamServerEndpointService
from twisted.cred.portal import IRealm, Portal
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.endpoints import AdoptedStreamServerEndpoint
from twisted.internet.protocol import Factory
from twisted.protocols import amp
from twisted.web.guard import HTTPAuthSessionWrapper
from twisted.web.resource import IResource, Resource, ForbiddenResource
from twisted.web.server import Site, NOT_DONE_YET
from zope.interface import implements
log = Logger()
class AgentRealm(object):
"""
Only allow a specified list of avatar IDs to access the site
"""
implements(IRealm)
def __init__(self, root, allowedAvatarIds):
"""
@param root: The root resource of the site
@param allowedAvatarIds: The list of IDs to allow access to
"""
self.root = root
self.allowedAvatarIds = allowedAvatarIds
def requestAvatar(self, avatarId, mind, *interfaces):
if IResource in interfaces:
if avatarId.shortNames[0] in self.allowedAvatarIds:
return (IResource, self.root, lambda: None)
else:
return (IResource, ForbiddenResource(), lambda: None)
raise NotImplementedError()
class AgentGatewayResource(Resource):
"""
The gateway resource which forwards incoming requests through
gateway.Runner.
"""
isLeaf = True
def __init__(self, store, directory, inactivityDetector):
"""
@param store: an already opened store
@param directory: a directory service
        @param inactivityDetector: the InactivityDetector to notify when
        requests come in
"""
Resource.__init__(self)
self.store = store
self.directory = directory
self.inactivityDetector = inactivityDetector
def render_POST(self, request):
"""
Take the body of the POST request and feed it to gateway.Runner();
return the result as the response body.
"""
self.inactivityDetector.activity()
def onSuccess(result, output):
txt = output.getvalue()
output.close()
request.write(txt)
request.finish()
def onError(failure):
message = failure.getErrorMessage()
tbStringIO = cStringIO.StringIO()
failure.printTraceback(file=tbStringIO)
tbString = tbStringIO.getvalue()
tbStringIO.close()
error = {
"Error": message,
"Traceback": tbString,
}
log.error("command failed {error}", error=failure)
request.write(writePlistToString(error))
request.finish()
from calendarserver.tools.gateway import Runner
body = request.content.read()
command = readPlistFromString(body)
output = cStringIO.StringIO()
runner = Runner(self.store, [command], output=output)
d = runner.run()
d.addCallback(onSuccess, output)
d.addErrback(onError)
return NOT_DONE_YET
def makeAgentService(store):
"""
Returns a service which will process GatewayAMPCommands, using a socket
    file descriptor acquired by launchd.
@param store: an already opened store
@returns: service
"""
from twisted.internet import reactor
sockets = launchActivateSocket("AgentSocket")
fd = sockets[0]
family = socket.AF_INET
endpoint = AdoptedStreamServerEndpoint(reactor, fd, family)
directory = store.directoryService()
def becameInactive():
log.warn("Agent inactive; shutting down")
reactor.stop()
from twistedcaldav.config import config
inactivityDetector = InactivityDetector(
reactor, config.AgentInactivityTimeoutSeconds, becameInactive
)
root = Resource()
root.putChild(
"gateway",
AgentGatewayResource(
store, directory, inactivityDetector
)
)
# We need this service to be able to return com.apple.calendarserver,
# so tell it not to suppress system accounts.
directory = OpenDirectoryDirectoryService(
"/Local/Default", suppressSystemRecords=False
)
portal = Portal(
AgentRealm(root, [u"com.apple.calendarserver"]),
[HTTPDigestCredentialChecker(directory)]
)
credentialFactory = NoQOPDigestCredentialFactory(
"md5", "/Local/Default"
)
wrapper = HTTPAuthSessionWrapper(portal, [credentialFactory])
site = Site(wrapper)
return StreamServerEndpointService(endpoint, site)
class InactivityDetector(object):
"""
If no 'activity' takes place for a specified amount of time, a method
will get called. Activity causes the inactivity time threshold to be
reset.
"""
def __init__(self, reactor, timeoutSeconds, becameInactive):
"""
@param reactor: the reactor
        @param timeoutSeconds: the number of seconds considered to mean inactive
        @param becameInactive: the method to call (with no arguments) when
inactivity is reached
"""
self._reactor = reactor
self._timeoutSeconds = timeoutSeconds
self._becameInactive = becameInactive
if self._timeoutSeconds > 0:
self._delayedCall = self._reactor.callLater(
self._timeoutSeconds,
self._inactivityThresholdReached
)
def _inactivityThresholdReached(self):
"""
The delayed call has fired. We're inactive. Call the becameInactive
method.
"""
self._becameInactive()
def activity(self):
"""
        Call this to let the InactivityDetector know there has been activity.
It will reset the timeout.
"""
if self._timeoutSeconds > 0:
if self._delayedCall.active():
self._delayedCall.reset(self._timeoutSeconds)
else:
self._delayedCall = self._reactor.callLater(
self._timeoutSeconds,
self._inactivityThresholdReached
)
def stop(self):
"""
Cancels the delayed call
"""
if self._timeoutSeconds > 0:
if self._delayedCall.active():
self._delayedCall.cancel()
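# A minimal usage sketch for InactivityDetector (hypothetical timeout):
#
#   from twisted.internet import reactor
#   detector = InactivityDetector(reactor, 600, reactor.stop)
#   ...
#   detector.activity()   # call on each request to push the deadline out
#   detector.stop()       # cancel the pending timeout on clean shutdown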
#
# Alternate implementation using AMP instead of HTTP
#
class GatewayAMPCommand(amp.Command):
"""
A command to be executed by gateway.Runner
"""
arguments = [('command', amp.String())]
response = [('result', amp.String())]
class GatewayAMPProtocol(amp.AMP):
"""
Passes commands to gateway.Runner and returns the results
"""
def __init__(self, store, directory):
"""
        @param store: an already opened store
@param directory: a directory service
"""
amp.AMP.__init__(self)
self.store = store
self.directory = directory
@GatewayAMPCommand.responder
@inlineCallbacks
def gatewayCommandReceived(self, command):
"""
Process a command via gateway.Runner
@param command: GatewayAMPCommand
@returns: a deferred returning a dict
"""
command = readPlistFromString(command)
output = cStringIO.StringIO()
from calendarserver.tools.gateway import Runner
runner = Runner(
self.store,
[command], output=output
)
try:
yield runner.run()
result = output.getvalue()
output.close()
except Exception as e:
error = {"Error": str(e)}
result = writePlistToString(error)
output.close()
returnValue(dict(result=result))
class GatewayAMPFactory(Factory):
"""
Builds GatewayAMPProtocols
"""
protocol = GatewayAMPProtocol
def __init__(self, store):
"""
@param store: an already opened store
"""
self.store = store
self.directory = self.store.directoryService()
def buildProtocol(self, addr):
        return GatewayAMPProtocol(self.store, self.directory)
#
# A test AMP client
#
command = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>command</key>
<string>getLocationAndResourceList</string>
</dict>
</plist>"""
def getList():
# For the sample client, below:
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
creator = ClientCreator(reactor, amp.AMP)
host = '127.0.0.1'
import sys
if len(sys.argv) > 1:
host = sys.argv[1]
d = creator.connectTCP(host, 62308)
def connected(ampProto):
return ampProto.callRemote(GatewayAMPCommand, command=command)
d.addCallback(connected)
def resulted(result):
return result['result']
d.addCallback(resulted)
def done(result):
print('Done: %s' % (result,))
reactor.stop()
d.addCallback(done)
reactor.run()
if __name__ == '__main__':
getList()
| apache-2.0 |
shivam1111/odoo | openerp/report/render/html2html/__init__.py | 381 | 1091 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from html2html import parseString
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JioEducation/edx-platform | openedx/core/djangoapps/content/course_overviews/migrations/0002_add_course_catalog_fields.py | 81 | 1065 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_overviews', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='courseoverview',
name='announcement',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='catalog_visibility',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='course_video_url',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='effort',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='courseoverview',
name='short_description',
field=models.TextField(null=True),
),
]
| agpl-3.0 |
rocky/python2-trepan | trepan/bwprocessor/main.py | 1 | 18126 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2010, 2013-2015 Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect, linecache, sys, traceback, types
import pyficache
from repr import Repr
from trepan import vprocessor as Mprocessor
from trepan import exception as Mexcept, misc as Mmisc
from trepan.lib import bytecode as Mbytecode, display as Mdisplay
from trepan.lib import thred as Mthread
from trepan.bwprocessor import location as Mlocation, msg as Mmsg
def get_stack(f, t, botframe, proc_obj=None):
"""Return a stack of frames which the debugger will use for in
showing backtraces and in frame switching. As such various frame
that are really around may be excluded unless we are debugging the
sebugger. Also we will add traceback frame on top if that
exists."""
exclude_frame = lambda f: False
if proc_obj:
settings = proc_obj.debugger.settings
if not settings['dbg_trepan']:
exclude_frame = lambda f: \
proc_obj.core.ignore_filter.is_included(f)
pass
pass
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
if exclude_frame(f): break # See commented alternative below
stack.append((f, f.f_lineno))
# bdb has:
# if f is botframe: break
f = f.f_back
pass
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
pass
return stack, i
def run_hooks(obj, hooks, *args):
"""Run each function in `hooks' with args"""
for hook in hooks:
if hook(obj, *args): return True
pass
return False
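# For example (hypothetical hooks), run_hooks() stops at the first hook
# that returns a truthy value:
#
#   run_hooks(proc, [lambda obj: False, lambda obj: True])   # -> True
#   run_hooks(proc, [lambda obj: False, lambda obj: None])   # -> False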
def resolve_name(obj, command_name):
if command_name not in obj.commands:
return None
return command_name
# Default settings for command processor method call
DEFAULT_PROC_OPTS = {
# A list of debugger initialization files to read on first command
# loop entry. Often this something like [~/.trepanrc] which the
# front-end sets.
'initfile_list' : []
}
class BWProcessor(Mprocessor.Processor):
def __init__(self, core_obj, opts=None):
Mprocessor.Processor.__init__(self, core_obj)
self.response = {'errs': [], 'msg': []}
self.continue_running = False # True if we should leave command loop
self.cmd_instances = self._populate_commands()
self.cmd_name = '' # command name before alias or
# macro resolution
self.current_command = '' # Current command getting run
self.debug_nest = 1
self.display_mgr = Mdisplay.DisplayMgr()
self.intf = core_obj.debugger.intf
self.last_command = None # Initially a no-op
self.precmd_hooks = []
# If not:
# self.location = lambda : print_location(self)
self.preloop_hooks = []
self.postcmd_hooks = []
self._populate_cmd_lists()
# Stop only if line/file is different from last time
self.different_line = None
# These values updated on entry. Set initial values.
self.curframe = None
self.event = None
self.event_arg = None
self.frame = None
self.list_lineno = 0
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
self._repr = Repr()
self._repr.maxstring = 100
self._repr.maxother = 60
self._repr.maxset = 10
self._repr.maxfrozen = 10
self._repr.array = 10
self._saferepr = self._repr.repr
self.stack = []
self.thread_name = None
self.frame_thread_name = None
return
def add_preloop_hook(self, hook, position=-1, nodups = True):
if hook in self.preloop_hooks: return False
self.preloop_hooks.insert(position, hook)
return True
def adjust_frame(self, pos, absolute_pos):
"""Adjust stack frame by pos positions. If absolute_pos then
pos is an absolute number. Otherwise it is a relative number.
A negative number indexes from the other end."""
if not self.curframe:
Mmsg.errmsg(self, "No stack.")
return
        # Below we normalize away negative positions. At the end, pos will be
# the new value of self.curindex.
if absolute_pos:
if pos >= 0:
pos = len(self.stack)-pos-1
else:
pos = -pos-1
else:
pos += self.curindex
if pos < 0:
Mmsg.errmsg(self,
"Adjusting would put us beyond the oldest frame.")
return
elif pos >= len(self.stack):
Mmsg.errmsg(self,
"Adjusting would put us beyond the newest frame.")
return
self.curindex = pos
self.curframe = self.stack[self.curindex][0]
self.print_location()
self.list_lineno = None
return
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
# Consider using is_exec_stmt(). I just don't understand
# the conditions under which the below test is true.
if filename == '<string>' and self.debugger.mainpyfile:
filename = self.debugger.mainpyfile
pass
return filename
def event_processor(self, frame, event, event_arg, prompt='Trepan'):
        'Command event processor: read commands and do something with them.'
self.frame = frame
self.event = event
self.event_arg = event_arg
filename = frame.f_code.co_filename
lineno = frame.f_lineno
line = linecache.getline(filename, lineno, frame.f_globals)
if not line:
opts = {'output': 'plain',
'reload_on_change': self.settings('reload'),
'strip_nl': False}
line = pyficache.getline(filename, lineno, opts)
self.current_source_text = line
if self.settings('skip') is not None:
if Mbytecode.is_def_stmt(line, frame):
return True
if Mbytecode.is_class_def(line, frame):
return True
pass
self.thread_name = Mthread.current_thread_name()
self.frame_thread_name = self.thread_name
self.process_commands()
return True
def forget(self):
""" Remove memory of state variables set in the command processor """
self.stack = []
self.curindex = 0
self.curframe = None
self.thread_name = None
self.frame_thread_name = None
return
def eval(self, arg):
"""Eval string arg in the current frame context."""
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
pass
else: exc_type_name = t.__name__
Mmsg.errmsg(self, str("%s: %s" % (exc_type_name, arg)))
raise
return None # Not reached
def exec_line(self, line):
if self.curframe:
local_vars = self.curframe.f_locals
global_vars = self.curframe.f_globals
else:
local_vars = None
# FIXME: should probably have place where the
# user can store variables inside the debug session.
# The setup for this should be elsewhere. Possibly
# in interaction.
global_vars = None
try:
code = compile(line + '\n', '"%s"' % line, 'single')
exec code in global_vars, local_vars
except:
t, v = sys.exc_info()[:2]
if isinstance(t, types.StringType):
exc_type_name = t
else: exc_type_name = t.__name__
Mmsg.errmsg(self, '%s: %s' % (str(exc_type_name), str(v)))
pass
return
def ok_for_running(self, cmd_obj, name, cmd_hash):
'''We separate some of the common debugger command checks here:
whether it makes sense to run the command in this execution state,
if the command has the right number of arguments and so on.
'''
if hasattr(cmd_obj, 'execution_set'):
if not (self.core.execution_status in cmd_obj.execution_set):
part1 = ("Command '%s' is not available for execution "
"status:" % name)
Mmsg.errmsg(self,
Mmisc.
wrapped_lines(part1,
self.core.execution_status,
self.debugger.settings['width']))
return False
pass
if self.frame is None and cmd_obj.need_stack:
self.intf[-1].errmsg("Command '%s' needs an execution stack."
% name)
return False
return True
def process_commands(self):
"""Handle debugger commands."""
if self.core.execution_status != 'No program':
self.setup()
Mlocation.print_location(self, self.event)
pass
leave_loop = run_hooks(self, self.preloop_hooks)
self.continue_running = False
while not leave_loop:
try:
run_hooks(self, self.precmd_hooks)
# bdb had a True return to leave loop.
# A more straight-forward way is to set
# instance variable self.continue_running.
leave_loop = self.process_command()
if leave_loop or self.continue_running: break
except EOFError:
# If we have stacked interfaces, pop to the next
# one. If this is the last one however, we'll
# just stick with that. FIXME: Possibly we should
# check to see if we are interactive. and not
# leave if that's the case. Is this the right
# thing? investigate and fix.
if len(self.debugger.intf) > 1:
del self.debugger.intf[-1]
self.last_command = ''
else:
if self.debugger.intf[-1].output:
self.debugger.intf[-1].output.writeline('Leaving')
raise Mexcept.DebuggerQuit
pass
break
pass
pass
return run_hooks(self, self.postcmd_hooks)
def process_command(self):
# process command
self.response = {'errs': [], 'msg': []}
cmd_hash = self.intf[-1].read_command()
# FIXME: put this into a routine
if isinstance(cmd_hash, types.DictType):
Mmsg.errmsg(self, "invalid input, expecting a hash: %s" % cmd_hash,
{'set_name': True})
self.intf[-1].msg(self.response)
return False
if 'command' not in cmd_hash:
Mmsg.errmsg(self,
"invalid input, expecting a 'command' key: %s" %
cmd_hash,
{'set_name': True})
self.intf[-1].msg(self.response)
return False
self.cmd_name = cmd_hash['command']
cmd_name = resolve_name(self, self.cmd_name)
if cmd_name:
cmd_obj = self.commands[cmd_name]
if self.ok_for_running(cmd_obj, cmd_name, cmd_hash):
try:
self.response['name'] = cmd_name
result = cmd_obj.run(cmd_hash)
self.intf[-1].msg(self.response)
if result: return result
except (Mexcept.DebuggerQuit,
Mexcept.DebuggerRestart, SystemExit):
# Let these exceptions propagate through
raise
except:
Mmsg.errmsg(self, "INTERNAL ERROR: " +
traceback.format_exc())
pass
pass
else:
self.undefined_cmd(cmd_name)
pass
pass
return False
def remove_preloop_hook(self, hook):
try:
position = self.preloop_hooks.index(hook)
except ValueError:
return False
del self.preloop_hooks[position]
return True
def setup(self):
"""Initialization done before entering the debugger-command
loop. In particular we set up the call stack used for local
variable lookup and frame/up/down commands.
We return True if we should NOT enter the debugger-command
loop."""
self.forget()
if self.settings('dbg_trepan'):
self.frame = inspect.currentframe()
pass
if self.event in ['exception', 'c_exception']:
exc_type, exc_value, exc_traceback = self.event_arg
else:
_, _, exc_traceback = (None, None, None,) # NOQA
pass
if self.frame or exc_traceback:
self.stack, self.curindex = \
get_stack(self.frame, exc_traceback, None, self)
self.curframe = self.stack[self.curindex][0]
self.thread_name = Mthread.current_thread_name()
else:
self.stack = self.curframe = \
self.botframe = None
pass
if self.curframe:
self.list_lineno = \
max(1, inspect.getlineno(self.curframe))
else:
self.list_lineno = None
pass
# if self.execRcLines()==1: return True
return False
def undefined_cmd(self, cmd):
"""Error message when a command doesn't exist"""
Mmsg.errmsg(self, 'Undefined command: "%s". Try "help".' % cmd)
return
def _populate_commands(self):
""" Create an instance of each of the debugger
commands. Commands are found by importing files in the
directory 'command'. Some files are excluded via an array set
in __init__. For each of the remaining files, we import them
and scan for class names inside those files and for each class
name, we will create an instance of that class. The set of
DebuggerCommand class instances form set of possible debugger
commands."""
cmd_instances = []
from trepan.bwprocessor import command as Mcommand
eval_cmd_template = 'command_mod.%s(self)'
for mod_name in Mcommand.__modules__:
import_name = "command." + mod_name
try:
command_mod = getattr(__import__(import_name), mod_name)
except:
print('Error importing %s: %s' %
(mod_name, sys.exc_info()[0]))
continue
classnames = [ tup[0] for tup in
inspect.getmembers(command_mod, inspect.isclass)
if ('DebuggerCommand' != tup[0] and
tup[0].endswith('Command')) ]
for classname in classnames:
eval_cmd = eval_cmd_template % classname
try:
instance = eval(eval_cmd)
cmd_instances.append(instance)
except:
print ('Error loading %s from %s: %s' %
(classname, mod_name, sys.exc_info()[0]))
pass
pass
pass
return cmd_instances
def _populate_cmd_lists(self):
""" Populate self.commands"""
self.commands = {}
for cmd_instance in self.cmd_instances:
cmd_name = cmd_instance.name
self.commands[cmd_name] = cmd_instance
pass
return
pass
# Demo it
if __name__=='__main__':
from trepan.interfaces import bullwinkle as Mbullwinkle
class Debugger:
def __init__(self):
self.intf = [Mbullwinkle.BWInterface()]
self.settings = {'dbg_trepan': True, 'reload': False}
pass
class MockCore:
def filename(self, fn): return fn
def canonic_filename(self, frame): return frame.f_code.co_filename
def __init__(self):
self.debugger = Debugger()
return
pass
core = MockCore()
bwproc = BWProcessor(core)
print 'commands:'
commands = bwproc.commands.keys()
commands.sort()
print commands
print resolve_name(bwproc, 'quit')
# print '-' * 10
# print_source_line(sys.stdout.write, 100, 'source_line_test.py')
# print '-' * 10
bwproc.frame = sys._getframe()
bwproc.setup()
# print
# print '-' * 10
Mlocation.print_location(bwproc)
# print 'Removing non-existing quit hook: ', bwproc.remove_preloop_hook(fn)
# bwproc.add_preloop_hook(fn)
# print bwproc.preloop_hooks
# print 'Removed existing quit hook: ', bwproc.remove_preloop_hook(fn)
pass
| gpl-3.0 |
dzo/kernel_ville | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
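# lock_waits maps (tid, lock) -> (min, max, avg, count) tuples, accumulated by
# Util.add_stats() above and reported in trace_end() below.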
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
drewp/commentserve | commentServe.py | 1 | 12072 | #!/usr/bin/python
"""comment storage for blogs, photo site, etc
see also:
sioc:Post sioc:has_reply sioc:Post / types:Comment
sioc:content
content:encoded
dcterms:created
types:BlogPost
types:Comment
"""
import web, time, logging, pystache, traceback
from datetime import datetime
from uuid import uuid4
from html5lib import html5parser, sanitizer
from web.contrib.template import render_genshi
from rdflib import RDF, URIRef, Literal, Namespace
from dateutil.parser import parse
from honeypot import HoneypotChecker
import restkit
from dateutil.tz import tzlocal
import cyclone.web
from twisted.internet import reactor
from db import DbMongo
SIOC = Namespace("http://rdfs.org/sioc/ns#")
CONTENT = Namespace("http://purl.org/rss/1.0/modules/content/")
DCTERMS = Namespace("http://purl.org/dc/terms/")
XS = Namespace("http://www.w3.org/2001/XMLSchema#")
FOAF = Namespace("http://xmlns.com/foaf/0.1/")
HTTP = Namespace("http://www.w3.org/2006/http#")
OV = Namespace("http://open.vocab.org/terms/")
log = logging.getLogger()
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
log.setLevel(logging.INFO)
render = render_genshi(['.'], auto_reload=False)
def literalFromUnix(t):
i = datetime.fromtimestamp(int(t)).replace(tzinfo=tzlocal()).isoformat()
return Literal(i, datatype=XS['dateTime'])
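# e.g. literalFromUnix(0) would yield Literal("1970-01-01T00:00:00+00:00",
# datatype=XS['dateTime']) on a UTC host; the offset follows the local timezone.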
def agoString(literalTime):
d = parse(str(literalTime))
# (assuming 'now' is in the same timezone as d)
return web.utils.datestr(d, datetime.now().replace(tzinfo=tzlocal()))
def newPublicUser(forwardedFor, name, email):
"""
a non-logged-in user is posting a comment on a resource that's
open for public comments. We make a new URI for this user (every
time) and store some extra statements.
pass your web post params, which might include 'name' and 'email'.
returns user URI and a list of triples to be stored
"""
stmts = []
user = URIRef('http://bigasterisk.com/guest/%s' % uuid4())
header = URIRef(user + "/header1")
stmts.extend([
(user, RDF.type, FOAF.Person),
(user, DCTERMS.created, literalFromUnix(time.time())),
(user, OV.usedHttpHeader, header),
(header, HTTP.fieldName, Literal('X-Forwarded-For')),
(header, HTTP.fieldValue, Literal(forwardedFor)),
])
if name:
stmts.append((user, FOAF.name, Literal(name)))
if email:
stmts.append((user, FOAF.mbox, URIRef("mailto:%s" % email)))
return user, stmts
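# Hypothetical usage:
#   user, stmts = newPublicUser("203.0.113.7", "Alice", "alice@example.org")
# yields a fresh guest URI plus the FOAF/HTTP-header triples to store with it.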
def newCommentUri(secs=None):
"""this is essentially a bnode, but a real URI is easier to work with"""
if secs is None:
secs = time.time()
return URIRef("http://bigasterisk.com/comment/%r" % secs)
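# e.g. newCommentUri(1300000000.0) -> URIRef("http://bigasterisk.com/comment/1300000000.0")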
class AnyCase(sanitizer.HTMLSanitizer):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True):
sanitizer.HTMLSanitizer.__init__(self, stream, encoding, parseMeta,
useChardet,
lowercaseElementName,
lowercaseAttrName)
class AnyCaseNoSrc(AnyCase):
allowed_attributes = AnyCase.allowed_attributes[:]
allowed_attributes.remove('src')
def sanitize_html(stream, srcAttr=False):
ret = ''.join([token.toxml() for token in
html5parser.HTMLParser(tokenizer=AnyCase if srcAttr else AnyCaseNoSrc).
parseFragment(stream).childNodes])
return ret
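# e.g. sanitize_html('<p><img src="x.png"></p>') keeps the markup but drops the
# src attribute; pass srcAttr=True to preserve it (illustrative, not exhaustive).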
def spamCheck(article, content):
if content.lower().count("<a href") > 0:
log.error("too many links in %r" % content)
raise ValueError("too many links")
if '[url=' in content:
raise ValueError("url markup is too suspicious")
if content.split()[-1].startswith(('http://', 'https://')):
raise ValueError("please don't end with a link")
if article in [URIRef("http://drewp.quickwitretort.com/2008/02/22/0"),
URIRef("http://drewp.quickwitretort.com/2010/07/03/0"),
]:
raise ValueError("spam flood")
for pat in ['viagra', 'cialis', 'probleme de sante', 'pfizer', 'pilules']:
if pat in content.lower():
raise ValueError("spam pattern")
class Comments(cyclone.web.RequestHandler):
def get(self, public=False):
"""
post=<uri to post> (or use 'uri' for the arg)
returns html formatted comments (until i get some more content types)
"""
t1 = time.time()
post = (self.get_argument("post", default=None) or
self.get_argument("uri", default=None))
if not post:
raise ValueError("need 'uri' param")
post = URIRef(post)
foafAgent = None
try:
foafAgent = URIRef(self.request.headers['X-Foaf-Agent'])
except KeyError:
if not public:
self.write("Must login to see comments")
return
queryTime = time.time()
rows = self.findComments(post)
queryTime = time.time() - queryTime
self.set_header("Content-Type", "text/html")
ret = render.comments(
includeJs=self.get_argument("js", default="0") != "0",
public=public,
parent=post,
toHttps=lambda uri: uri.replace('http://', 'https://'),
agoString=agoString,
you=self.settings.db.value(foafAgent, FOAF.name) if foafAgent else None,
rows=rows,
)
self.write(ret + "<!-- %.2f ms (%.2f ms in query) -->" % (
1000 * (time.time() - t1),
1000 * queryTime))
def findComments(self, post):
rows = []
for who, when, content in self.settings.db.query("""
SELECT DISTINCT ?who ?when ?content WHERE {
?parent sioc:has_reply [
sioc:has_creator ?cr;
content:encoded ?content;
dcterms:created ?when
]
OPTIONAL { ?cr foaf:name ?who }
} ORDER BY ?when""", initBindings={"parent" : post}):
row = dict(who=who, when=when, content=sanitize_html(content))
rows.append(row)
log.debug("found %s rows with parent %r" % (len(rows), post))
return rows
def post(self, public=False):
"""
post=<parent post>
content=<html content>
we get the user from the x-foaf-agent header
"""
parent = self.get_argument('post', default=None) or self.get_argument("uri")
assert parent is not None
# maybe a legacy problem here with http/https, but blaster is still sending http
parent = URIRef(parent)
# this might be failing on ariblog, but that one is already safe
ip = self.request.headers.get("X-Forwarded-For")
if ip is not None:
HoneypotChecker(open("priv-honeypotkey").read().strip()).check(ip)
contentArg = self.get_argument("content", default="")
if not contentArg.strip():
raise ValueError("no text")
if contentArg.strip() == 'test':
return "not adding test comment"
spamCheck(parent, contentArg)
content = Literal(contentArg, datatype=RDF.XMLLiteral)
stmts = [] # gathered in one list for an atomic add
foafHeader = self.request.headers.get('X-Foaf-Agent')
if not public:
assert foafHeader
user = URIRef(foafHeader)
# make bnode-ish users for anonymous ones. need to get that username passed in here
else:
if foafHeader:
user = URIRef(foafHeader)
else:
user, moreStmts = newPublicUser(
self.request.headers.get("X-Forwarded-For"),
self.get_argument("name", ""),
self.get_argument("email", ""))
stmts.extend(moreStmts)
secs = time.time()
comment = newCommentUri(secs)
now = literalFromUnix(secs)
ctx = URIRef(parent + "/comments")
stmts.extend([(parent, SIOC.has_reply, comment),
(comment, DCTERMS.created, now),
(comment, SIOC.has_creator, user),
])
stmts.extend(commentStatements(user, comment, content))
db.writeFile(stmts, ctx, fileWords=[parent.split('/')[-1], now])
try:
self.sendAlerts(parent, user)
except Exception, e:
import traceback
log.error(e)
traceback.print_exc()
self.write("added")
def sendAlerts(self, parent, user):
c3po = restkit.Resource('http://bang:9040/')
for listener, mode in [
('http://bigasterisk.com/foaf.rdf#drewp', 'xmpp'),
('http://bigasterisk.com/kelsi/foaf.rdf#kelsi', 'xmpp')]:
c3po.post(
path='', payload={
'user': listener,
'msg': '%s comment from %s (http://10.1.0.1:9031/)' % (parent, user),
'mode': mode,
},
# shouldn't this be automatic?
headers={'content-type' : 'application/x-www-form-urlencoded'},
)
class CommentCount(cyclone.web.RequestHandler):
def get(self, public=False):
if not public:
try:
self.request.headers['X-Foaf-Agent']
except KeyError:
self.set_header("Content-Type", "text/plain")
self.write("Must login to see comments")
return
post = URIRef(self.get_argument("post"))
rows = self.settings.db.query("""
SELECT DISTINCT ?r WHERE {
?parent sioc:has_reply ?r
}""", initBindings={"parent" : post})
count = len(list(rows))
self.set_header("Content-Type", "text/plain")
self.write("%s comments" % count if count != 1 else "1 comment")
class Root(cyclone.web.RequestHandler):
def get(self):
recent = self.settings.db.getRecentComments(10, notOlderThan=60,
withSpam=False)
self.write(pystache.render(open("index.mustache").read(),
dict(recent=recent)))
class Spam(cyclone.web.RequestHandler):
def post(self):
try:
self.settings.db.setType(docId=self.get_argument('docId'), type="spam")
except Exception:
traceback.print_exc()
raise
self.redirect("/")
def commentStatements(user, commentUri, realComment):
# here you can put more processing on the comment text
realComment = Literal(realComment.replace("\r", ""), datatype=realComment.datatype) # rdflib n3 can't read these back
return [(commentUri, CONTENT.encoded, realComment)]
class Index(cyclone.web.RequestHandler):
def get(self):
self.set_header("Content-Type", "text/plain")
self.write("commentServe")
class Fav(cyclone.web.RequestHandler):
def get(self):
self.write(open("favicon.ico").read())
class Application(cyclone.web.Application):
def __init__(self, db):
handlers = [
(r'/comments', Comments),
(r'/(public)/comments', Comments),
(r'/commentCount', CommentCount),
(r'/(public)/commentCount', CommentCount),
(r'/', Root),
(r'/favicon.ico', Fav),
(r'/spam', Spam),
]
cyclone.web.Application.__init__(self, handlers,
db=db,
template_path=".")
if __name__ == '__main__':
db = DbMongo()
from twisted.python.log import startLogging
import sys
startLogging(sys.stdout)
reactor.listenTCP(9031, Application(db))
reactor.run()
| bsd-2-clause |
peterwilletts24/Python-Scripts | EMBRACE/modules/model_name_convert_title.py | 2 | 1563 | 'Converts model run names, e.g. djznu, to a subtitle for plots'
def main(experiment_id):
djznu = ('1.5km LAM', '4500 km', '10s', 'L118, 78km lid', 'Explicit 3D SMAG', 'djznu')
dkbhu = ('2.2 km LAM', '4500 km', '10s', 'L118, 78km lid', 'Explicit 3D SMAG', 'dkbhu')
djzns = ('4km LAM', '4500 km', '10s', 'L118, 78km lid', 'Explicit 3D SMAG', 'djzns')
djznq = ('24 km LAM', '4500 km', '600s', 'Global NWP set L70, 80 km lid', '1DBL + conv param', 'djznq')
djzny = ('120 km LAM', '4500 km', '1200s', 'Global NWP set L70, 80 km lid', '1DBL + conv param', 'djzny')
djznw = ('Driving Global', '4500 km', '1200s?', 'Global NWP set L70, 80 km lid', '1DBL + conv param', 'djznw')
dkhgu = ('2.2 km LAM', 'Big', '', '', '', 'dkhgu')
dkjxq = ('24 KM LAM','Big', '', '', '', 'dkjxq')
dklyu = ('8km','Big' ,'10s', 'L118, 78km lid', 'Explicit 3D SMAG', 'dklyu')
dkmbq = ('8km', 'Big', '300s?', '', '1DBL + conv param', 'dkmbq')
dklwu = ('12km', '', '10s', 'L118, 78km lid', 'Explicit 3D SMAG', 'dklwu')
dklzq = ('12km', '', '300s?', '', '1DBL + conv param', 'dklzq')
experiment_ids = [djznu, dkbhu, djzns, djznq, djzny, djznw, dkhgu, dkjxq, dklyu, dkmbq, dklwu, dklzq ]
for ex in experiment_ids:
if (experiment_id==ex[-1]):
title=ex
mod_sub_title = 'Run: %s, Resolution: %s, Domain: %s, Timestep: %s, Vert. Levels %s, Conv. scheme %s' % (title[-1], title[0], title[1], title[2], title[3], title[4])
return mod_sub_title,
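# Note the trailing comma: main() returns a 1-tuple, e.g. main('djznu')[0] ==
# 'Run: djznu, Resolution: 1.5km LAM, Domain: 4500 km, Timestep: 10s, Vert. Levels L118, 78km lid, Conv. scheme Explicit 3D SMAG'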
if __name__ == '__main__':
main()
| mit |
joninvski/ts_7500_kernel | scripts/rt-tester/rt-tester.py | 904 | 5366 | #!/usr/bin/env python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"lockbkl" : "9",
"unlockbkl" : "10",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
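# Each entry maps a test name to [status field letter, comparison, optional
# fixed argument]; analyse() below interprets these against the rttest status.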
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
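# e.g. analyse("14", ["M", "eq", 4], "0") isolates mutex-state digit 0 of the
# status value (14 % 10 == 4) and returns 1 because it equals the fixed argument.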
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
kivymd/KivyMD | demos/shrine/libs/baseclass/box_bottom_sheet.py | 1 | 4932 | from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.metrics import dp
from kivy.properties import BooleanProperty, ObjectProperty, StringProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
from kivy.uix.recycleview import RecycleView
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import CircularRippleBehavior
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.button import MDIconButton
from kivymd.uix.list import TwoLineAvatarIconListItem
class BoxBottomSheetProductList(RecycleView):
pass
class TotalPriceForBoxBottomSheetProductList(MDBoxLayout):
pass
class ToolbarForBoxBottomSheetProductList(MDBoxLayout):
pass
class ItemForBoxBottomSheetProductList(TwoLineAvatarIconListItem):
pass
class PreviousImage(CircularRippleBehavior, ButtonBehavior, Image):
description = StringProperty()
_root = ObjectProperty()
class BoxBottomSheet(ThemableBehavior, MDBoxLayout):
open_sheet_box = BooleanProperty(False)
def clear_box(self):
while len(self.ids.previous_box.children) != 1:
for widget in self.ids.previous_box.children:
if widget.__class__ is not MDIconButton:
self.ids.previous_box.remove_widget(widget)
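# Removing children while iterating skips siblings, so the outer while
# loop keeps sweeping until only the MDIconButton remains.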
def restore_opacity_bottom_sheet(self):
Animation(opacity=1, d=0.2).start(self.ids.previous_box)
Animation(opacity=1, d=0.2).start(self)
def restore_width_bottom_sheet(self):
if len(self.ids.previous_box.children) != 1:
for widget in self.ids.previous_box.children:
self.ids.previous_box.width += widget.width
self.width += widget.width
self.ids.previous_box.height = dp(48)
if self.parent.ids.box_bottom_sheet_product_list.width == 0:
Animation(width=self.width + dp(48), d=0.2).start(self)
def remove_box_list(self, *args):
self.parent.ids.box_bottom_sheet_product_list.data = []
self.restore_width_bottom_sheet()
self.restore_opacity_bottom_sheet()
def hide_box_bottom_sheet(self):
Animation(width=0, d=0.2).start(self)
Animation(opacity=0, d=0.2).start(self)
def do_open_bottom_sheet(self, *args):
total_price = 0
count_item = 0
for widget in self.ids.previous_box.children:
if widget.__class__ is PreviousImage:
count_item += 1
total_price += int(
float(widget.description.split("\n")[1].split("$ ")[1])
)
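# The price is parsed from the description's second line, which is
# assumed to end with "$ <price>".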
self.parent.ids.box_bottom_sheet_product_list.data.append(
{
"viewclass": "ItemForBoxBottomSheetProductList",
"height": dp(72),
"path_to_image": widget.source,
"description": widget.description,
}
)
self.parent.ids.box_bottom_sheet_product_list.data.insert(
0,
{
"viewclass": "ToolbarForBoxBottomSheetProductList",
"count_item": count_item,
"callback": self.hide_bottom_sheet,
},
)
self.parent.ids.box_bottom_sheet_product_list.data.append(
{
"viewclass": "TotalPriceForBoxBottomSheetProductList",
"total_price": str(total_price),
}
)
Animation(opacity=1, d=0.2).start(
self.parent.ids.box_bottom_sheet_product_list
)
self.show_clear_button()
def show_clear_button(self):
self.parent.ids.clear_button.opacity = 1
self.parent.ids.clear_button.disabled = False
self.parent.ids.clear_button.grow()
def hide_clear_button(self, *args):
def hide_clear_button(interval):
self.parent.ids.clear_button.opacity = 0
self.parent.ids.clear_button.disabled = True
self.parent.ids.clear_button.grow()
Clock.schedule_once(hide_clear_button, 0.2)
def hide_bottom_sheet(self, *args):
Animation.stop_all(self)
self.hide_clear_button()
Animation(opacity=0, d=0.2).start(
self.parent.ids.box_bottom_sheet_product_list
)
animation = Animation(
height=Window.height // 3, width=Window.width // 2, d=0.1
) + Animation(height=dp(68), width=dp(68), d=0.2)
animation.bind(on_complete=self.remove_box_list)
animation.start(self)
self.open_sheet_box = False
def open_bottom_sheet(self):
Animation.stop_all(self)
anim = Animation(
height=Window.height // 2, width=Window.width, d=0.1
) + Animation(height=Window.height, d=0.1)
anim.bind(on_complete=self.do_open_bottom_sheet)
anim.start(self)
self.open_sheet_box = True
| mit |
pilou-/ansible | test/units/modules/network/f5/test_bigip_dns_resolver.py | 16 | 3687 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_dns_resolver import ApiParameters
from library.modules.bigip_dns_resolver import ModuleParameters
from library.modules.bigip_dns_resolver import ModuleManager
from library.modules.bigip_dns_resolver import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_dns_resolver import ApiParameters
from ansible.modules.network.f5.bigip_dns_resolver import ModuleParameters
from ansible.modules.network.f5.bigip_dns_resolver import ModuleManager
from ansible.modules.network.f5.bigip_dns_resolver import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
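# Fixtures are memoized in fixture_data; JSON payloads are parsed, anything
# else is returned as raw text.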
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
route_domain=10,
cache_size=1234,
answer_default_zones=True,
randomize_query_case=False,
use_ipv4=True,
use_ipv6=False,
use_udp=True,
use_tcp=False,
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.route_domain == '/Common/10'
assert p.cache_size == 1234
assert p.answer_default_zones == 'yes'
assert p.randomize_query_case == 'no'
assert p.use_ipv4 == 'yes'
assert p.use_ipv6 == 'no'
assert p.use_tcp == 'no'
assert p.use_udp == 'yes'
def test_api_parameters(self):
args = load_fixture('load_net_dns_resolver_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
name='foo',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
JacobCallahan/robottelo | tests/foreman/api/test_location.py | 2 | 10317 | """Unit tests for the ``locations`` paths.
A full API reference for locations can be found here:
http://theforeman.org/api/apidoc/v2/locations.html
:Requirement: Location
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: OrganizationsLocations
:Assignee: shwsingh
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from random import randint
import pytest
from fauxfactory import gen_integer
from fauxfactory import gen_string
from nailgun import entities
from requests.exceptions import HTTPError
from robottelo.cleanup import capsule_cleanup
from robottelo.cli.factory import make_proxy
from robottelo.constants import DEFAULT_LOC
from robottelo.datafactory import filtered_datapoint
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import parametrized
@filtered_datapoint
def valid_loc_data_list():
"""List of valid data for input testing.
Note: The maximum allowed length of a location name is 246. This is
intended behavior (255 is the standard across other entities).
"""
return dict(
alpha=gen_string('alpha', randint(1, 246)),
numeric=gen_string('numeric', randint(1, 246)),
alphanumeric=gen_string('alphanumeric', randint(1, 246)),
latin1=gen_string('latin1', randint(1, 246)),
utf8=gen_string('utf8', randint(1, 85)),
cjk=gen_string('cjk', randint(1, 85)),
html=gen_string('html', randint(1, 85)),
)
class TestLocation:
"""Tests for the ``locations`` path."""
# TODO Add coverage for media, realms as soon as they're implemented
@pytest.fixture
def make_proxies(self, options=None):
"""Create a Proxy"""
proxy1 = make_proxy(options=options)
proxy2 = make_proxy(options=options)
yield dict(proxy1=proxy1, proxy2=proxy2)
capsule_cleanup(proxy1['id'])
capsule_cleanup(proxy2['id'])
@pytest.fixture
def make_orgs(self):
"""Create two organizations"""
return dict(org=entities.Organization().create(), org2=entities.Organization().create())
@pytest.fixture
def make_entities(self):
"""Set up reusable entities for tests."""
return dict(
domain=entities.Domain().create(),
subnet=entities.Subnet().create(),
env=entities.Environment().create(),
host_group=entities.HostGroup().create(),
template=entities.ProvisioningTemplate().create(),
test_cr=entities.LibvirtComputeResource().create(),
new_user=entities.User().create(),
)
@pytest.mark.tier1
@pytest.mark.parametrize('name', **parametrized(valid_loc_data_list()))
def test_positive_create_with_name(self, name):
"""Create new locations using different inputs as a name
:id: 90bb90a3-120f-4ea6-89a9-62757be42486
:expectedresults: Location created successfully and has expected and
correct name
:CaseImportance: Critical
:parametrized: yes
"""
location = entities.Location(name=name).create()
assert location.name == name
@pytest.mark.tier1
def test_positive_create_and_delete_with_comma_separated_name(self):
"""Create new location using name that has comma inside, delete location
:id: 3131e99d-b278-462e-a650-a5a4f4e0a2f1
:expectedresults: Location created successfully and has expected name
"""
name = '{}, {}'.format(gen_string('alpha'), gen_string('alpha'))
location = entities.Location(name=name).create()
assert location.name == name
location.delete()
with pytest.raises(HTTPError):
location.read()
@pytest.mark.tier2
def test_positive_create_and_update_with_org(self, make_orgs):
"""Create new location with assigned organization to it
:id: 5032a93f-4b37-4c19-b6d3-26e3a868d0f1
:expectedresults: Location created successfully and has correct
organization assigned to it with expected title
:CaseLevel: Integration
"""
location = entities.Location(organization=[make_orgs['org']]).create()
assert location.organization[0].id == make_orgs['org'].id
assert location.organization[0].read().title == make_orgs['org'].title
orgs = [make_orgs['org'], make_orgs['org2']]
location.organization = orgs
location = location.update(['organization'])
assert {org.id for org in orgs} == {org.id for org in location.organization}
@pytest.mark.tier1
@pytest.mark.parametrize('name', **parametrized(invalid_values_list()))
def test_negative_create_with_name(self, name):
"""Attempt to create new location using invalid names only
:id: 320e6bca-5645-423b-b86a-2b6f35c8dae3
:expectedresults: Location is not created and expected error is raised
:CaseImportance: Critical
:parametrized: yes
"""
with pytest.raises(HTTPError):
entities.Location(name=name).create()
@pytest.mark.tier1
def test_negative_create_with_same_name(self):
"""Attempt to create new location using name of existing entity
:id: bc09acb3-9ecf-4d23-b3ef-94f24e16e6db
:expectedresults: Location is not created and expected error is raised
:CaseImportance: Critical
"""
name = gen_string('alphanumeric')
location = entities.Location(name=name).create()
assert location.name == name
with pytest.raises(HTTPError):
entities.Location(name=name).create()
@pytest.mark.tier1
def test_negative_create_with_domain(self):
"""Attempt to create new location using non-existent domain identifier
:id: 5449532d-7959-4547-ba05-9e194eea495d
:expectedresults: Location is not created and expected error is raised
"""
with pytest.raises(HTTPError):
entities.Location(domain=[gen_integer(10000, 99999)]).create()
@pytest.mark.tier1
@pytest.mark.parametrize('new_name', **parametrized(valid_loc_data_list()))
def test_positive_update_name(self, new_name):
"""Update location with new name
:id: 73ff6dab-e12a-4f7d-9c1f-6984fc076329
:expectedresults: Location updated successfully and name was changed
:CaseImportance: Critical
:parametrized: yes
"""
location = entities.Location().create()
location.name = new_name
assert location.update(['name']).name == new_name
@pytest.mark.tier2
def test_positive_update_entities(self, make_entities):
"""Update location with new domain
:id: 1016dfb9-8103-45f1-8738-0579fa9754c1
:expectedresults: Location updated successfully and has correct domain
assigned
:CaseLevel: Integration
"""
location = entities.Location().create()
location.domain = [make_entities["domain"]]
location.subnet = [make_entities["subnet"]]
location.environment = [make_entities["env"]]
location.hostgroup = [make_entities["host_group"]]
location.provisioning_template = [make_entities["template"]]
location.compute_resource = [make_entities["test_cr"]]
location.user = [make_entities["new_user"]]
assert location.update(['domain']).domain[0].id == make_entities["domain"].id
assert location.update(['subnet']).subnet[0].id == make_entities["subnet"].id
assert location.update(['environment']).environment[0].id == make_entities["env"].id
assert location.update(['hostgroup']).hostgroup[0].id == make_entities["host_group"].id
ct_list = [
ct
for ct in location.update(['provisioning_template']).provisioning_template
if ct.id == make_entities["template"].id
]
assert len(ct_list) == 1
assert (
location.update(['compute_resource']).compute_resource[0].id
== make_entities["test_cr"].id
)
assert location.compute_resource[0].read().provider == 'Libvirt'
assert location.update(['user']).user[0].id == make_entities["new_user"].id
@pytest.mark.run_in_one_thread
@pytest.mark.tier2
def test_positive_create_update_and_remove_capsule(self, make_proxies):
"""Update location with new capsule
:id: 2786146f-f466-4ed8-918a-5f46806558e2
:expectedresults: Location updated successfully and has correct capsule
assigned
:BZ: 1398695
:CaseLevel: Integration
:CaseImportance: Critical
"""
proxy_id_1 = make_proxies['proxy1']['id']
proxy_id_2 = make_proxies['proxy2']['id']
proxy = entities.SmartProxy(id=proxy_id_1).read()
location = entities.Location(smart_proxy=[proxy]).create()
new_proxy = entities.SmartProxy(id=proxy_id_2).read()
location.smart_proxy = [new_proxy]
location = location.update(['smart_proxy'])
assert location.smart_proxy[0].id == new_proxy.id
assert location.smart_proxy[0].read().name == new_proxy.name
location.smart_proxy = []
location = location.update(['smart_proxy'])
assert len(location.smart_proxy) == 0
@pytest.mark.tier2
def test_negative_update_domain(self):
"""Try to update existing location with incorrect domain. Use
domain id
:id: e26c92f2-42cb-4706-9e03-3e00a134cb9f
:expectedresults: Location is not updated
:CaseLevel: Integration
"""
location = entities.Location(domain=[entities.Domain().create()]).create()
domain = entities.Domain().create()
location.domain[0].id = gen_integer(10000, 99999)
with pytest.raises(HTTPError):
assert location.update(['domain']).domain[0].id != domain.id
@pytest.mark.tier1
def test_default_loc_id_check(self):
"""test to check the default_location id
:id: 3c89d63b-d5fb-4f05-9efb-f560f0194c85
:BZ: 1713269
:expectedresults: The default_location ID remains 2.
"""
default_loc_id = entities.Location().search(query={'search': f'name="{DEFAULT_LOC}"'})[0].id
assert default_loc_id == 2
| gpl-3.0 |
wtsi-hgi/irobot | irobot/authentication/_http.py | 1 | 7456 | """
Copyright (c) 2017 Genome Research Ltd.
Author: Christopher Harrison <ch12@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import atexit
import logging
from abc import abstractmethod
from threading import Lock, Timer
from typing import Dict, NamedTuple, Optional
from aiohttp import ClientSession, ClientResponse
from irobot.authentication._base import AuthenticatedUser, BaseAuthHandler
from irobot.authentication.parser import HTTPAuthMethod, ParseError, auth_parser
from irobot.config import Configuration
from irobot.logs import LogWriter
class HTTPValidatorParameters(NamedTuple):
""" Parameters for the HTTP validator """
url: str # URL to make the authentication response to
payload: str # Challenge response payload
method: str = "GET" # HTTP method
headers: Dict[str, str] = {} # Additional request headers
class BaseHTTPAuthHandler(LogWriter, BaseAuthHandler):
""" Base HTTP-based authentication handler with logging and caching """
## Implement these #################################################
@abstractmethod
def match_auth_method(self, challenge_response: HTTPAuthMethod) -> bool:
"""
Test whether the given challenge response matches the requirements of
the handler class
@params challenge_response Authentication challenge response (HTTPAuthMethod)
@return Match (bool)
"""
@abstractmethod
def set_handler_parameters(self, challenge_response: HTTPAuthMethod) -> HTTPValidatorParameters:
"""
Set the parameters for the authentication challenge response
@params challenge_response Authentication challenge response (HTTPAuthMethod)
@return Authentication request parameters (HTTPValidatorParameters)
"""
@abstractmethod
async def get_authenticated_user(self, challenge_response: HTTPAuthMethod,
auth_response: ClientResponse) -> AuthenticatedUser:
"""
Get the user from the authentication challenge response and any
response back from the authentication server
@params challenge_response Authentication challenge response (HTTPAuthMethod)
@param auth_response Response from authentication request (ClientResponse)
@return Authenticated user (AuthenticatedUser)
"""
####################################################################
def __init__(self, config: Configuration, logger: Optional[logging.Logger]=None) -> None:
"""
Constructor
@param config Authentication configuration
@param logger Logger
"""
super().__init__(logger=logger)
self._config = config
# Get the first word of the WWW-Authenticate string
self._auth_method, *_ = self.www_authenticate.split()
# Initialise the cache, if required
if self._config.cache:
self.log(logging.DEBUG, f"Creating {self._auth_method} authentication cache")
self._cache: Dict[HTTPAuthMethod, AuthenticatedUser] = {}
self._cache_lock = Lock()
self._schedule_cleanup()
atexit.register(self._cleanup_timer.cancel)
def _schedule_cleanup(self) -> None:
""" Initialise and start the clean up timer """
self._cleanup_timer = Timer(self._config.cache.total_seconds(), self._cleanup)
self._cleanup_timer.daemon = True
self._cleanup_timer.start()
def __del__(self) -> None:
""" Cancel any running clean up timer on GC """
if self._config.cache and self._cleanup_timer.is_alive():
self._cleanup_timer.cancel()
def _cleanup(self) -> None:
""" Clean up expired entries from the cache """
with self._cache_lock:
self.log(logging.DEBUG, f"Cleaning {self._auth_method} authentication cache")
for key, user in list(self._cache.items()):
if not user.valid(self._config.cache):
del self._cache[key]
self._schedule_cleanup()
async def _validate_request(self, params: HTTPValidatorParameters) -> Optional[ClientResponse]:
"""
Asynchronously make an authentication request to check validity
@param params Challenge response validator parameters (HTTPValidatorParameters)
@return Authentication response (ClientResponse; None on failure)
"""
async with ClientSession() as session:
req_headers = {
"Authorization": params.payload,
**params.headers
}
async with session.request(params.method, params.url, headers=req_headers) as response:
if 200 <= response.status < 300:
self.log(logging.DEBUG, f"{self._auth_method} authenticated")
return response
if response.status in [401, 403]:
self.log(logging.WARNING, f"{self._auth_method} couldn't authenticate")
else:
response.raise_for_status()
return None
async def authenticate(self, auth_header: str) -> Optional[AuthenticatedUser]:
"""
Validate the authorisation header
@param auth_header Contents of the "Authorization" header (string)
@return Authenticated user (AuthenticatedUser)
"""
try:
_auth_methods = auth_parser(auth_header)
challenge_response, *_ = filter(self.match_auth_method, _auth_methods)
except ParseError:
self.log(logging.WARNING,
f"{self._auth_method} authentication handler couldn't parse authentication header")
return None
except ValueError:
self.log(logging.ERROR, f"No HTTP {self._auth_method} authentication handler available")
return None
# Check the cache
if self._config.cache:
with self._cache_lock:
if challenge_response in self._cache:
user = self._cache[challenge_response]
if user.valid(self._config.cache):
self.log(logging.DEBUG, f"Authenticated user \"{user.user}\" from cache")
return user
# Clean up expired users
del self._cache[challenge_response]
auth_response = await self._validate_request(self.set_handler_parameters(challenge_response))
if auth_response:
user = await self.get_authenticated_user(challenge_response, auth_response)
# Put validated user in the cache
if self._config.cache:
with self._cache_lock:
self._cache[challenge_response] = user
return user
return None
| gpl-3.0 |
hyperized/ansible | lib/ansible/modules/cloud/google/gcp_compute_instance.py | 5 | 66410 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_instance
description:
- An instance is a virtual machine (VM) hosted on Google's infrastructure.
short_description: Creates a GCP Instance
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
can_ip_forward:
description:
- Allows this instance to send and receive packets with non-matching destination
or source IPs. This is required if you plan to use this instance to forward
routes.
required: false
type: bool
aliases:
- ip_forward
deletion_protection:
description:
- Whether the resource should be protected against deletion.
required: false
type: bool
version_added: 2.9
disks:
description:
- An array of disks that are associated with the instances that are created from
this template.
required: false
type: list
suboptions:
auto_delete:
description:
- Specifies whether the disk will be auto-deleted when the instance is deleted
(but not when the disk is detached from the instance).
- 'Tip: Disks should be set to autoDelete=true so that leftover disks are
not left behind on machine deletion.'
required: false
type: bool
boot:
description:
- Indicates that this is a boot disk. The virtual machine will use the first
partition of the disk for its root filesystem.
required: false
type: bool
device_name:
description:
- Specifies a unique device name of your choice that is reflected into the
/dev/disk/by-id/google-* tree of a Linux operating system running within
the instance. This name can be used to reference the device for mounting,
resizing, and so on, from within the instance.
required: false
type: str
disk_encryption_key:
description:
- Encrypts or decrypts a disk using a customer-supplied encryption key.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC
4648 base64 to either encrypt or decrypt this resource.
required: false
type: str
rsa_encrypted_key:
description:
- Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied
encryption key to either encrypt or decrypt this resource.
required: false
type: str
index:
description:
- Assigns a zero-based index to this disk, where 0 is reserved for the boot
disk. For example, if you have many disks attached to an instance, each
disk would have a unique index number. If not specified, the server will
choose an appropriate value.
required: false
type: int
initialize_params:
description:
- Specifies the parameters for a new disk that will be created alongside the
new instance. Use initialization parameters to create boot disks or local
SSDs attached to the new instance.
required: false
type: dict
suboptions:
disk_name:
description:
- Specifies the disk name. If not specified, the default is to use the
name of the instance.
required: false
type: str
disk_size_gb:
description:
- Specifies the size of the disk in base-2 GB.
required: false
type: int
disk_type:
description:
- Reference to a disk type.
- Specifies the disk type to use to create the instance.
- If not specified, the default is pd-standard.
required: false
type: str
source_image:
description:
- The source image to create this disk. When creating a new instance,
one of initializeParams.sourceImage or disks.source is required. To
create a disk with one of the public operating system images, specify
the image by its family name.
required: false
type: str
aliases:
- image
- image_family
source_image_encryption_key:
description:
- The customer-supplied encryption key of the source image. Required if
the source image is protected by a customer-supplied encryption key.
- Instance templates do not store customer-supplied encryption keys, so
you cannot create disks for instances in a managed instance group if
the source images are encrypted with your own keys.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in
RFC 4648 base64 to either encrypt or decrypt this resource.
required: false
type: str
interface:
description:
- Specifies the disk interface to use for attaching this disk, which is either
SCSI or NVME. The default is SCSI.
- Persistent disks must always use SCSI and the request will fail if you attempt
to attach a persistent disk in any other format than SCSI.
- 'Some valid choices include: "SCSI", "NVME"'
required: false
type: str
mode:
description:
- The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If
not specified, the default is to attach the disk in READ_WRITE mode.
- 'Some valid choices include: "READ_WRITE", "READ_ONLY"'
required: false
type: str
source:
description:
- Reference to a disk. When creating a new instance, one of initializeParams.sourceImage
or disks.source is required.
- If desired, you can also attach existing non-root persistent disks using
this property. This field is only applicable for persistent disks.
- 'This field represents a link to a Disk resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and
value of your resource''s selfLink Alternatively, you can add `register:
name-of-resource` to a gcp_compute_disk task and then set this source field
to "{{ name-of-resource }}"'
required: false
type: dict
type:
description:
- Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified,
the default is PERSISTENT.
- 'Some valid choices include: "SCRATCH", "PERSISTENT"'
required: false
type: str
guest_accelerators:
description:
- List of the type and count of accelerator cards attached to the instance .
required: false
type: list
suboptions:
accelerator_count:
description:
- The number of the guest accelerator cards exposed to this instance.
required: false
type: int
accelerator_type:
description:
- Full or partial URL of the accelerator type resource to expose to this instance.
required: false
type: str
hostname:
description:
- The hostname of the instance to be created. The specified hostname must be RFC1035
compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal
when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal
when using zonal DNS.
required: false
type: str
version_added: 2.9
labels:
description:
- Labels to apply to this instance. A list of key->value pairs.
required: false
type: dict
version_added: 2.9
metadata:
description:
- The metadata key/value pairs to assign to instances that are created from this
template. These pairs can consist of custom metadata or predefined keys.
required: false
type: dict
machine_type:
description:
- A reference to a machine type which defines VM kind.
required: false
type: str
min_cpu_platform:
description:
- Specifies a minimum CPU platform for the VM instance. Applicable values are
the friendly names of CPU platforms .
required: false
type: str
name:
description:
- The name of the resource, provided by the client when initially creating the
resource. The resource name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the regular expression
`[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase
letter, and all following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
required: false
type: str
network_interfaces:
description:
- An array of configurations for this interface. This specifies how this interface
is configured to interact with other network services, such as connecting to
the internet. Only one network interface is supported per instance.
required: false
type: list
suboptions:
access_configs:
description:
- An array of configurations for this interface. Currently, only one access
config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified,
then this instance will have no external internet access.
required: false
type: list
suboptions:
name:
description:
- The name of this access configuration. The default and recommended name
is External NAT but you can use any arbitrary string you would like.
For example, My external IP or Network Access.
required: true
type: str
nat_ip:
description:
- Reference to an address.
- An external IP address associated with this instance.
- Specify an unused static external IP address available to the project
or leave this field undefined to use an IP from a shared ephemeral IP
address pool. If you specify a static external IP address, it must live
in the same region as the zone of the instance.
- 'This field represents a link to a Address resource in GCP. It can be
specified in two ways. First, you can place a dictionary with key ''address''
and value of your resource''s address Alternatively, you can add `register:
name-of-resource` to a gcp_compute_address task and then set this nat_ip
field to "{{ name-of-resource }}"'
required: false
type: dict
type:
description:
- The type of configuration. The default and only option is ONE_TO_ONE_NAT.
- 'Some valid choices include: "ONE_TO_ONE_NAT"'
required: true
type: str
alias_ip_ranges:
description:
- An array of alias IP ranges for this network interface. Can only be specified
for network interfaces on subnet-mode networks.
required: false
type: list
suboptions:
ip_cidr_range:
description:
- The IP CIDR range represented by this alias IP range.
- This IP CIDR range must belong to the specified subnetwork and cannot
contain IP addresses reserved by system or used by other network interfaces.
This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g.
/24) or a CIDR format string (e.g. 10.1.2.0/24).
required: false
type: str
subnetwork_range_name:
description:
- Optional subnetwork secondary range name specifying the secondary range
from which to allocate the IP CIDR range for this alias IP range. If
left unspecified, the primary range of the subnetwork will be used.
required: false
type: str
network:
description:
- Specifies the title of an existing network. Not setting the network title
will select the default network interface, which could have SSH already
configured .
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and
value of your resource''s selfLink Alternatively, you can add `register:
name-of-resource` to a gcp_compute_network task and then set this network
field to "{{ name-of-resource }}"'
required: false
type: dict
network_ip:
description:
- An IPv4 internal network address to assign to the instance for this network
interface. If not specified by the user, an unused internal IP is assigned
by the system.
required: false
type: str
subnetwork:
description:
- Reference to a VPC network.
- If the network resource is in legacy mode, do not provide this property.
If the network is in auto subnet mode, providing the subnetwork is optional.
If the network is in custom subnet mode, then this field should be specified.
- 'This field represents a link to a Subnetwork resource in GCP. It can be
specified in two ways. First, you can place a dictionary with key ''selfLink''
and value of your resource''s selfLink Alternatively, you can add `register:
name-of-resource` to a gcp_compute_subnetwork task and then set this subnetwork
field to "{{ name-of-resource }}"'
required: false
type: dict
scheduling:
description:
- Sets the scheduling options for this instance.
required: false
type: dict
suboptions:
automatic_restart:
description:
- Specifies whether the instance should be automatically restarted if it is
terminated by Compute Engine (not terminated by a user).
- You can only set the automatic restart option for standard instances. Preemptible
instances cannot be automatically restarted.
required: false
type: bool
on_host_maintenance:
description:
- Defines the maintenance behavior for this instance. For standard instances,
the default behavior is MIGRATE. For preemptible instances, the default
and only possible behavior is TERMINATE.
- For more information, see Setting Instance Scheduling Options.
required: false
type: str
preemptible:
description:
- Defines whether the instance is preemptible. This can only be set during
instance creation, it cannot be set or changed after the instance has been
created.
required: false
type: bool
service_accounts:
description:
- A list of service accounts, with their specified scopes, authorized for this
instance. Only one service account per VM instance is supported.
required: false
type: list
suboptions:
email:
description:
- Email address of the service account.
required: false
type: str
scopes:
description:
- The list of scopes to be made available for this service account.
required: false
type: list
shielded_instance_config:
description:
- Configuration for various parameters related to shielded instances.
required: false
type: dict
version_added: 2.9
suboptions:
enable_secure_boot:
description:
- Defines whether the instance has Secure Boot enabled.
required: false
type: bool
enable_vtpm:
description:
- Defines whether the instance has the vTPM enabled.
required: false
type: bool
enable_integrity_monitoring:
description:
- Defines whether the instance has integrity monitoring enabled.
required: false
type: bool
status:
description:
- 'The status of the instance. One of the following values: PROVISIONING, STAGING,
RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.'
- As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine
off .
- 'Some valid choices include: "PROVISIONING", "STAGING", "RUNNING", "STOPPING",
"SUSPENDING", "SUSPENDED", "TERMINATED"'
required: false
type: str
version_added: 2.8
tags:
description:
- A list of tags to apply to this instance. Tags are used to identify valid sources
or targets for network firewalls and are specified by the client during instance
creation. The tags can be later modified by the setTags method. Each tag within
the list must comply with RFC1035.
required: false
type: dict
suboptions:
fingerprint:
description:
- Specifies a fingerprint for this request, which is essentially a hash of
the metadata's contents and used for optimistic locking.
- The fingerprint is initially generated by Compute Engine and changes after
every request to modify or update metadata. You must always provide an up-to-date
fingerprint hash in order to update or change metadata.
required: false
type: str
items:
description:
- An array of tags. Each tag must be 1-63 characters long, and comply with
RFC1035.
required: false
type: list
zone:
description:
- A reference to the zone where the machine resides.
required: true
type: str
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a disk
gcp_compute_disk:
name: disk-instance
size_gb: 50
source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: disk
- name: create a network
gcp_compute_network:
name: network-instance
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a address
gcp_compute_address:
name: address-instance
region: us-central1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: address
- name: create a instance
gcp_compute_instance:
name: test_object
machine_type: n1-standard-1
disks:
- auto_delete: 'true'
boot: 'true'
source: "{{ disk }}"
metadata:
startup-script-url: gs://graphite-playground/bootstrap.sh
cost-center: '12345'
labels:
environment: production
network_interfaces:
- network: "{{ network }}"
access_configs:
- name: External NAT
nat_ip: "{{ address }}"
type: ONE_TO_ONE_NAT
zone: us-central1-a
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
canIpForward:
description:
- Allows this instance to send and receive packets with non-matching destination
or source IPs. This is required if you plan to use this instance to forward routes.
returned: success
type: bool
cpuPlatform:
description:
- The CPU platform used by this instance.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
deletionProtection:
description:
- Whether the resource should be protected against deletion.
returned: success
type: bool
disks:
description:
  - An array of disks that are associated with this instance.
returned: success
type: complex
contains:
autoDelete:
description:
- Specifies whether the disk will be auto-deleted when the instance is deleted
(but not when the disk is detached from the instance).
- 'Tip: Disks should be set to autoDelete=true so that leftover disks are not
left behind on machine deletion.'
returned: success
type: bool
boot:
description:
- Indicates that this is a boot disk. The virtual machine will use the first
partition of the disk for its root filesystem.
returned: success
type: bool
deviceName:
description:
- Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-*
tree of a Linux operating system running within the instance. This name can
be used to reference the device for mounting, resizing, and so on, from within
the instance.
returned: success
type: str
diskEncryptionKey:
description:
- Encrypts or decrypts a disk using a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
rsaEncryptedKey:
description:
- Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied
encryption key to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
index:
description:
- Assigns a zero-based index to this disk, where 0 is reserved for the boot
disk. For example, if you have many disks attached to an instance, each disk
would have a unique index number. If not specified, the server will choose
an appropriate value.
returned: success
type: int
initializeParams:
description:
- Specifies the parameters for a new disk that will be created alongside the
new instance. Use initialization parameters to create boot disks or local
SSDs attached to the new instance.
returned: success
type: complex
contains:
diskName:
description:
- Specifies the disk name. If not specified, the default is to use the name
of the instance.
returned: success
type: str
diskSizeGb:
description:
- Specifies the size of the disk in base-2 GB.
returned: success
type: int
diskType:
description:
- Reference to a disk type.
- Specifies the disk type to use to create the instance.
- If not specified, the default is pd-standard.
returned: success
type: str
sourceImage:
description:
- The source image to create this disk. When creating a new instance, one
of initializeParams.sourceImage or disks.source is required. To create
a disk with one of the public operating system images, specify the image
by its family name.
returned: success
type: str
sourceImageEncryptionKey:
description:
- The customer-supplied encryption key of the source image. Required if
the source image is protected by a customer-supplied encryption key.
- Instance templates do not store customer-supplied encryption keys, so
you cannot create disks for instances in a managed instance group if the
source images are encrypted with your own keys.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC
4648 base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
encryption key that protects this resource.
returned: success
type: str
interface:
description:
- Specifies the disk interface to use for attaching this disk, which is either
SCSI or NVME. The default is SCSI.
- Persistent disks must always use SCSI and the request will fail if you attempt
to attach a persistent disk in any other format than SCSI.
returned: success
type: str
mode:
description:
- The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If
not specified, the default is to attach the disk in READ_WRITE mode.
returned: success
type: str
source:
description:
- Reference to a disk. When creating a new instance, one of initializeParams.sourceImage
or disks.source is required.
- If desired, you can also attach existing non-root persistent disks using this
property. This field is only applicable for persistent disks.
returned: success
type: dict
type:
description:
- Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified,
the default is PERSISTENT.
returned: success
type: str
guestAccelerators:
description:
  - List of the type and count of accelerator cards attached to the instance.
returned: success
type: complex
contains:
acceleratorCount:
description:
- The number of the guest accelerator cards exposed to this instance.
returned: success
type: int
acceleratorType:
description:
- Full or partial URL of the accelerator type resource to expose to this instance.
returned: success
type: str
hostname:
description:
- The hostname of the instance to be created. The specified hostname must be RFC1035
compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal
when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal
when using zonal DNS.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally
during updates.
returned: success
type: str
labels:
description:
  - Labels to apply to this instance. A map of key->value pairs.
returned: success
type: dict
metadata:
description:
  - The metadata key/value pairs to assign to the instance. These pairs can consist
    of custom metadata or predefined keys.
returned: success
type: dict
machineType:
description:
  - A reference to a machine type, which defines the VM kind.
returned: success
type: str
minCpuPlatform:
description:
- Specifies a minimum CPU platform for the VM instance. Applicable values are the
    friendly names of CPU platforms.
returned: success
type: str
name:
description:
  - The name of the resource, provided by the client when initially creating the
    resource. The name must be 1-63 characters long, comply with RFC1035, and match
    the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, which means the first character
    must be a lowercase letter, and all following characters must be a dash, lowercase
    letter, or digit, except the last character, which cannot be a dash.
returned: success
type: str
networkInterfaces:
description:
  - An array of network configurations for this instance. This specifies how each
    interface is configured to interact with other network services, such as connecting
    to the internet. Only one network interface is supported per instance.
returned: success
type: complex
contains:
accessConfigs:
description:
- An array of configurations for this interface. Currently, only one access
config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified,
then this instance will have no external internet access.
returned: success
type: complex
contains:
name:
description:
- The name of this access configuration. The default and recommended name
is External NAT but you can use any arbitrary string you would like. For
example, My external IP or Network Access.
returned: success
type: str
natIP:
description:
- Reference to an address.
- An external IP address associated with this instance.
- Specify an unused static external IP address available to the project
or leave this field undefined to use an IP from a shared ephemeral IP
address pool. If you specify a static external IP address, it must live
in the same region as the zone of the instance.
returned: success
type: dict
type:
description:
- The type of configuration. The default and only option is ONE_TO_ONE_NAT.
returned: success
type: str
aliasIpRanges:
description:
- An array of alias IP ranges for this network interface. Can only be specified
for network interfaces on subnet-mode networks.
returned: success
type: complex
contains:
ipCidrRange:
description:
- The IP CIDR range represented by this alias IP range.
- This IP CIDR range must belong to the specified subnetwork and cannot
contain IP addresses reserved by system or used by other network interfaces.
This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g.
/24) or a CIDR format string (e.g. 10.1.2.0/24).
returned: success
type: str
subnetworkRangeName:
description:
- Optional subnetwork secondary range name specifying the secondary range
from which to allocate the IP CIDR range for this alias IP range. If left
unspecified, the primary range of the subnetwork will be used.
returned: success
type: str
name:
description:
- The name of the network interface, generated by the server. For network devices,
      these are eth0, eth1, etc.
returned: success
type: str
network:
description:
    - Specifies the title of an existing network. If no network title is set, the
      default network interface is selected, which may already have SSH configured.
returned: success
type: dict
networkIP:
description:
- An IPv4 internal network address to assign to the instance for this network
interface. If not specified by the user, an unused internal IP is assigned
by the system.
returned: success
type: str
subnetwork:
description:
    - Reference to a VPC subnetwork.
- If the network resource is in legacy mode, do not provide this property. If
the network is in auto subnet mode, providing the subnetwork is optional.
If the network is in custom subnet mode, then this field should be specified.
returned: success
type: dict
scheduling:
description:
- Sets the scheduling options for this instance.
returned: success
type: complex
contains:
automaticRestart:
description:
- Specifies whether the instance should be automatically restarted if it is
terminated by Compute Engine (not terminated by a user).
- You can only set the automatic restart option for standard instances. Preemptible
instances cannot be automatically restarted.
returned: success
type: bool
onHostMaintenance:
description:
- Defines the maintenance behavior for this instance. For standard instances,
the default behavior is MIGRATE. For preemptible instances, the default and
only possible behavior is TERMINATE.
- For more information, see Setting Instance Scheduling Options.
returned: success
type: str
preemptible:
description:
- Defines whether the instance is preemptible. This can only be set during instance
creation, it cannot be set or changed after the instance has been created.
returned: success
type: bool
serviceAccounts:
description:
- A list of service accounts, with their specified scopes, authorized for this instance.
Only one service account per VM instance is supported.
returned: success
type: complex
contains:
email:
description:
- Email address of the service account.
returned: success
type: str
scopes:
description:
- The list of scopes to be made available for this service account.
returned: success
type: list
shieldedInstanceConfig:
description:
- Configuration for various parameters related to shielded instances.
returned: success
type: complex
contains:
enableSecureBoot:
description:
- Defines whether the instance has Secure Boot enabled.
returned: success
type: bool
enableVtpm:
description:
- Defines whether the instance has the vTPM enabled.
returned: success
type: bool
enableIntegrityMonitoring:
description:
- Defines whether the instance has integrity monitoring enabled.
returned: success
type: bool
status:
description:
- 'The status of the instance. One of the following values: PROVISIONING, STAGING,
RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.'
- As a user, use RUNNING to keep a machine "on" and TERMINATED to turn a machine
    off.
returned: success
type: str
statusMessage:
description:
- An optional, human-readable explanation of the status.
returned: success
type: str
tags:
description:
- A list of tags to apply to this instance. Tags are used to identify valid sources
or targets for network firewalls and are specified by the client during instance
creation. The tags can be later modified by the setTags method. Each tag within
the list must comply with RFC1035.
returned: success
type: complex
contains:
fingerprint:
description:
    - Specifies a fingerprint for this request, which is essentially a hash of the
      tags' contents and used for optimistic locking.
    - The fingerprint is initially generated by Compute Engine and changes after
      every request to modify or update tags. You must always provide an up-to-date
      fingerprint hash in order to update or change tags.
returned: success
type: str
items:
description:
- An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.
returned: success
type: list
zone:
description:
- A reference to the zone where the machine resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
can_ip_forward=dict(type='bool', aliases=['ip_forward']),
deletion_protection=dict(type='bool'),
disks=dict(
type='list',
elements='dict',
options=dict(
auto_delete=dict(type='bool'),
boot=dict(type='bool'),
device_name=dict(type='str'),
disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'), rsa_encrypted_key=dict(type='str'))),
index=dict(type='int'),
initialize_params=dict(
type='dict',
options=dict(
disk_name=dict(type='str'),
disk_size_gb=dict(type='int'),
disk_type=dict(type='str'),
source_image=dict(type='str', aliases=['image', 'image_family']),
source_image_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
),
),
interface=dict(type='str'),
mode=dict(type='str'),
source=dict(type='dict'),
type=dict(type='str'),
),
),
guest_accelerators=dict(type='list', elements='dict', options=dict(accelerator_count=dict(type='int'), accelerator_type=dict(type='str'))),
hostname=dict(type='str'),
labels=dict(type='dict'),
metadata=dict(type='dict'),
machine_type=dict(type='str'),
min_cpu_platform=dict(type='str'),
name=dict(type='str'),
network_interfaces=dict(
type='list',
elements='dict',
options=dict(
access_configs=dict(
type='list',
elements='dict',
options=dict(name=dict(required=True, type='str'), nat_ip=dict(type='dict'), type=dict(required=True, type='str')),
),
alias_ip_ranges=dict(type='list', elements='dict', options=dict(ip_cidr_range=dict(type='str'), subnetwork_range_name=dict(type='str'))),
network=dict(type='dict'),
network_ip=dict(type='str'),
subnetwork=dict(type='dict'),
),
),
scheduling=dict(
type='dict', options=dict(automatic_restart=dict(type='bool'), on_host_maintenance=dict(type='str'), preemptible=dict(type='bool'))
),
service_accounts=dict(type='list', elements='dict', options=dict(email=dict(type='str'), scopes=dict(type='list', elements='str'))),
shielded_instance_config=dict(
type='dict', options=dict(enable_secure_boot=dict(type='bool'), enable_vtpm=dict(type='bool'), enable_integrity_monitoring=dict(type='bool'))
),
status=dict(type='str'),
tags=dict(type='dict', options=dict(fingerprint=dict(type='str'), items=dict(type='list', elements='str'))),
zone=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#instance'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
if fetch:
instance = InstancePower(module, fetch.get('status'))
instance.run()
if module.params.get('status'):
fetch.update({'status': module.params['status']})
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
if response.get('deletionProtection') != request.get('deletionProtection'):
deletion_protection_update(module, request, response)
if response.get('labels') != request.get('labels'):
label_fingerprint_update(module, request, response)
if response.get('machineType') != request.get('machineType'):
machine_type_update(module, request, response)
if response.get('shieldedInstanceConfig') != request.get('shieldedInstanceConfig'):
shielded_instance_config_update(module, request, response)
def label_fingerprint_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/setLabels"]).format(**module.params),
{u'labelFingerprint': response.get('labelFingerprint'), u'labels': module.params.get('labels')},
)
def machine_type_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/setMachineType"]).format(**module.params),
{u'machineType': machine_type_selflink(module.params.get('machine_type'), module.params)},
)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#instance',
u'canIpForward': module.params.get('can_ip_forward'),
u'deletionProtection': module.params.get('deletion_protection'),
u'disks': InstanceDisksArray(module.params.get('disks', []), module).to_request(),
u'guestAccelerators': InstanceGuestacceleratorsArray(module.params.get('guest_accelerators', []), module).to_request(),
u'hostname': module.params.get('hostname'),
u'labels': module.params.get('labels'),
u'metadata': module.params.get('metadata'),
u'machineType': machine_type_selflink(module.params.get('machine_type'), module.params),
u'minCpuPlatform': module.params.get('min_cpu_platform'),
u'name': module.params.get('name'),
u'networkInterfaces': InstanceNetworkinterfacesArray(module.params.get('network_interfaces', []), module).to_request(),
u'scheduling': InstanceScheduling(module.params.get('scheduling', {}), module).to_request(),
u'serviceAccounts': InstanceServiceaccountsArray(module.params.get('service_accounts', []), module).to_request(),
u'shieldedInstanceConfig': InstanceShieldedinstanceconfig(module.params.get('shielded_instance_config', {}), module).to_request(),
u'status': module.params.get('status'),
u'tags': InstanceTags(module.params.get('tags', {}), module).to_request(),
}
request = encode_request(request, module)
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
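# Illustration (hypothetical values): the loop above deliberately keeps explicit
# False values while dropping None and empty containers, e.g.
#   {'canIpForward': False, 'labels': None, 'hostname': '', 'name': 'vm-1'}
#   -> {'canIpForward': False, 'name': 'vm-1'}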
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
result = decode_response(result, module)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
request = decode_response(request, module)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
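# Note: is_different() only compares keys present on both sides, so output-only
# fields that the server adds to the response (e.g. 'id', 'cpuPlatform') never
# mark an otherwise matching resource as changed.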
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'canIpForward': response.get(u'canIpForward'),
u'cpuPlatform': response.get(u'cpuPlatform'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'deletionProtection': response.get(u'deletionProtection'),
u'disks': InstanceDisksArray(module.params.get('disks', []), module).to_request(),
u'guestAccelerators': InstanceGuestacceleratorsArray(response.get(u'guestAccelerators', []), module).from_response(),
u'hostname': response.get(u'hostname'),
u'id': response.get(u'id'),
u'labelFingerprint': response.get(u'labelFingerprint'),
u'labels': response.get(u'labels'),
u'metadata': response.get(u'metadata'),
u'machineType': response.get(u'machineType'),
u'minCpuPlatform': response.get(u'minCpuPlatform'),
u'name': response.get(u'name'),
u'networkInterfaces': InstanceNetworkinterfacesArray(response.get(u'networkInterfaces', []), module).from_response(),
u'scheduling': InstanceScheduling(response.get(u'scheduling', {}), module).from_response(),
u'serviceAccounts': InstanceServiceaccountsArray(response.get(u'serviceAccounts', []), module).from_response(),
u'shieldedInstanceConfig': InstanceShieldedinstanceconfig(response.get(u'shieldedInstanceConfig', {}), module).from_response(),
u'status': response.get(u'status'),
u'statusMessage': response.get(u'statusMessage'),
u'tags': InstanceTags(response.get(u'tags', {}), module).from_response(),
}
def disk_type_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1/projects/.*/zones/.*/diskTypes/.*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/diskTypes/%s".format(**params) % name
return name
def machine_type_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1/projects/.*/zones/.*/machineTypes/.*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/machineTypes/%s".format(**params) % name
return name
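# Illustration (hypothetical params): both *_selflink helpers pass fully
# qualified URLs through unchanged and expand short names, e.g. with
# params = {'project': 'my-project', 'zone': 'us-central1-a'}:
#   machine_type_selflink('n1-standard-1', params)
#   -> 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/machineTypes/n1-standard-1'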
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
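# Illustration (hypothetical op id): async_op_url(module, {'op_id': 'op-123'})
# -> '.../projects/my-project/zones/us-central1-a/operations/op-123'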
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
response = fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instance')
if response:
return decode_response(response, module)
else:
return {}
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
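# Note: wait_for_completion() polls the operation once per second until its
# status reaches DONE, failing the module via raise_if_errors() if an error
# payload appears; to my understanding, v1 zone operations report PENDING,
# RUNNING, and finally DONE.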
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
def encode_request(request, module):
if 'metadata' in request and request['metadata'] is not None:
request['metadata'] = metadata_encoder(request['metadata'])
return request
def decode_response(response, module):
if 'metadata' in response and response['metadata'] is not None:
response['metadata'] = metadata_decoder(response['metadata'])
return response
# TODO(alexstephen): Implement updating metadata on existing resources.
# Expose instance 'metadata' as a simple name/value pair hash. However the API
# defines metadata as a NestedObject with the following layout:
#
# metadata {
# fingerprint: 'hash-of-last-metadata'
# items: [
# {
# key: 'metadata1-key'
# value: 'metadata1-value'
# },
# ...
# ]
# }
#
def metadata_encoder(metadata):
metadata_new = []
for key in metadata:
value = metadata[key]
metadata_new.append({"key": key, "value": value})
return {'items': metadata_new}
# Map metadata.items[]{key:,value:} => metadata[key]=value
def metadata_decoder(metadata):
items = {}
if 'items' in metadata:
metadata_items = metadata['items']
for item in metadata_items:
items[item['key']] = item['value']
return items
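# Illustration (hypothetical values) of the round trip between the two shapes:
#   metadata_encoder({'startup-script': 'echo hi'})
#   -> {'items': [{'key': 'startup-script', 'value': 'echo hi'}]}
#   metadata_decoder({'fingerprint': 'abc==', 'items': [{'key': 'startup-script', 'value': 'echo hi'}]})
#   -> {'startup-script': 'echo hi'}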
class InstancePower(object):
def __init__(self, module, current_status):
self.module = module
self.current_status = current_status
self.desired_status = self.module.params.get('status')
def run(self):
# GcpRequest handles unicode text handling
if GcpRequest({'status': self.current_status}) == GcpRequest({'status': self.desired_status}):
return
elif self.desired_status == 'RUNNING':
self.start()
elif self.desired_status == 'TERMINATED':
self.stop()
elif self.desired_status == 'SUSPENDED':
self.module.fail_json(msg="Instances cannot be suspended using Ansible")
def start(self):
auth = GcpSession(self.module, 'compute')
wait_for_operation(self.module, auth.post(self._start_url()))
def stop(self):
auth = GcpSession(self.module, 'compute')
wait_for_operation(self.module, auth.post(self._stop_url()))
def _start_url(self):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/start".format(**self.module.params)
def _stop_url(self):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instances/{name}/stop".format(**self.module.params)
def deletion_protection_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(
[
"https://www.googleapis.com/compute/v1/",
"projects/{project}/zones/{zone}/instances/{name}/setDeletionProtection?deletionProtection={deletionProtection}",
]
).format(**module.params),
{},
)
def shielded_instance_config_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/instances/{name}/updateShieldedInstanceConfig"]).format(
**module.params
),
{
u'enableSecureBoot': navigate_hash(module.params, ['shielded_instance_config', 'enable_secure_boot']),
u'enableVtpm': navigate_hash(module.params, ['shielded_instance_config', 'enable_vtpm']),
u'enableIntegrityMonitoring': navigate_hash(module.params, ['shielded_instance_config', 'enable_integrity_monitoring']),
},
)
class InstanceDisksArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'autoDelete': item.get('auto_delete'),
u'boot': item.get('boot'),
u'deviceName': item.get('device_name'),
u'diskEncryptionKey': InstanceDiskencryptionkey(item.get('disk_encryption_key', {}), self.module).to_request(),
u'index': item.get('index'),
u'initializeParams': InstanceInitializeparams(item.get('initialize_params', {}), self.module).to_request(),
u'interface': item.get('interface'),
u'mode': item.get('mode'),
u'source': replace_resource_dict(item.get(u'source', {}), 'selfLink'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'autoDelete': item.get(u'autoDelete'),
u'boot': item.get(u'boot'),
u'deviceName': item.get(u'deviceName'),
u'diskEncryptionKey': InstanceDiskencryptionkey(item.get(u'diskEncryptionKey', {}), self.module).from_response(),
u'index': item.get(u'index'),
u'initializeParams': InstanceInitializeparams(self.module.params.get('initialize_params', {}), self.module).to_request(),
u'interface': item.get(u'interface'),
u'mode': item.get(u'mode'),
u'source': item.get(u'source'),
u'type': item.get(u'type'),
}
)
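# Note: this and the remaining Instance* helper classes share one pattern:
# to_request() maps Ansible's snake_case options to the API's camelCase JSON
# keys, from_response() selects the camelCase keys coming back, and
# remove_nones_from_dict() strips unset fields in either direction.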
class InstanceDiskencryptionkey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rawKey': self.request.get('raw_key'), u'rsaEncryptedKey': self.request.get('rsa_encrypted_key')})
def from_response(self):
return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey'), u'rsaEncryptedKey': self.request.get(u'rsaEncryptedKey')})
class InstanceInitializeparams(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'diskName': self.request.get('disk_name'),
u'diskSizeGb': self.request.get('disk_size_gb'),
u'diskType': disk_type_selflink(self.request.get('disk_type'), self.module.params),
u'sourceImage': self.request.get('source_image'),
u'sourceImageEncryptionKey': InstanceSourceimageencryptionkey(self.request.get('source_image_encryption_key', {}), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'diskName': self.request.get(u'diskName'),
u'diskSizeGb': self.request.get(u'diskSizeGb'),
u'diskType': self.request.get(u'diskType'),
u'sourceImage': self.request.get(u'sourceImage'),
u'sourceImageEncryptionKey': InstanceSourceimageencryptionkey(self.request.get(u'sourceImageEncryptionKey', {}), self.module).from_response(),
}
)
class InstanceSourceimageencryptionkey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})
def from_response(self):
return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
class InstanceGuestacceleratorsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'acceleratorCount': item.get('accelerator_count'), u'acceleratorType': item.get('accelerator_type')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'acceleratorCount': item.get(u'acceleratorCount'), u'acceleratorType': item.get(u'acceleratorType')})
class InstanceNetworkinterfacesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'accessConfigs': InstanceAccessconfigsArray(item.get('access_configs', []), self.module).to_request(),
u'aliasIpRanges': InstanceAliasiprangesArray(item.get('alias_ip_ranges', []), self.module).to_request(),
u'network': replace_resource_dict(item.get(u'network', {}), 'selfLink'),
u'networkIP': item.get('network_ip'),
u'subnetwork': replace_resource_dict(item.get(u'subnetwork', {}), 'selfLink'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'accessConfigs': InstanceAccessconfigsArray(item.get(u'accessConfigs', []), self.module).from_response(),
u'aliasIpRanges': InstanceAliasiprangesArray(item.get(u'aliasIpRanges', []), self.module).from_response(),
u'network': item.get(u'network'),
u'networkIP': item.get(u'networkIP'),
u'subnetwork': item.get(u'subnetwork'),
}
)
class InstanceAccessconfigsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{u'name': item.get('name'), u'natIP': replace_resource_dict(item.get(u'nat_ip', {}), 'address'), u'type': item.get('type')}
)
def _response_from_item(self, item):
return remove_nones_from_dict({u'name': item.get(u'name'), u'natIP': item.get(u'natIP'), u'type': item.get(u'type')})
class InstanceAliasiprangesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'ipCidrRange': item.get('ip_cidr_range'), u'subnetworkRangeName': item.get('subnetwork_range_name')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'ipCidrRange': item.get(u'ipCidrRange'), u'subnetworkRangeName': item.get(u'subnetworkRangeName')})
class InstanceScheduling(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'automaticRestart': self.request.get('automatic_restart'),
u'onHostMaintenance': self.request.get('on_host_maintenance'),
u'preemptible': self.request.get('preemptible'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'automaticRestart': self.request.get(u'automaticRestart'),
u'onHostMaintenance': self.request.get(u'onHostMaintenance'),
u'preemptible': self.request.get(u'preemptible'),
}
)
class InstanceServiceaccountsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'email': item.get('email'), u'scopes': item.get('scopes')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'email': item.get(u'email'), u'scopes': item.get(u'scopes')})
class InstanceShieldedinstanceconfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'enableSecureBoot': self.request.get('enable_secure_boot'),
u'enableVtpm': self.request.get('enable_vtpm'),
u'enableIntegrityMonitoring': self.request.get('enable_integrity_monitoring'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'enableSecureBoot': self.request.get(u'enableSecureBoot'),
u'enableVtpm': self.request.get(u'enableVtpm'),
u'enableIntegrityMonitoring': self.request.get(u'enableIntegrityMonitoring'),
}
)
class InstanceTags(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'fingerprint': self.request.get('fingerprint'), u'items': self.request.get('items')})
def from_response(self):
return remove_nones_from_dict({u'fingerprint': self.request.get(u'fingerprint'), u'items': self.request.get(u'items')})
if __name__ == '__main__':
main()
| gpl-3.0 |
sernst/Trackway-Gait-Analysis | tracksim/cli/commands/list_.py | 1 | 1859 | from argparse import ArgumentParser
from tracksim import system
from tracksim import reader
from tracksim import paths
from tracksim import cli
DESCRIPTION = """
Lists all existing group and trial results from cached results folders
"""
def list_groups():
system.log('===== GROUPS =====', whitespace_bottom=1)
results_path = paths.results('group.html')
for uid, data_path in reader.listings('group').items():
url = 'file://{}?id={}'.format(results_path, uid)
system.log(
"""
--- {uid} ---
{url}
""".format(uid=uid, url=url),
whitespace_bottom=1
)
def list_trials():
system.log('===== TRIALS =====', whitespace_bottom=1)
results_path = paths.results('trials.html')
for uid, data_path in reader.listings('trial').items():
url = 'file://{}?id={}'.format(results_path, uid)
system.log(
"""
--- {uid} ---
{url}
""".format(uid=uid, url=url),
whitespace_bottom=1
)
def execute_command():
"""
:return:
"""
parser = ArgumentParser()
parser.description = cli.reformat(DESCRIPTION)
parser.add_argument(
'list_command',
type=str,
help='The list command itself'
)
parser.add_argument(
'report_type',
type=str,
nargs='?',
default=None,
help='The type of report to list.'
)
args = vars(parser.parse_args())
report_type = args['report_type']
if not report_type:
report_type = 'all'
else:
report_type = report_type.lower()
print('')
if report_type[0] == 'g':
list_groups()
elif report_type[0] == 't':
list_trials()
else:
list_groups()
print('')
list_trials()
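# Illustration (hypothetical invocation): a command line such as
#   <cli> list trials
# routes through execute_command() and prints only the trial listings; any
# report type not starting with 'g' or 't' falls back to printing both the
# group and trial sections.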
| mit |
stclair/wes-cms | django/db/backends/creation.py | 79 | 22561 | import sys
import time
from django.conf import settings
from django.utils.datastructures import DictWrapper
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
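    # Illustration (hypothetical arguments): self._digest('tbl_a', 'tbl_b')
    # returns a short hex string such as '1a2b3c4d', which the methods below
    # embed in generated constraint and index names to keep them unique within
    # the backend's identifier-length limits.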
def db_type(self, field):
return self._db_type(field, field.get_internal_type())
def related_db_type(self, field):
return self._db_type(field, field.get_related_internal_type())
def _db_type(self, field, internal_type):
data = DictWrapper(field.__dict__, self.connection.ops.quote_name, "qn_")
try:
return self.connection.creation.data_types[internal_type] % data
except KeyError:
return None
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
if not f.null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
if pending:
pr = pending_references.setdefault(f.rel.to, []).append((model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"Return the SQL snippet defining the foreign key reference for a field"
qn = self.connection.ops.quote_name
if field.rel.to in known_models:
output = [style.SQL_KEYWORD('REFERENCES') + ' ' + \
style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' + \
style.SQL_FIELD(qn(field.rel.to._meta.get_field(field.rel.field_name).column)) + ')' +
self.connection.ops.deferrable_sql()
]
pending = False
else:
# We haven't yet created the table to which this field
# is related, so save it for later.
output = []
pending = True
return output, pending
def sql_for_pending_references(self, model, style, pending_references):
"Returns any ALTER TABLE statements to add constraints after the fact."
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
qn = self.connection.ops.quote_name
final_output = []
opts = model._meta
if model in pending_references:
for rel_class, f in pending_references[model]:
rel_opts = rel_class._meta
r_table = rel_opts.db_table
r_col = f.column
table = opts.db_table
col = opts.get_field(f.rel.field_name).column
# For MySQL, r_name must be unique in the first 64 characters.
# So we are careful with character usage here.
r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
final_output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % \
(qn(r_table), qn(truncate_name(r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
del pending_references[model]
return final_output
def sql_for_many_to_many(self, model, style):
"Return the CREATE TABLE statments for all the many-to-many tables defined on a model"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
output = []
for f in model._meta.local_many_to_many:
if model._meta.managed or f.rel.to._meta.managed:
output.extend(self.sql_for_many_to_many_field(model, f, style))
return output
def sql_for_many_to_many_field(self, model, f, style):
"Return the CREATE TABLE statements for a single m2m field"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
from django.db import models
from django.db.backends.util import truncate_name
output = []
if f.auto_created:
opts = model._meta
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or opts.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace, inline=True)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
table_output = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + \
style.SQL_TABLE(qn(f.m2m_db_table())) + ' (']
table_output.append(' %s %s %s%s,' %
(style.SQL_FIELD(qn('id')),
style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL PRIMARY KEY'),
tablespace_sql))
deferred = []
inline_output, deferred = self.sql_for_inline_many_to_many_references(model, f, style)
table_output.extend(inline_output)
table_output.append(' %s (%s, %s)%s' %
(style.SQL_KEYWORD('UNIQUE'),
style.SQL_FIELD(qn(f.m2m_column_name())),
style.SQL_FIELD(qn(f.m2m_reverse_name())),
tablespace_sql))
table_output.append(')')
if opts.db_tablespace:
# f.db_tablespace is only for indices, so ignore its value here.
table_output.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
table_output.append(';')
output.append('\n'.join(table_output))
for r_table, r_col, table, col in deferred:
r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
(qn(r_table),
qn(truncate_name(r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
# Add any extra SQL needed to support auto-incrementing PKs
autoinc_sql = self.connection.ops.autoinc_sql(f.m2m_db_table(), 'id')
if autoinc_sql:
for stmt in autoinc_sql:
output.append(stmt)
return output
def sql_for_inline_many_to_many_references(self, model, field, style):
"Create the references to other tables required by a many-to-many table"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s %s (%s)%s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL REFERENCES'),
style.SQL_TABLE(qn(opts.db_table)),
style.SQL_FIELD(qn(opts.pk.column)),
self.connection.ops.deferrable_sql()),
' %s %s %s %s (%s)%s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL REFERENCES'),
style.SQL_TABLE(qn(field.rel.to._meta.db_table)),
style.SQL_FIELD(qn(field.rel.to._meta.pk.column)),
self.connection.ops.deferrable_sql())
]
deferred = []
return table_output, deferred
def sql_indexes_for_model(self, model, style):
"Returns the CREATE INDEX SQL statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
return output
def sql_indexes_for_field(self, model, f, style):
"Return the CREATE INDEX SQL statements for a single model field"
from django.db.backends.util import truncate_name
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(i_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
"(%s)" % style.SQL_FIELD(qn(f.column)) +
"%s;" % tablespace_sql]
else:
output = []
return output
def sql_destroy_model(self, model, references_to_delete, style):
"Return the DROP TABLE and restraint dropping statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy:
return []
output = []
qn = self.connection.ops.quote_name
for rel_class, f in references_to_delete[model]:
table = rel_class._meta.db_table
col = f.column
r_table = model._meta.db_table
r_col = model._meta.get_field(f.rel.field_name).column
r_name = '%s_refs_%s_%s' % (col, r_col, self._digest(table, r_table))
output.append('%s %s %s %s;' % \
(style.SQL_KEYWORD('ALTER TABLE'),
style.SQL_TABLE(qn(table)),
style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
style.SQL_FIELD(qn(truncate_name(r_name, self.connection.ops.max_name_length())))))
del references_to_delete[model]
return output
def sql_destroy_many_to_many(self, model, f, style):
"Returns the DROP TABLE statements for a single m2m field"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
qn = self.connection.ops.quote_name
output = []
if f.auto_created:
output.append("%s %s;" % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(f.m2m_db_table()))))
ds = self.connection.ops.drop_sequence_sql("%s_%s" % (model._meta.db_table, f.column))
if ds:
output.append(ds)
return output
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print "Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Confirm the feature set of the test database
self.connection.features.confirm()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
from django.db import router
if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
call_command('createcachetable', cache._table, database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
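    # Illustration: with settings NAME = 'myapp' and no TEST_NAME override,
    # _get_test_db_name() returns 'test_myapp'.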
def _create_test_db(self, verbosity, autoclobber):
"Internal implementation - creates the test db tables."
suffix = self.sql_table_creation_suffix()
test_database_name = self._get_test_db_name()
qn = self.connection.ops.quote_name
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = self.connection.cursor()
self.set_autocommit()
try:
cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database '%s'..." % self.connection.alias
cursor.execute("DROP DATABASE %s" % qn(test_database_name))
cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print "Destroying test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)
self.connection.settings_dict['NAME'] = old_database_name
self._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"Internal implementation - remove the test db tables."
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
self.set_autocommit()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"Make sure a connection is in autocommit mode."
if hasattr(self.connection.connection, "autocommit"):
if callable(self.connection.connection.autocommit):
self.connection.connection.autocommit(True)
else:
self.connection.connection.autocommit = True
elif hasattr(self.connection.connection, "set_isolation_level"):
self.connection.connection.set_isolation_level(0)
def sql_table_creation_suffix(self):
"SQL to append to the end of the test table creation statements"
return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
| bsd-3-clause |
Swiftea/Crawler | crawler/index/database_ii.py | 2 | 3746 | from time import time
from pymongo import IndexModel, TEXT
from pymongo.write_concern import WriteConcern
from pymodm import connection, MongoModel, EmbeddedMongoModel, fields, errors
from crawler.index import index
class Word(MongoModel):
word = fields.CharField()
documents = fields.DictField(blank=True)
language = fields.CharField()
class Meta:
write_concern = WriteConcern(w=1)
connection_alias = 'my-app'
final = True
indexes = [IndexModel([('word', TEXT)])]
def connect(MONGODB_CON_STRING, database_name='inverted_index'):
db_url = MONGODB_CON_STRING.format(database_name)
connection.connect(db_url, alias="my-app")
def add_word(word, doc_id, nb_words, language, occurrence):
tf = round(occurrence / nb_words, 7)
try:
w = Word.objects.raw({
'$text': {'$search': "\"{}\"".format(word), '$language': language},
'language': language
}).first()
if w.word != word:
raise errors.DoesNotExist
except errors.DoesNotExist:
w = Word(word, {doc_id: tf}, language).save()
else:
if doc_id in w.documents:
if w.documents[doc_id] != tf:
return
w.documents[doc_id] = tf
w.save()
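# Hedged aside (not part of the original module): the value stored per
# document is a plain term frequency, so a hypothetical call such as
# add_word('velo', doc_id='12', nb_words=200, language='french', occurrence=3)
# would store Word.documents['12'] == round(3 / 200, 7) == 0.015.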
def add_doc(keywords, doc_id, language):
nb_words = len(keywords)
beginning = time()
language = {'fr': 'french', 'en': 'english'}[language]
for word in keywords:
add_word(word[0], doc_id, nb_words, language, word[1])
t = time() - beginning
with open('stats/stat_up_index', 'a') as myfile:
myfile.write(str(t) + '\n')
def delete_word(id):
Word.objects.get({'_id': id}).delete()
def delete_doc(doc_id, language='*'):
"""Use language to limit the raws to read."""
if language == '*':
words = Word.objects.all()
else:
words = Word.objects.raw({'language': language})
for word in words:
if doc_id in word.documents:
if len(word.documents) == 1:
delete_word(word._id)
else:
del word.documents[doc_id]
word.save()
#
# def test():
# """Test functions"""
# add_word('camion', '5', 80, 'fr', 3)
# add_word('camion', '6', 30, 'fr', 1)
# add_word('mercato', '7', 30, 'it', 11)
# add_word('action', '36', 25, 'fr', 2)
# add_word('action', '37', 62, 'en', 3)
#
# add_doc(['buongiorno', 'capisco', 'chiamo', 'chiamo'], '7', 'it')
#
# # delete_doc('7', 'it')
# delete_doc('36', 'fr')
# delete_doc('5', 'en')
#
# def experimentations():
# # create
# Word('répondre', {'1': 0.1418, '7': 0.0319}, 'fr').save()
# Word('delete', {'1': 0, '7': 0}, 'en').save()
# Word('but', {'1': 0, '7': 0}, 'en').save()
# Word('but', {'2': 0.185, '6': 0.13}, 'fr').save()
# # Word('action', {'2': 0.185, '6': 0.13}, 'fr').save()
# # Word('action', {'2': 0.185, '6': 0.13, '12': 0.185}, 'fr').save()
# # Word('action', {'25': 0.1841487}, 'fr').save()
# # Word('avion', {}, 'fr').save() # {'documents': ['must not be blank (was: {})']}
# # Word('voiture', None, 'fr').save() # {'documents': ['must not be blank (was: None)']}
# Word('voiture', language='fr').save()
#
#
# # get
# # w = Word.objects.get({'word': 'vélo'}) # __main__.DoesNotExist
# w = Word.objects.raw({'word': 'vélo'})
# # print(w)
# # print(w.values())
# # print(dir(w))
# # print(w.first()) # __main__.DoesNotExist
#
# # update: add doc
# w = Word.objects.get({'word': 'répondre'})
# w.documents['8'] = 0.00001
# w.save()
#
# # update: update doc
# w = Word.objects.get({'word': 'répondre'})
# w.documents['8'] = 0.00005
# w.save()
#
# # delete word
# w = Word.objects.get({'word': 'delete', 'language': 'en'}).delete()
#
# # delete doc
# doc_id = '7'
# for word in Word.objects.raw({'language': 'fr'}):
# if doc_id in word.documents:
# del word.documents[doc_id]
# word.save()
#
# # # TODO: index: language.word
if __name__ == '__main__':
connect('test')
# experimentations()
# test()
| gpl-3.0 |
MFoster/breeze | django/contrib/gis/tests/distapp/tests.py | 104 | 19051 | from __future__ import absolute_import
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import GEOSGeometry, LineString
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import oracle, postgis, spatialite, no_oracle, no_spatialite
from django.test import TestCase
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
class DistanceTest(TestCase):
# A point we are testing distances with -- using a WGS84
# coordinate that'll be implicitly transformed to
# the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
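# Hedged aside: the D alias converts between units on the fly, which is why
# the mixed tuple/D entries in tx_dists and au_dists below are treated as
# (approximately) the same distance, e.g.:
# D(km=7).m == 7000.0
# D(mi=4.349).km # ~= 6.999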
def test01_init(self):
"Test initialization of distance models."
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test02_dwithin(self):
"Testing the `dwithin` lookup type."
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
# Performing distance queries on two projected coordinate systems one
# with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple): dist1, dist2 = dist
else: dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
if isinstance(dist, D) and not oracle: type_error = True
else: type_error = False
if isinstance(dist, tuple):
if oracle: dist = dist[1]
else: dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test03a_distance_method(self):
"Testing the `distance` GeoQuerySet method on projected coordinate systems."
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
# Oracle 11 thinks this is not a projected coordinate system, so it's
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
dist2 = SouthTexasCity.objects.distance(lagrange) # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt) # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange)
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
if oracle: tol = 2
else: tol = 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@no_spatialite
def test03b_distance_method(self):
"Testing the `distance` GeoQuerySet method on geodetic coordnate systems."
if oracle: tol = 2
else: tol = 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString( ( (150.902, -34.4245), (150.87, -34.5789) ) )
if oracle or connection.ops.geography:
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
else:
# PostGIS 1.4 and below is limited to distance queries only
# to/from point geometries, check for raising of ValueError.
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls)
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls.wkt)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11));
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test03c_distance_method(self):
"Testing the `distance` GeoQuerySet method used with `transform` on a geographic field."
# Normally you can't compute distances from a geometry field
# that is not a PointField (on PostGIS 1.4 and below).
if not connection.ops.geography:
self.assertRaises(ValueError, CensusZipcode.objects.distance, self.stx_pnt)
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which aren't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf)
self.assertEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
def test04_distance_lookups(self):
"Testing the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types."
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
# (thus, Houston and Southside place will be excluded as tested in
# the `test02_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
# Can't determine the units on SpatiaLite from PROJ.4 string, and
# Oracle 11 incorrectly thinks it is not projected.
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
def test05_geodetic_distance_lookups(self):
"Testing distance lookups on geodetic coordinate systems."
# Line is from Canberra to Sydney. Query is for all other cities within
# a 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
if oracle or connection.ops.geography:
# Oracle and PostGIS 1.5 can do distance lookups on arbitrary geometries.
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
else:
# PostGIS 1.4 and below only allows geodetic distance queries (utilizing
# ST_Distance_Sphere/ST_Distance_Spheroid) from Points to PointFields
# on geometry columns.
self.assertRaises(ValueError, dist_qs.count)
# Ensured that a ValueError was raised, none of the rest of the test is
# supported on this backend, so bail now.
if spatialite: return
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
# Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets = [qs1, qs2]
else:
querysets = [qs1]
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
def test06_area(self):
"Testing the `area` GeoQuerySet method."
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle and differences
# with GEOS 3.0.0RC4
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
def test07_length(self):
"Testing the `length` GeoQuerySet method."
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if spatialite:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
else:
qs = Interstate.objects.length()
if oracle: tol = 2
else: tol = 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@no_spatialite
def test08_perimeter(self):
"Testing the `perimeter` GeoQuerySet method."
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
if oracle: tol = 2
else: tol = 7
for i, z in enumerate(SouthTexasZipcode.objects.perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
def test09_measurement_null_fields(self):
"Testing the measurement GeoQuerySet methods on fields with NULL values."
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
self.assertEqual(None, z.distance)
self.assertEqual(None, z.area)
| bsd-3-clause |
Ayub-Khan/edx-platform | common/test/acceptance/tests/studio/base_studio_test.py | 12 | 5145 | """
Base classes used by studio tests.
"""
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...fixtures.course import CourseFixture
from ...fixtures.library import LibraryFixture
from ..helpers import UniqueCourseTest
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.utils import verify_ordering
class StudioCourseTest(UniqueCourseTest):
"""
Base class for all Studio course tests.
"""
def setUp(self, is_staff=False):
"""
Install a course with no content using a fixture.
"""
super(StudioCourseTest, self).setUp()
self.install_course_fixture(is_staff)
def install_course_fixture(self, is_staff=False):
"""
Install a course fixture
"""
self.course_fixture = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
self.populate_course_fixture(self.course_fixture)
self.course_fixture.install()
self.user = self.course_fixture.user
self.log_in(self.user, is_staff)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
pass
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the course. The user will be given instructor access
to the course and enrolled in it. By default the user will not have staff access unless
is_staff is passed as True.
Args:
user(dict): dictionary containing user data: {'username': ..., 'email': ..., 'password': ...}
is_staff(bool): register this user as staff
"""
self.auth_page = AutoAuthPage(
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
self.auth_page.visit()
class ContainerBase(StudioCourseTest):
"""
Base class for tests that do operations on the container page.
"""
def setUp(self, is_staff=False):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(ContainerBase, self).setUp(is_staff=is_staff)
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def go_to_nested_container_page(self):
"""
Go to the nested container page.
"""
unit = self.go_to_unit_page()
# The 0th entry is the unit page itself.
container = unit.xblocks[1].go_to_container()
return container
def go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'):
"""
Go to the test unit page.
"""
self.outline.visit()
subsection = self.outline.section(section_name).subsection(subsection_name)
return subsection.expand_subsection().unit(unit_name).go_to()
def do_action_and_verify(self, action, expected_ordering):
"""
Perform the supplied action and then verify the resulting ordering.
"""
container = self.go_to_nested_container_page()
action(container)
verify_ordering(self, container, expected_ordering)
# Reload the page to see that the change was persisted.
container = self.go_to_nested_container_page()
verify_ordering(self, container, expected_ordering)
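# Hedged sketch of how a subclass might use do_action_and_verify; the block
# names and the drag() action below are hypothetical, not part of this
# base class:
#
# class ExampleReorderTest(ContainerBase):
#     def test_reorder_components(self):
#         self.do_action_and_verify(
#             lambda container: container.drag(0, 1),  # hypothetical action
#             ['Component B', 'Component A'],          # expected ordering
#         )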
class StudioLibraryTest(WebAppTest):
"""
Base class for all Studio library tests.
"""
as_staff = True
def setUp(self):
"""
Install a library with no content using a fixture.
"""
super(StudioLibraryTest, self).setUp()
fixture = LibraryFixture(
'test_org',
self.unique_id,
'Test Library {}'.format(self.unique_id),
)
self.populate_library_fixture(fixture)
fixture.install()
self.library_fixture = fixture
self.library_info = fixture.library_info
self.library_key = fixture.library_key
self.user = fixture.user
self.log_in(self.user, self.as_staff)
def populate_library_fixture(self, library_fixture):
"""
Populate the children of the test course fixture.
"""
pass
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the library.
By default the user will not have staff access unless is_staff is passed as True.
"""
auth_page = AutoAuthPage(
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
auth_page.visit()
| agpl-3.0 |
Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/flask_wtf/recaptcha/validators.py | 91 | 2398 | try:
import urllib2 as http
except ImportError:
# Python 3
from urllib import request as http
from flask import request, current_app
from wtforms import ValidationError
from werkzeug import url_encode
from .._compat import to_bytes, to_unicode
import json
RECAPTCHA_VERIFY_SERVER = 'https://www.google.com/recaptcha/api/siteverify'
RECAPTCHA_ERROR_CODES = {
'missing-input-secret': 'The secret parameter is missing.',
'invalid-input-secret': 'The secret parameter is invalid or malformed.',
'missing-input-response': 'The response parameter is missing.',
'invalid-input-response': 'The response parameter is invalid or malformed.'
}
__all__ = ["Recaptcha"]
class Recaptcha(object):
"""Validates a ReCaptcha."""
def __init__(self, message=None):
if message is None:
message = RECAPTCHA_ERROR_CODES['missing-input-response']
self.message = message
def __call__(self, form, field):
if current_app.testing:
return True
if request.json:
response = request.json.get('g-recaptcha-response', '')
else:
response = request.form.get('g-recaptcha-response', '')
remote_ip = request.remote_addr
if not response:
raise ValidationError(field.gettext(self.message))
if not self._validate_recaptcha(response, remote_ip):
field.recaptcha_error = 'incorrect-captcha-sol'
raise ValidationError(field.gettext(self.message))
def _validate_recaptcha(self, response, remote_addr):
"""Performs the actual validation."""
try:
private_key = current_app.config['RECAPTCHA_PRIVATE_KEY']
except KeyError:
raise RuntimeError("No RECAPTCHA_PRIVATE_KEY config set")
data = url_encode({
'secret': private_key,
'remoteip': remote_addr,
'response': response
})
http_response = http.urlopen(RECAPTCHA_VERIFY_SERVER, to_bytes(data))
if http_response.code != 200:
return False
json_resp = json.loads(to_unicode(http_response.read()))
if json_resp["success"]:
return True
for error in json_resp.get("error-codes", []):
if error in RECAPTCHA_ERROR_CODES:
raise ValidationError(RECAPTCHA_ERROR_CODES[error])
return False
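# Hedged usage sketch (assumes flask_wtf's RecaptchaField, which attaches
# this Recaptcha validator by default; the config values are placeholders):
#
# from flask_wtf import FlaskForm
# from flask_wtf.recaptcha import RecaptchaField
#
# class CommentForm(FlaskForm):
#     recaptcha = RecaptchaField()
#
# app.config['RECAPTCHA_PUBLIC_KEY'] = '<site key>'
# app.config['RECAPTCHA_PRIVATE_KEY'] = '<secret key>'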
| apache-2.0 |
chris-chris/tensorflow | tensorflow/contrib/solvers/python/ops/linear_equations.py | 117 | 4452 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solvers for linear equations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def conjugate_gradient(operator,
rhs,
tol=1e-4,
max_iter=20,
name="conjugate_gradient"):
r"""Conjugate gradient solver.
Solves a linear system of equations `A*x = rhs` for self-adjoint, positive
definite matrix `A` and right-hand side vector `rhs`, using an iterative,
matrix-free algorithm where the action of the matrix A is represented by
`operator`. The iteration terminates when either the number of iterations
exceeds `max_iter` or when the residual norm has been reduced to `tol`
times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).
Args:
operator: An object representing a linear operator with attributes:
- shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension on the domain of the operator,
`shape[1]` is the dimension of the co-domain of the operator. In other
words, if operator represents an N x N matrix A, `shape` must contain
`[N, N]`.
- dtype: The datatype of input to and output from `apply`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand side vector.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
Returns:
output: A namedtuple representing the final state with fields:
- i: A scalar `int32` `Tensor`. Number of iterations executed.
- x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
- r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
- p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
- gamma: \\(||r||_2^2\\)
"""
# ephemeral class holding CG state.
cg_state = collections.namedtuple("CGState", ["i", "x", "r", "p", "gamma"])
def stopping_criterion(i, state):
return math_ops.logical_and(i < max_iter, state.gamma > tol)
# TODO(rmlarsen): add preconditioning
def cg_step(i, state):
z = operator.apply(state.p)
alpha = state.gamma / util.dot(state.p, z)
x = state.x + alpha * state.p
r = state.r - alpha * z
gamma = util.l2norm_squared(r)
beta = gamma / state.gamma
p = r + beta * state.p
return i + 1, cg_state(i + 1, x, r, p, gamma)
with ops.name_scope(name):
n = operator.shape[1:]
rhs = array_ops.expand_dims(rhs, -1)
gamma0 = util.l2norm_squared(rhs)
tol = tol * tol * gamma0
x = array_ops.expand_dims(
array_ops.zeros(
n, dtype=rhs.dtype.base_dtype), -1)
i = constant_op.constant(0, dtype=dtypes.int32)
state = cg_state(i=i, x=x, r=rhs, p=rhs, gamma=gamma0)
_, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
[i, state])
return cg_state(
state.i,
x=array_ops.squeeze(state.x),
r=array_ops.squeeze(state.r),
p=array_ops.squeeze(state.p),
gamma=state.gamma)
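# Hedged usage sketch: wrap a small SPD matrix in the minimal operator
# interface documented above (the namedtuple-based operator here is
# illustrative only, not an API of this module):
#
# import collections
# import numpy as np
# a = constant_op.constant(np.array([[4., 1.], [1., 3.]], dtype=np.float32))
# Operator = collections.namedtuple('Operator', 'shape dtype apply')
# operator = Operator(shape=[2, 2], dtype=a.dtype,
#                     apply=lambda v: math_ops.matmul(a, v))
# rhs = constant_op.constant([1., 2.])
# cg = conjugate_gradient(operator, rhs, tol=1e-6, max_iter=25)
# # cg.x approximates the solution of A * x = rhs once evaluated in a session.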
| apache-2.0 |
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/jinja2/nodes.py | 130 | 29392 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import types
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import izip, with_metaclass, text_type, PY2
#: the types we support for context functions
_context_function_types = (types.FunctionType, types.MethodType)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
def dump(self):
def _dump(node):
if not isinstance(node, Node):
buf.append(repr(node))
return
buf.append('nodes.%s(' % node.__class__.__name__)
if not node.fields:
buf.append(')')
return
for idx, field in enumerate(node.fields):
if idx:
buf.append(', ')
value = getattr(node, field)
if isinstance(value, list):
buf.append('[')
for idx, item in enumerate(value):
if idx:
buf.append(', ')
_dump(item)
buf.append(']')
else:
_dump(value)
buf.append(')')
buf = []
_dump(self)
return ''.join(buf)
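# Hedged illustration of the constructor contract described above
# (positional fields, keyword attributes); assumes an ordinary Environment:
#
# from jinja2 import Environment
# node = Add(Const(1), Const(2), lineno=1).set_environment(Environment())
# node.as_const() # -> 3 (constant folding)
# node.dump() # -> "nodes.Add(nodes.Const(1), nodes.Const(2))"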
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class With(Stmt):
"""Specific node for with statements. In older versions of Jinja the
with statement was implemented on the base of the `Scope` node instead.
.. versionadded:: 2.9.3
"""
fields = ('targets', 'values', 'body')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class AssignBlock(Stmt):
"""Assigns a block to a target."""
fields = ('target', 'body')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
complex values such as lists too. Only constants with a safe
representation (objects where ``eval(repr(x)) == x`` is true) are supported.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
rv = self.value
if PY2 and type(rv) is text_type and \
self.environment.policies['compiler.ascii_str']:
try:
rv = rv.encode('ascii')
except UnicodeError:
pass
return rv
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
# call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
# We cannot constant handle async filters, so we need to make sure
# to not go down this path.
if eval_ctx.environment.is_async and \
getattr(filter_, 'asyncfiltervariant', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [obj] + [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(*args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Subtract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
| apache-2.0 |
llonchj/sentry | src/sentry/api/permissions.py | 2 | 1954 | from __future__ import absolute_import
from rest_framework import permissions
from sentry.models import OrganizationMemberType, ProjectKey
class NoPermission(permissions.BasePermission):
def has_permission(self, request, view):
return False
class ScopedPermission(permissions.BasePermission):
"""
Permissions work depending on the type of authentication:
- A user inherits permissions based on their membership role. These are
still dictated as common scopes, but they can't be checked until the
has_object_permission hook is called.
- ProjectKeys (legacy) are granted only project-based scopes.
- APIKeys specify their scope, and work as expected.
"""
scope_map = {
'GET': (),
'POST': (),
'PUT': (),
'PATCH': (),
'DELETE': (),
}
# this is the general mapping of VERB => OrganizationMemberType, it however
# does not enforce organization-level (i.e. has_global_access) vs project
# level so that should be done per subclass
access_map = {
'GET': None,
'POST': OrganizationMemberType.ADMIN,
'PUT': OrganizationMemberType.ADMIN,
'DELETE': OrganizationMemberType.OWNER,
}
def has_permission(self, request, view):
# session-based auth has all scopes for a logged in user
if not request.auth:
return request.user.is_authenticated()
allowed_scopes = set(self.scope_map[request.method])
current_scopes = request.auth.get_scopes()
return any(s in allowed_scopes for s in current_scopes)
def has_object_permission(self, request, view, obj):
return False
def is_project_key(self, request):
return isinstance(request.auth, ProjectKey)
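# Hedged sketch of a concrete subclass; the scope names below are
# illustrative only, not taken from this module:
#
# class ExampleProjectPermission(ScopedPermission):
#     scope_map = {
#         'GET': ('project:read', 'project:write', 'project:delete'),
#         'POST': ('project:write', 'project:delete'),
#         'PUT': ('project:write', 'project:delete'),
#         'PATCH': ('project:write', 'project:delete'),
#         'DELETE': ('project:delete',),
#     }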
class SuperuserPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_superuser:
return True
return False
| bsd-3-clause |
rmfranciacastillo/freecodecamp_projects | weather/node_modules/node-gyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))
def mask_comments(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)(#)(.*)')
return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
return "%s%s%s%s" % (matchobj.group(1),
matchobj.group(2),
'x'*len(matchobj.group(3)),
matchobj.group(2))
def mask_quotes(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
return [search_re.sub(quote_replace, line) for line in input]
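# A quick illustration (assumed behavior): quoted content is replaced by 'x'
# characters of equal length, hiding any braces inside the quotes, e.g.
#   mask_quotes(['x = "ab{cd"'])  ->  ['x = "xxxxx"']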
def do_split(input, masked_input, search_re):
output = []
mask_output = []
for (line, masked_line) in zip(input, masked_input):
m = search_re.match(masked_line)
while m:
split = len(m.group(1))
line = line[:split] + r'\n' + line[split:]
masked_line = masked_line[:split] + r'\n' + masked_line[split:]
m = search_re.match(masked_line)
output.extend(line.split(r'\n'))
mask_output.extend(masked_line.split(r'\n'))
return (output, mask_output)
def split_double_braces(input):
"""Masks out the quotes and comments, and then splits appropriate
lines (lines that match the double_*_brace regexes below) before
indenting them below.
These are used to split lines which have multiple braces on them, so
that the indentation looks prettier when all laid out (e.g. closing
braces make a nice diagonal line).
"""
double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
masked_input = mask_quotes(input)
masked_input = mask_comments(masked_input)
(output, mask_output) = do_split(input, masked_input, double_open_brace_re)
(output, mask_output) = do_split(output, mask_output, double_close_brace_re)
return output
def count_braces(line):
"""keeps track of the number of braces on a given line and returns the result.
It starts at zero and subtracts for closed braces, and adds for open braces.
"""
open_braces = ['[', '(', '{']
close_braces = [']', ')', '}']
closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
cnt = 0
stripline = COMMENT_RE.sub(r'', line)
stripline = QUOTE_RE.sub(r"''", stripline)
for char in stripline:
for brace in open_braces:
if char == brace:
cnt += 1
for brace in close_braces:
if char == brace:
cnt -= 1
after = False
if cnt > 0:
after = True
# This catches the special case of a closing brace having something
# other than just whitespace ahead of it -- we don't want to
# unindent that until after this line is printed so it stays with
# the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline):
after = True
return (cnt, after)
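# Worked examples (assumed behavior) of the (count, after) contract:
#   count_braces("'targets': [{")  -> (2, True)    # indent after printing
#   count_braces("}],")            -> (-2, False)  # unindent before printing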
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
if len(sys.argv) > 1:
data = open(sys.argv[1]).read().splitlines()
else:
data = sys.stdin.read().splitlines()
# Split up the double braces.
lines = split_double_braces(data)
# Indent and print the output.
prettyprint_input(lines)
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
num1r0/bb_tools | wordlist_generator.py | 1 | 4922 | """
Wordlist generator tool.
Generates an extended wordlist based on an initial list of possible words.
Used mainly with hash-cracking tools: hashcat, john, etc.
TO DO:
- Add logging function
"""
import datetime
import itertools
import sys
import os
def usage():
""" Usage function """
usage_message = """Usage wordlist_generator.py [ OPTIONS ]
OPTIONS:
-i Path to initial wordlist file (default: wordlist.txt)
-o Name of the file to save generated wordlist (default: gen_ext_wl.txt)
-t Datetime obtained from the 'date' command, used as the origin timestamp (ex.: Sat 28 Oct 22:06:28 BST 2017)
-w Time window size (in seconds). Subtracted/added to origin timestamp
-h Display this menu
EXAMPLES:
wordlist_generator.py -i wl.txt -o res.txt -t "Sat 28 Oct 22:06:28 BST 2017" -w 10
"""
print usage_message
def create_permutations(wordlist):
"""
Creates all possible permutations for given wordlist
"""
extended_wordlist = []
for length in range(0, len(wordlist)+1):
for subset in itertools.permutations(wordlist, length):
extended_wordlist.append("".join(subset))
return extended_wordlist
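# Example: create_permutations(['a', 'b']) returns
# ['', 'a', 'b', 'ab', 'ba'] -- every ordering of every subset, joined.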
def convert_to_epoch_time(origin):
"""
Converts a datetime into a unix timestamp. Takes as an argument the output of the linux 'date' command.
Input example: Sat 28 Oct 22:06:28 BST 2017
"""
pattern = "%a %d %b %H:%M:%S %Z %Y"
timestamp = datetime.datetime.strptime(origin, pattern).strftime("%s")
return timestamp
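# Example usage: convert_to_epoch_time("Sat 28 Oct 22:06:28 BST 2017") returns
# the matching unix timestamp as a string; the exact value depends on the
# system timezone, since strftime("%s") is resolved locally.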
def generate_timestamps(epoch_origin, seconds_interval):
"""
Takes an origin timestamp and generates a list of timestamps within the specified interval of seconds around it
"""
timestamps = []
past_timestamp = int(epoch_origin) - int(seconds_interval)
future_timestamp = int(epoch_origin) + int(seconds_interval)
for timestamp in range(past_timestamp, future_timestamp+1):
timestamps.append(timestamp)
return timestamps
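# Example: generate_timestamps(1000, 2) -> [998, 999, 1000, 1001, 1002]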
def generate_extended_wordlist(timestamps, wordlist):
"""
For each timestamp, we generate the wordlist using permutations
"""
extended_wordlist = []
iter_wordlist = []
for timestamp in timestamps:
iter_wordlist = list(wordlist)
iter_wordlist.append(str(timestamp))
iter_extended_wordlist = create_permutations(iter_wordlist)
del iter_wordlist[:]
diff_wordlist = list(set(iter_extended_wordlist) - set(extended_wordlist))
extended_wordlist += diff_wordlist
return sorted(extended_wordlist)
def get_wordlist_from_file(file_path):
"""
Simple read file function; omits newline '\n' character on each line
"""
f = open(str(file_path), "r")
wordlist = f.read().splitlines()
return wordlist
def save_to_file(file_path, wordlist):
"""
Simple write file function
"""
if not str(file_path):
file_path = "gen_ext_wl.txt"
with open(file_path, 'w') as f:
for word in wordlist:
f.write(word)
f.write("\n")
f.close()
def main():
"""
Entry point
"""
arguments = sys.argv[1:]
if len(arguments) <= 1:
usage()
exit(0)
try:
# Need help?
arguments.index("-h")
usage()
except:
# Get initial wordlist file name
try:
initial_wordlist_path = str(arguments[int(arguments.index("-i") + 1)])
except:
# Logging function
initial_wordlist_path = "wordlist.txt"
print initial_wordlist_path
# Get file name to store generated wordlist
try:
new_wordlist_path = str(arguments[int(arguments.index("-o") + 1)])
except:
# Logging function
new_wordlist_path = "gen_ext_wl.txt"
print new_wordlist_path
# Get origin timestamp
try:
origin_timestamp = str(arguments[int(arguments.index("-t") + 1)])
except:
# Logging function
process = os.popen("date")
origin_timestamp = str(process.read()).strip()
process.close()
print origin_timestamp
# Get time window in seconds
try:
time_window_seconds = str(arguments[int(arguments.index("-w") + 1)])
except:
# Logging function
time_window_seconds = 45
print time_window_seconds
initial_wordlist = get_wordlist_from_file(initial_wordlist_path)
if not origin_timestamp.isdigit():
origin_timestamp = convert_to_epoch_time(origin_timestamp)
generated_timestamps = generate_timestamps(origin_timestamp, time_window_seconds)
generated_extended_wordlist = generate_extended_wordlist(generated_timestamps, initial_wordlist)
save_to_file(new_wordlist_path, generated_extended_wordlist)
if __name__ == "__main__":
main()
| gpl-3.0 |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/openshift_health_checker/library/rpm_version.py | 56 | 4226 | #!/usr/bin/python
"""
Ansible module for rpm-based systems, determining existing package version information on a host.
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
IMPORT_EXCEPTION = None
try:
import rpm # pylint: disable=import-error
except ImportError as err:
IMPORT_EXCEPTION = err # in tox test env, rpm import fails
class RpmVersionException(Exception):
"""Base exception class for package version problems"""
def __init__(self, message, problem_pkgs=None):
Exception.__init__(self, message)
self.problem_pkgs = problem_pkgs
def main():
"""Entrypoint for this Ansible module"""
module = AnsibleModule(
argument_spec=dict(
package_list=dict(type="list", required=True),
),
supports_check_mode=True
)
if IMPORT_EXCEPTION:
module.fail_json(msg="rpm_version module could not import rpm: %s" % IMPORT_EXCEPTION)
# determine the packages we will look for
pkg_list = module.params['package_list']
if not pkg_list:
module.fail_json(msg="package_list must not be empty")
# get list of packages available and complain if any
# of them are missing or if any errors occur
try:
pkg_versions = _retrieve_expected_pkg_versions(_to_dict(pkg_list))
_check_pkg_versions(pkg_versions, _to_dict(pkg_list))
except RpmVersionException as excinfo:
module.fail_json(msg=str(excinfo))
module.exit_json(changed=False)
def _to_dict(pkg_list):
return {pkg["name"]: pkg for pkg in pkg_list}
def _retrieve_expected_pkg_versions(expected_pkgs_dict):
"""Search for installed packages matching given pkg names
and versions. Returns a dictionary: {pkg_name: [versions]}"""
transaction = rpm.TransactionSet()
pkgs = {}
for pkg_name in expected_pkgs_dict:
matched_pkgs = transaction.dbMatch("name", pkg_name)
if not matched_pkgs:
continue
for header in matched_pkgs:
if header['name'] == pkg_name:
if pkg_name not in pkgs:
pkgs[pkg_name] = []
pkgs[pkg_name].append(header['version'])
return pkgs
def _check_pkg_versions(found_pkgs_dict, expected_pkgs_dict):
invalid_pkg_versions = {}
not_found_pkgs = []
for pkg_name, pkg in expected_pkgs_dict.items():
if not found_pkgs_dict.get(pkg_name):
not_found_pkgs.append(pkg_name)
continue
found_versions = [_parse_version(version) for version in found_pkgs_dict[pkg_name]]
if isinstance(pkg["version"], string_types):
expected_versions = [_parse_version(pkg["version"])]
else:
expected_versions = [_parse_version(version) for version in pkg["version"]]
if not set(expected_versions) & set(found_versions):
invalid_pkg_versions[pkg_name] = {
"found_versions": found_versions,
"required_versions": expected_versions,
}
if not_found_pkgs:
raise RpmVersionException(
'\n'.join([
"The following packages were not found to be installed: {}".format('\n '.join([
"{}".format(pkg)
for pkg in not_found_pkgs
]))
]),
not_found_pkgs,
)
if invalid_pkg_versions:
raise RpmVersionException(
'\n '.join([
"The following packages were found to be installed with an incorrect version: {}".format('\n'.join([
" \n{}\n Required version: {}\n Found versions: {}".format(
pkg_name,
', '.join(pkg["required_versions"]),
', '.join([version for version in pkg["found_versions"]]))
for pkg_name, pkg in invalid_pkg_versions.items()
]))
]),
invalid_pkg_versions,
)
def _parse_version(version_str):
segs = version_str.split('.')
if not segs or len(segs) <= 2:
return version_str
return '.'.join(segs[0:2])
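# Examples: _parse_version('3.9.14') -> '3.9'; strings with two or fewer
# segments such as '3.9' or '3' are returned unchanged by the guard above.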
if __name__ == '__main__':
main()
| apache-2.0 |
beakman/droidlab | droidlab/experiments/serializers.py | 1 | 1991 | from rest_framework import serializers
from rest_framework.reverse import reverse
from .models import Experiment, Result
class ResultSerializer(serializers.ModelSerializer):
class Meta:
model = Result
exclude = ('experiment',)
# class ExperimentSerializer(serializers.HyperlinkedModelSerializer):
# results = serializers.HyperlinkedIdentityField(view_name="results-list")
# class Meta:
# model = Experiment
# fields = ('name', 'date', 'results')
class ExperimentSerializer(serializers.ModelSerializer):
results = ResultSerializer(many=True)
class Meta:
model = Experiment
fields = ('id', 'name', 'date', 'user', 'results')
def create(self, validated_data):
results_data = validated_data.pop('results')
ex = Experiment.objects.create(**validated_data)
for result_data in results_data:
Result.objects.create(experiment=ex, **result_data)
return ex
def update(self, instance, validated_data):
results_data = validated_data.pop('results')
# Unless the application properly enforces that this field is
# always set, the following could raise a `DoesNotExist`, which
# would need to be handled.
results = instance.results
instance.save()
results.save()
return instance
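# Minimal usage sketch (the Result field names below are assumptions, not
# taken from the actual model):
#
#     serializer = ExperimentSerializer(data={
#         'name': 'exp1', 'date': '2017-01-01', 'user': 1,
#         'results': [{'value': 42}],
#     })
#     if serializer.is_valid():
#         serializer.save()  # dispatches to create() above, nesting Results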
# class ResultHyperlink(serializers.HyperlinkedRelatedField):
# # We define these as class attributes, so we don't need to pass them as arguments.
# view_name = 'result-detail'
# queryset = Result.objects.all()
# def get_url(self, obj, view_name, request, format):
# url_kwargs = {
# 'experiment_name': obj.experiment.name,
# 'experiment_pk': obj.pk
# }
# return reverse(view_name, kwargs=url_kwargs, request=request, format=format)
# def get_object(self, view_name, view_args, view_kwargs):
# lookup_kwargs = {
# 'experiment__name': view_kwargs['experiment_name'],
# 'pk': view_kwargs['experiment_pk']
# }
# return self.get_queryset().get(**lookup_kwargs) | bsd-3-clause |
tracierenea/gnuradio | grc/gui/Colors.py | 4 | 1969 | """
Copyright 2008,2013 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
try:
import pygtk
pygtk.require('2.0')
import gtk
_COLORMAP = gtk.gdk.colormap_get_system() #create all of the colors
def get_color(color_code): return _COLORMAP.alloc_color(color_code, True, True)
HIGHLIGHT_COLOR = get_color('#00FFFF')
BORDER_COLOR = get_color('#444444')
# missing blocks stuff
MISSING_BLOCK_BACKGROUND_COLOR = get_color('#FFF2F2')
MISSING_BLOCK_BORDER_COLOR = get_color('red')
#param entry boxes
PARAM_ENTRY_TEXT_COLOR = get_color('black')
ENTRYENUM_CUSTOM_COLOR = get_color('#EEEEEE')
#flow graph color constants
FLOWGRAPH_BACKGROUND_COLOR = get_color('#FFFFFF')
COMMENT_BACKGROUND_COLOR = get_color('#F3F3F3')
FLOWGRAPH_EDGE_COLOR = COMMENT_BACKGROUND_COLOR
#block color constants
BLOCK_ENABLED_COLOR = get_color('#F1ECFF')
BLOCK_DISABLED_COLOR = get_color('#CCCCCC')
BLOCK_BYPASSED_COLOR = get_color('#FFFFE6')
#connection color constants
CONNECTION_ENABLED_COLOR = get_color('black')
CONNECTION_DISABLED_COLOR = get_color('#BBBBBB')
CONNECTION_ERROR_COLOR = get_color('red')
except:
print 'Unable to import Colors'
DEFAULT_DOMAIN_COLOR_CODE = '#777777'
| gpl-3.0 |
ilo10/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
mcallistersean/b2-issue-tracker | toucan/user_profile/forms.py | 2 | 2699 | from django import forms
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField
from phonenumber_field.widgets import PhoneNumberPrefixWidget
from ..organisations.models import Organisation
from .models import Profile, NotificationSettings
from allauth.account.forms import LoginForm
phone_number_help_text = _('Please enter your mobile phone number in international format. E.g. "+431234526"')
class BaseUserProfileSignupForm(forms.Form):
phone = PhoneNumberField(
label=_('Your mobile phone number'),
help_text=phone_number_help_text,
required=False
)
def signup(self, request, user):
# save phone number to profile
profile, created = Profile.objects.get_or_create(
user=user
)
phone_number = self.cleaned_data['phone']
if phone_number:
profile.phone_number = phone_number
profile.save()
class UserProfileSignupForm(BaseUserProfileSignupForm):
org = forms.ModelChoiceField(
queryset=Organisation.objects.all(),
label=_('Organisation'),
help_text=_('You can become a member of an existing organisation now or create your own at a later point.'),
required=False
)
def signup(self, request, user):
super().signup(request, user)
# if given add to organisation
org = self.cleaned_data.get('org')
if org:
org.add_member(user)
class NotificationSettingsForm(forms.ModelForm):
class Meta:
model = NotificationSettings
fields = [
'point',
'point_radius',
'notification_type',
'organisations',
'issue_types',
]
widgets = {
'point': forms.HiddenInput
}
help_texts = {
'organisations': _('Notifications can be filtered by Organisation (default: all).'),
'issue_types': _('Filter by issue type (default: all).'),
'notification_type': _('Choose how you want to be notified.')
}
error_messages = {
'point': {
'required': _('Please select a point from the map.')
}
}
class ToucanLoginForm(LoginForm):
def get_initial_for_field(self, field, field_name):
if field_name == 'remember':
return True
return super().get_initial_for_field(field, field_name)
class PhoneNumberUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = [
'phone_number'
]
help_texts = {
'phone_number': phone_number_help_text
} | mit |
aroche/django | tests/utils_tests/test_text.py | 243 | 9471 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.test import SimpleTestCase
from django.utils import six, text
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.translation import override
lazystr = lazy(force_text, six.text_type)
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
def test_get_text_list(self):
self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(text.get_text_list(['a']), 'a')
self.assertEqual(text.get_text_list([]), '')
with override('ar'):
self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator(
'The quick brown fox jumped over the lazy dog.'
)
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.chars(100)),
self.assertEqual('The quick brown fox ...',
truncator.chars(23)),
self.assertEqual('The quick brown fo.....',
truncator.chars(23, '.....')),
# Ensure that we normalize our unicode data first
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü...', nfc.chars(5))
self.assertEqual('oü...', nfd.chars(5))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A...', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('...', text.Truncator('asdf').chars(1))
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy '
'dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.words(10))
self.assertEqual('The quick brown fox...', truncator.words(4))
self.assertEqual('The quick brown fox[snip]',
truncator.words(4, '[snip]'))
def test_truncate_html_words(self):
truncator = text.Truncator('<p id="par"><strong><em>The quick brown fox'
' jumped over the lazy dog.</em></strong></p>')
self.assertEqual('<p id="par"><strong><em>The quick brown fox jumped over'
' the lazy dog.</em></strong></p>', truncator.words(10, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox...</em>'
'</strong></p>', truncator.words(4, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox....</em>'
'</strong></p>', truncator.words(4, '....', html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox</em>'
'</strong></p>', truncator.words(4, '', html=True))
# Test with new line inside tag
truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
self.assertEqual('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over'
' the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown...',
truncator.words(3, '...', html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> '
'jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown...</em>',
truncator.words(3, '...', html=True))
# Test html entities
truncator = text.Truncator('<i>Buenos días!'
' ¿Cómo está?</i>')
self.assertEqual('<i>Buenos días! ¿Cómo...</i>',
truncator.words(3, '...', html=True))
truncator = text.Truncator('<p>I <3 python, what about you?</p>')
self.assertEqual('<p>I <3 python...</p>',
truncator.words(3, '...', html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7),
'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10),
'a\n%s\nword' % long_word)
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"),
"abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
def test_normalize_newlines_bytes(self):
"""normalize_newlines should be able to handle bytes too"""
normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
self.assertEqual(normalized, "abc\ndef\nghi\n")
self.assertIsInstance(normalized, six.text_type)
def test_slugify(self):
items = (
# given - expected - unicode?
('Hello, World!', 'hello-world', False),
('spam & eggs', 'spam-eggs', False),
('spam & ıçüş', 'spam-ıçüş', True),
('foo ıç bar', 'foo-ıç-bar', True),
(' foo ıç bar', 'foo-ıç-bar', True),
('你好', '你好', True),
)
for value, output, is_unicode in items:
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
('&', '&'),
('&', '&'),
('foo & bar', 'foo & bar'),
('foo & bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
def test_compress_sequence(self):
data = [{'key': i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode('utf-8') for s in seq]
actual_length = len(b''.join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b''.join(out))
self.assertTrue(compressed_length < actual_length)
| bsd-3-clause |
FabianHahn/libstore | thirdparty/googletest/googletest/test/gtest_color_test.py | 17 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
tomtor/QGIS | tests/src/python/test_qgspropertyoverridebutton.py | 45 | 5307 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPropertyOverrideButton.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '11/01/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsPropertyDefinition,
QgsProperty,
QgsApplication,
QgsProjectColorScheme)
from qgis.gui import (QgsColorButton,
QgsPropertyOverrideButton)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtGui import QColor
start_app()
class TestQgsPropertyOverrideButton(unittest.TestCase):
def testProjectColor(self):
scheme = [s for s in QgsApplication.colorSchemeRegistry().schemes() if isinstance(s, QgsProjectColorScheme)][0]
scheme.setColors([])
definition = QgsPropertyDefinition('test', 'test', QgsPropertyDefinition.ColorWithAlpha)
button = QgsPropertyOverrideButton()
button.init(0, QgsProperty(), definition)
button.aboutToShowMenu()
self.assertIn('Project Color', [a.text() for a in button.menu().actions()])
self.assertIn('Color', [a.text() for a in button.menu().actions()])
color_action = [a for a in button.menu().actions() if a.text() == 'Color'][0]
self.assertEqual([a.text() for a in color_action.menu().actions()][0], 'No colors set')
# add some project colors
scheme.setColors([[QColor(255, 0, 0), 'color 1'], [QColor(255, 255, 0), 'burnt marigold']])
button.aboutToShowMenu()
self.assertIn('Project Color', [a.text() for a in button.menu().actions()])
self.assertIn('Color', [a.text() for a in button.menu().actions()])
color_action = [a for a in button.menu().actions() if a.text() == 'Color'][0]
self.assertEqual([a.text() for a in color_action.menu().actions()], ['color 1', 'burnt marigold'])
button.menuActionTriggered(color_action.menu().actions()[1])
self.assertTrue(button.toProperty().isActive())
self.assertEqual(button.toProperty().asExpression(), 'project_color(\'burnt marigold\')')
button.menuActionTriggered(color_action.menu().actions()[0])
self.assertTrue(button.toProperty().isActive())
self.assertEqual(button.toProperty().asExpression(), 'project_color(\'color 1\')')
button.setToProperty(QgsProperty.fromExpression('project_color(\'burnt marigold\')'))
button.aboutToShowMenu()
color_action = [a for a in button.menu().actions() if a.text() == 'Color'][0]
self.assertTrue(color_action.isChecked())
self.assertEqual([a.isChecked() for a in color_action.menu().actions()], [False, True])
# should also see color menu for ColorNoAlpha properties
definition = QgsPropertyDefinition('test', 'test', QgsPropertyDefinition.ColorNoAlpha)
button = QgsPropertyOverrideButton()
button.init(0, QgsProperty(), definition)
button.aboutToShowMenu()
self.assertIn('Project Color', [a.text() for a in button.menu().actions()])
self.assertIn('Color', [a.text() for a in button.menu().actions()])
# but no color menu for other types
definition = QgsPropertyDefinition('test', 'test', QgsPropertyDefinition.Double)
button = QgsPropertyOverrideButton()
button.init(0, QgsProperty(), definition)
button.aboutToShowMenu()
self.assertNotIn('Project Color', [a.text() for a in button.menu().actions()])
self.assertNotIn('Color', [a.text() for a in button.menu().actions()])
def testLinkedColorButton(self):
definition = QgsPropertyDefinition('test', 'test', QgsPropertyDefinition.ColorWithAlpha)
button = QgsPropertyOverrideButton()
button.init(0, QgsProperty(), definition)
cb = QgsColorButton()
button.registerLinkedWidget(cb)
project_scheme = [s for s in QgsApplication.colorSchemeRegistry().schemes() if isinstance(s, QgsProjectColorScheme)][0]
project_scheme.setColors([[QColor(255, 0, 0), 'col1'], [QColor(0, 255, 0), 'col2']])
button.setToProperty(QgsProperty.fromValue('#ff0000'))
self.assertTrue(cb.isEnabled())
self.assertFalse(cb.linkedProjectColorName())
button.setActive(False)
self.assertTrue(cb.isEnabled())
self.assertFalse(cb.linkedProjectColorName())
button.setToProperty(QgsProperty.fromExpression('project_color(\'Cthulhu\'s delight\')'))
self.assertTrue(cb.isEnabled())
self.assertFalse(cb.linkedProjectColorName())
button.setToProperty(QgsProperty.fromExpression('project_color(\'col1\')'))
self.assertTrue(cb.isEnabled())
self.assertEqual(cb.linkedProjectColorName(), 'col1')
button.setActive(False)
self.assertTrue(cb.isEnabled())
self.assertFalse(cb.linkedProjectColorName())
button.setActive(True)
self.assertTrue(cb.isEnabled())
self.assertEqual(cb.linkedProjectColorName(), 'col1')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
zanemcca/node-inspector | front-end/cm/PRESUBMIT.py | 148 | 2368 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _CheckCodeMirrorChanges(input_api, output_api):
errorText = ("ERROR: Attempt to modify CodeMirror. The only allowed changes are "
"rolls from the upstream (http://codemirror.net). If this is a roll, "
"make sure you mention 'roll CodeMirror' (no quotes) in the change description.\n"
"CodeMirror rolling instructions:\n"
" https://sites.google.com/a/chromium.org/devtools-codemirror-rolling")
changeDescription = input_api.change.DescriptionText()
errors = []
if not "roll codemirror" in changeDescription.lower():
errors.append(output_api.PresubmitError(errorText))
return errors
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CheckCodeMirrorChanges(input_api, output_api))
return results
| bsd-2-clause |
icarito/sugar | src/jarabe/frame/clipboardmenu.py | 3 | 8708 | # Copyright (C) 2007, One Laptop Per Child
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
import tempfile
import urlparse
import os
import logging
from gi.repository import Gio
from gi.repository import Gtk
from sugar3.graphics.palette import Palette
from sugar3.graphics.menuitem import MenuItem
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
from sugar3.datastore import datastore
from sugar3 import mime
from sugar3 import env
from sugar3 import profile
from sugar3.activity.i18n import pgettext
from jarabe.frame import clipboard
from jarabe.journal import misc
from jarabe.model import bundleregistry
class ClipboardMenu(Palette):
def __init__(self, cb_object):
Palette.__init__(self, text_maxlen=100)
self._cb_object = cb_object
self.set_group_id('frame')
cb_service = clipboard.get_instance()
cb_service.connect('object-state-changed',
self._object_state_changed_cb)
self._remove_item = MenuItem(pgettext('Clipboard', 'Remove'),
'list-remove')
self._remove_item.connect('activate', self._remove_item_activate_cb)
self.menu.append(self._remove_item)
self._remove_item.show()
self._open_item = MenuItem(_('Open'), 'zoom-activity')
self._open_item.connect('activate', self._open_item_activate_cb)
self.menu.append(self._open_item)
self._open_item.show()
self._journal_item = MenuItem(_('Keep'))
color = profile.get_color()
icon = Icon(icon_name='document-save',
pixel_size=style.SMALL_ICON_SIZE,
xo_color=color)
self._journal_item.set_image(icon)
self._journal_item.connect('activate', self._journal_item_activate_cb)
self.menu.append(self._journal_item)
self._journal_item.show()
self._update()
def _update_open_submenu(self):
activities = self._get_activities()
logging.debug('_update_open_submenu: %r', activities)
child = self._open_item.get_child()
if activities is None or len(activities) <= 1:
child.set_text(_('Open'))
if self._open_item.get_submenu() is not None:
self._open_item.set_submenu(None)
return
child.set_text(_('Open with'))
submenu = self._open_item.get_submenu()
if submenu is None:
submenu = Gtk.Menu()
self._open_item.set_submenu(submenu)
submenu.show()
else:
for item in submenu.get_children():
submenu.remove(item)
for service_name in activities:
registry = bundleregistry.get_registry()
activity_info = registry.get_bundle(service_name)
if not activity_info:
logging.warning('Activity %s is unknown.', service_name)
continue  # skip unknown bundles; activity_info would be None below
item = Gtk.MenuItem(activity_info.get_name())
item.connect('activate', self._open_submenu_item_activate_cb,
service_name)
submenu.append(item)
item.show()
def _update_items_visibility(self):
activities = self._get_activities()
installable = self._cb_object.is_bundle()
percent = self._cb_object.get_percent()
if percent == 100 and (activities or installable):
self._remove_item.props.sensitive = True
self._open_item.props.sensitive = True
self._journal_item.props.sensitive = True
elif percent == 100 and (not activities and not installable):
self._remove_item.props.sensitive = True
self._open_item.props.sensitive = False
self._journal_item.props.sensitive = True
else:
self._remove_item.props.sensitive = True
self._open_item.props.sensitive = False
self._journal_item.props.sensitive = False
def _get_activities(self):
mime_type = self._cb_object.get_mime_type()
if not mime_type:
return ''
registry = bundleregistry.get_registry()
activities = registry.get_activities_for_type(mime_type)
if activities:
return [info.get_bundle_id() for info in activities]
else:
return ''
def _object_state_changed_cb(self, cb_service, cb_object):
if cb_object != self._cb_object:
return
self._update()
def _update(self):
self.props.primary_text = self._cb_object.get_name()
preview = self._cb_object.get_preview()
if preview:
self.props.secondary_text = preview
self._update_items_visibility()
self._update_open_submenu()
def _open_item_activate_cb(self, menu_item):
logging.debug('_open_item_activate_cb')
percent = self._cb_object.get_percent()
if percent < 100 or menu_item.get_submenu() is not None:
return
jobject = self._copy_to_journal()
misc.resume(jobject.metadata, self._get_activities()[0])
jobject.destroy()
def _open_submenu_item_activate_cb(self, menu_item, service_name):
logging.debug('_open_submenu_item_activate_cb')
percent = self._cb_object.get_percent()
if percent < 100:
return
jobject = self._copy_to_journal()
misc.resume(jobject.metadata, service_name)
jobject.destroy()
def _remove_item_activate_cb(self, menu_item):
cb_service = clipboard.get_instance()
cb_service.delete_object(self._cb_object.get_id())
def _journal_item_activate_cb(self, menu_item):
logging.debug('_journal_item_activate_cb')
jobject = self._copy_to_journal()
jobject.destroy()
def _write_to_temp_file(self, data):
tmp_dir = os.path.join(env.get_profile_path(), 'data')
f, file_path = tempfile.mkstemp(dir=tmp_dir)
try:
os.write(f, data)
finally:
os.close(f)
return file_path
def _copy_to_journal(self):
formats = self._cb_object.get_formats().keys()
most_significant_mime_type = mime.choose_most_significant(formats)
format_ = self._cb_object.get_formats()[most_significant_mime_type]
transfer_ownership = False
if most_significant_mime_type == 'text/uri-list':
uri = format_.get_data()
if uri.startswith('file://'):
parsed_url = urlparse.urlparse(uri)
file_path = parsed_url.path # pylint: disable=E1101
transfer_ownership = False
mime_type = mime.get_for_file(file_path)
else:
file_path = self._write_to_temp_file(format_.get_data())
transfer_ownership = True
mime_type = 'text/uri-list'
else:
if format_.is_on_disk():
parsed_url = urlparse.urlparse(format_.get_data())
file_path = parsed_url.path # pylint: disable=E1101
transfer_ownership = False
mime_type = mime.get_for_file(file_path)
else:
file_path = self._write_to_temp_file(format_.get_data())
transfer_ownership = True
sniffed_mime_type = mime.get_for_file(file_path)
if sniffed_mime_type == 'application/octet-stream':
mime_type = most_significant_mime_type
else:
mime_type = sniffed_mime_type
jobject = datastore.create()
jobject.metadata['title'] = self._cb_object.get_name()
jobject.metadata['keep'] = '0'
jobject.metadata['buddies'] = ''
jobject.metadata['preview'] = ''
settings = Gio.Settings('org.sugarlabs.user')
color = settings.get_string('color')
jobject.metadata['icon-color'] = color
jobject.metadata['mime_type'] = mime_type
jobject.file_path = file_path
datastore.write(jobject, transfer_ownership=transfer_ownership)
return jobject
| gpl-3.0 |
lmyrefelt/CouchPotatoServer | libs/flask/logging.py | 838 | 1398 | # -*- coding: utf-8 -*-
"""
flask.logging
~~~~~~~~~~~~~
Implements the logging support for Flask.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG
def create_logger(app):
"""Creates a logger for the given application. This logger works
similar to a regular Python logger but changes the effective logging
level based on the application's debug flag. Furthermore this
function also removes all attached handlers in case there was a
logger with the log name before.
"""
Logger = getLoggerClass()
class DebugLogger(Logger):
def getEffectiveLevel(x):
if x.level == 0 and app.debug:
return DEBUG
return Logger.getEffectiveLevel(x)
class DebugHandler(StreamHandler):
def emit(x, record):
StreamHandler.emit(x, record) if app.debug else None
handler = DebugHandler()
handler.setLevel(DEBUG)
handler.setFormatter(Formatter(app.debug_log_format))
logger = getLogger(app.logger_name)
# just in case that was not a new logger, get rid of all the handlers
# already attached to it.
del logger.handlers[:]
logger.__class__ = DebugLogger
logger.addHandler(handler)
return logger
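# Typical effect, sketched (Flask wires this up internally):
#     app = Flask(__name__)
#     app.debug = True
#     app.logger.debug('visible: DebugLogger lowers the effective level')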
| gpl-3.0 |
jroyal/plexpy | lib/requests/packages/urllib3/response.py | 243 | 11686 | import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
from .packages.six import string_types as basestring, binary_type
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
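# Sketch of the decoder contract assumed by HTTPResponse.read() below:
#     decoder = _get_decoder('deflate')
#     plain = decoder.decompress(chunk)   # zlib framing first, raw on retry
# DeflateDecoder first tries RFC 1950 (zlib-wrapped) data and falls back to
# raw deflate (-MAX_WBITS) over the buffered bytes if that fails.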
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = HTTPHeaderDict()
if headers:
self.headers.update(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if not 'read operation timed out' in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
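# Typical streaming use, sketched:
#     for chunk in response.stream(2**10):
#         fp.write(chunk)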
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = HTTPHeaderDict()
for k, v in r.getheaders():
headers.add(k, v)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
| gpl-3.0 |
neogis-de/PPPP_utilities | pointcloud/identification_de_plan.py | 2 | 3554 | # -*- coding: utf-8 -*-
""" ce module permet de trouver un plan dans un nuage de points par RANSAC
@author Islam
copyriught 2015 Thales
"""
import numpy as np
import ransac as ran
import random
def creation_tableau(nb):
"""Creation d'un tableau de points
Args:
nb: Nombre de points
Return:
nuage_pts: le tableau de points
"""
nuage_pts = np.zeros((nb,3),dtype=np.float32)
for i in range(0,nuage_pts.shape[0]):
nuage_pts[i] = np.array([random.random(),random.random(),random.random()*0.1])
return nuage_pts
def augment(xyzs):
"""Provient de Projet py_ransac, pas d'info sur cette fonction
"""
axyz = np.ones((len(xyzs), 4))
axyz[:, :3] = xyzs
return axyz
def estimate(xyzs):
"""Provient de Projet py_ransac, pas d'info sur cette fonction
"""
axyz = augment(xyzs[:3])
return np.linalg.svd(axyz)[-1][-1, :]
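# Note (added): the SVD above fits the plane a*x + b*y + c*z + d = 0 through
# the three points by taking the right-singular vector associated with the
# smallest singular value of the augmented 3x4 matrix. A quick sanity check:
#
# coeffs = estimate(np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]))
# # coeffs is proportional to (0, 0, 1, 0), i.e. the plane z = 0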
def is_inlier(coeffs, xyz, threshold):
"""Boolean qui permet de savoir si un point appartient a un plan
"""
return np.abs(coeffs.dot(augment([xyz]).T)) < threshold
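# Note (added): the test above compares |a*x + b*y + c*z + d| to the
# threshold directly. Since the coefficients returned by estimate() are not
# normalized, this value is the point-to-plane distance scaled by the norm
# of (a, b, c); for a threshold in true metric units, divide by
# np.linalg.norm(coeffs[:3]) first.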
def sample(data, sample_size, accept_more=True, random_seed=None):
"""Provient de Projet py_ransac, pas d'info sur cette fonction
"""
random.seed(random_seed)
p = sample_size * 1. / len(data)
sample = []
while len(sample) < sample_size:
sample = []
for datum in data:
if random.random() < p:
sample.append(datum)
if (accept_more and len(sample) >= sample_size) or len(sample) == sample_size:
return sample
def fonction_pt_inlier(tableau, threshold):
"""Fonction qui permet d'obtenir la liste des indexes des point faisant parties du plan
Args:
tableau: le nuage de points
threshold: l'erreur maximum a ne pas depasser pour considerer un point comme faisant partie du plan
"""
list_index = []
s = sample(tableau, 3)
m = estimate(s)
for i in range(0, tableau.shape[0]):
if is_inlier(m,tableau[i], threshold):
list_index.insert(0,i)
return list_index
def trouver_plan(pts, max_iterations, goal_inlier, Threshold):
"""Permet de trouver un plan a partir d'un nuage de points
Args:
pts: nuage de points
max_iteration: Maximum d'iteration pour rechercher le meilleur plan
goal_inlier: Le plan qui nous interesse
Threshold: Erreur maximum pour considerer un point comme faisant partie du plan
Return:
(a,b,c,d): liste correspondant aux indice du vecteur normal et de la distance par rapport a l'origine
list_inliers: liste des index des points se tranvant dans le plan
"""
list_inliers = fonction_pt_inlier(pts, Threshold)
m, b = ran.run_ransac(pts, estimate, lambda x, y: is_inlier(x, y, Threshold), 3, goal_inlier, max_iterations,stop_at_goal=False)
return list_inliers, m
def trouver_plan_test():
#from datetime import datetime
n = 100
max_iterations = 100
goal_inliers = n * 0.3
nb = 100
threshold = 0.1
pts = creation_tableau(nb)
#debut = datetime.now()
listInd, (a, b, c, d) = trouver_plan(pts, max_iterations, goal_inliers, threshold)
#fin = datetime.now()
print("Le vecteur normal de la fonction est: [", a, "x ",b,"y ",c,"z]")
print(" la distance par rapport a l'origine est: ",d)
print("la liste des indexes des points appartenant au plan trouve est:")
print(listInd)
return (a, b, c, d), listInd
| lgpl-3.0 |
cfriedt/gnuradio | gr-digital/python/digital/qa_simple_framer.py | 57 | 2053 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
class test_simple_framer(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_simple_framer_001(self):
src_data = (0x00, 0x11, 0x22, 0x33,
0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb,
0xcc, 0xdd, 0xee, 0xff)
expected_result = (
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x00, 0x00, 0x11, 0x22, 0x33, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x01, 0x44, 0x55, 0x66, 0x77, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x02, 0x88, 0x99, 0xaa, 0xbb, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x03, 0xcc, 0xdd, 0xee, 0xff, 0x55)
src = blocks.vector_source_b(src_data)
op = digital.simple_framer(4)
dst = blocks.vector_sink_b()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
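# Note (added): each 14-byte frame in expected_result above consists of
# the 8-byte access code ac dd a4 e2 f2 8c 20 fc, a 1-byte sequence
# number, the 4 payload bytes, and a trailing 0x55 pad byte.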
if __name__ == '__main__':
gr_unittest.run(test_simple_framer, "test_simple_framer.xml")
| gpl-3.0 |
RobotLocomotion/libbot | bot2-procman/python/src/bot_procman/sheriff.py | 3 | 60827 | """@package sheriff
\defgroup python_api Python API
"""
import os
import platform
import sys
import time
import random
import signal
import gobject
import lcm
from bot_procman.info_t import info_t
from bot_procman.orders_t import orders_t
from bot_procman.sheriff_cmd_t import sheriff_cmd_t
from bot_procman.command2_t import command2_t
from bot_procman.info2_t import info2_t
from bot_procman.orders2_t import orders2_t
from bot_procman.sheriff_cmd2_t import sheriff_cmd2_t
from bot_procman.deputy_cmd2_t import deputy_cmd2_t
from bot_procman.discovery_t import discovery_t
import bot_procman.sheriff_config as sheriff_config
from bot_procman.sheriff_script import SheriffScript
from bot_procman.signal_slot import Signal
def _dbg(text):
return
#sys.stderr.write("%s\n" % text)
def _warn(text):
sys.stderr.write("[WARNING] %s\n" % text)
def _now_utime():
return int(time.time() * 1000000)
## \addtogroup python_api
# @{
## Command status - trying to start
TRYING_TO_START = "Starting (Command Sent)"
## Command status - running
RUNNING = "Running"
## Command status - trying to stop a command
TRYING_TO_STOP = "Stopping (Command Sent)"
## Command status - removing a command
REMOVING = "Removing (Command Sent)"
## Command status - command stopped without error
STOPPED_OK = "Stopped (OK)"
## Command status - command stopped with error
STOPPED_ERROR = "Stopped (Error)"
## Command status - unknown status
UNKNOWN = "Unknown"
## Command status - restarting a command
RESTARTING = "Restarting (Command Sent)"
## @} ##
DEFAULT_STOP_SIGNAL = 2
DEFAULT_STOP_TIME_ALLOWED = 7
class SheriffCommandSpec(object):
"""Basic command specification.
\ingroup python_api
"""
__slots__ = [ "deputy_name", "exec_str", "command_id", "group_name",
"auto_respawn", "stop_signal", "stop_time_allowed" ]
def __init__(self):
"""Initializer.
"""
## the name of the deputy that will manage this command.
self.deputy_name = ""
## the actual command string to execute.
self.exec_str = ""
## an identifier string for this command. Must be unique within a deputy.
self.command_id = ""
## the command group name, or the empty string for no group.
self.group_name = ""
## True if the deputy should automatically restart the
# command when it exits. Auto respawning only happens when the desired
# state of the command is running.
self.auto_respawn = False
## When stopping the command, this OS-level signal will be sent to the
# command to request a clean exit. The default is SIGINT
self.stop_signal = DEFAULT_STOP_SIGNAL
## When stopping the command, the deputy will wait this amount of time
# (seconds) in between requesting a clean exit and forcing the command
# to stop via a SIGKILL
self.stop_time_allowed = DEFAULT_STOP_TIME_ALLOWED
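# Illustrative sketch (added): filling out a spec before handing it to
# Sheriff.add_command(), assuming ``sheriff`` is a connected Sheriff
# instance; all values below are hypothetical.
#
# spec = SheriffCommandSpec()
# spec.deputy_name = "localhost"
# spec.exec_str = "python my_node.py"
# spec.command_id = "my-node"
# spec.group_name = "sensors"
# spec.auto_respawn = True
# newcmd = sheriff.add_command(spec)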
class SheriffDeputyCommand(object):
"""A command managed by a deputy, which is in turn managed by the %Sheriff.
\ingroup python_api
"""
def __init__(self):
## Sheriff-assigned number used to identify the process to this
# sheriff.
self.sheriff_id = 0
## Process ID of the command as reported by the deputy.
# Has value -1 if the PID is unknown.
# Has value 0 if the process is not running (i.e., stopped)
# Has a positive value if the process is running.
self.pid = -1
## If the command is stopped, its exit code.
self.exit_code = 0
## Command CPU usage, as reported by the deputy. Ranges from [0, 1]
self.cpu_usage = 0
## Virtual memory used by the command.
self.mem_vsize_bytes = 0
## Resident memory used by the command.
self.mem_rss_bytes = 0
## The executable string for the command.
self.exec_str = ""
## A user-assigned string identifier for the command.
self.command_id = ""
## A user-assigned group name for the command, possibly empty.
self.group = ""
## Sheriff-managed variable used to start and stop the command.
self.desired_runid = 0
## Sheriff-managed variable used to start and stop the command.
self.force_quit = 0
## True if the command is being removed.
self.scheduled_for_removal = False
## Deputy-reported variable identifying the run ID of the command.
# This variable changes each time the command is started.
self.actual_runid = 0
## True if the deputy should automatically restart the command when it
# exits. Auto respawn only happens if the command is set to running.
self.auto_respawn = False
## When stopping the command, which OS signal to send the command to
# request that it cleanly exit. This usually defaults to SIGINT.
self.stop_signal = DEFAULT_STOP_SIGNAL
## When stopping the command, how much time to wait in between sending
# it stop_signal and a SIGKILL.
self.stop_time_allowed = DEFAULT_STOP_TIME_ALLOWED
## True if this data structure has been updated with information
# received from a deputy, False if not.
self.updated_from_info = False
def _update_from_cmd_info2(self, cmd_msg):
self.pid = cmd_msg.pid
self.actual_runid = cmd_msg.actual_runid
self.exit_code = cmd_msg.exit_code
self.cpu_usage = cmd_msg.cpu_usage
self.mem_vsize_bytes = cmd_msg.mem_vsize_bytes
self.mem_rss_bytes = cmd_msg.mem_rss_bytes
self.updated_from_info = True
# if the command has run to completion and we don't need it to respawn,
# then prevent it from respawning if the deputy restarts
if self.pid == 0 and \
self.actual_runid == self.desired_runid and \
not self.auto_respawn and \
not self.force_quit:
self.force_quit = 1
def _update_from_cmd_order2(self, cmd_msg):
assert self.sheriff_id == cmd_msg.sheriff_id
self.exec_str = cmd_msg.cmd.exec_str
self.command_id = cmd_msg.cmd.command_name
self.group = cmd_msg.cmd.group
self.desired_runid = cmd_msg.desired_runid
self.force_quit = cmd_msg.force_quit
self.stop_signal = cmd_msg.cmd.stop_signal
self.stop_time_allowed = cmd_msg.cmd.stop_time_allowed
def _set_group(self, group):
self.group = group
def _start(self):
# if the command is already running, then ignore
if self.pid > 0 and not self.force_quit:
return
self.desired_runid += 1
if self.desired_runid > (2 << 31):
self.desired_runid = 1
self.force_quit = 0
def _restart(self):
self.desired_runid += 1
if self.desired_runid > (2 << 31):
self.desired_runid = 1
self.force_quit = 0
def _stop(self):
self.force_quit = 1
def status(self):
"""Retrieve the status of the command, as understood by the
sheriff.
Returns one of:
- bot_procman.sheriff.TRYING_TO_START
- bot_procman.sheriff.RUNNING
- bot_procman.sheriff.TRYING_TO_STOP
- bot_procman.sheriff.REMOVING
- bot_procman.sheriff.STOPPED_OK
- bot_procman.sheriff.STOPPED_ERROR
- bot_procman.sheriff.UNKNOWN
- bot_procman.sheriff.RESTARTING
"""
if not self.updated_from_info:
return UNKNOWN
if self.desired_runid != self.actual_runid and not self.force_quit:
if self.pid == 0:
return TRYING_TO_START
else:
return RESTARTING
elif self.desired_runid == self.actual_runid:
if self.pid > 0:
if not self.force_quit and not self.scheduled_for_removal:
return RUNNING
else:
return TRYING_TO_STOP
else:
if self.scheduled_for_removal:
return REMOVING
elif self.exit_code == 0:
return STOPPED_OK
elif self.force_quit and \
os.WIFSIGNALED(self.exit_code) and \
os.WTERMSIG(self.exit_code) in [ signal.SIGTERM,
signal.SIGINT, signal.SIGKILL ]:
return STOPPED_OK
else: return STOPPED_ERROR
else:
return UNKNOWN
def __str__(self):
return """[%(exec_str)s]
group: %(group)s
sheriff_id: %(sheriff_id)d
pid: %(pid)d
exit_code: %(exit_code)d
cpu_usage: %(cpu_usage)f
mem_vsize: %(mem_vsize_bytes)d
mem_rss: %(mem_rss_bytes)d
actual_runid: %(actual_runid)d""" % self.__dict__
class SheriffDeputy(object):
"""%Sheriff view of a deputy
\ingroup python_api
"""
def __init__(self, name):
"""Initializes a deputy with the specified name. Do not use this
constructor directly. Instead, get a list of deputies from the
Sheriff.
"""
## Deputy name
self.name = name
## Last reported CPU load on the deputy. Ranges from [0, 1], where 0
# is no load and 1 is fully loaded.
self.cpu_load = 0
## Last reported total memory (in bytes) on the deputy.
self.phys_mem_total_bytes = 0
## Last reported free memory (in bytes) on the deputy.
self.phys_mem_free_bytes = 0
## Last time info from the deputy was received. Zero if no info has
# ever been received. Represented in microseconds since the epoch.
self.last_update_utime = 0
# Set to 1 if the deputy uses info_t, orders_t, etc.
# Set to 2 if the deputy uses info2_t, order2_t, etc.
self._orders_version = 2
# Dictionary of commands owned by the deputy
self._commands = {}
def get_commands(self):
"""Retrieve a list of all commands managed by the deputy
@return a list of SheriffDeputyCommand objects
"""
return self._commands.values()
def owns_command(self, cmd_object):
"""Check to see if this deputy manages the specified command
@param cmd_object a SheriffDeputyCommand object.
@return True if this deputy object manages \p command, False if not.
"""
return cmd_object.sheriff_id in self._commands and \
self._commands [cmd_object.sheriff_id] is cmd_object
def _update_from_deputy_info2(self, dep_info_msg):
"""
@dep_info_msg: an instance of bot_procman.info2_t
"""
status_changes = []
for cmd_msg in dep_info_msg.cmds:
# look up the command, or create a new one if it's not found
if cmd_msg.sheriff_id in self._commands:
cmd = self._commands[cmd_msg.sheriff_id]
old_status = cmd.status()
else:
cmd = SheriffDeputyCommand()
cmd.exec_str = cmd_msg.cmd.exec_str
cmd.command_id = cmd_msg.cmd.command_name
cmd.group = cmd_msg.cmd.group
cmd.auto_respawn = cmd_msg.cmd.auto_respawn
cmd.stop_signal = cmd_msg.cmd.stop_signal
cmd.sheriff_id = cmd_msg.sheriff_id
cmd.desired_runid = cmd_msg.actual_runid
cmd.stop_time_allowed = cmd_msg.cmd.stop_time_allowed
# TODO handle options
self._add_command(cmd)
old_status = None
cmd._update_from_cmd_info2(cmd_msg)
new_status = cmd.status()
if old_status != new_status:
status_changes.append((cmd, old_status, new_status))
updated_ids = [ cmd_msg.sheriff_id for cmd_msg in dep_info_msg.cmds ]
can_safely_remove = [ cmd for cmd in self._commands.values() \
if cmd.scheduled_for_removal and \
cmd.sheriff_id not in updated_ids ]
for toremove in can_safely_remove:
cmd = self._commands[toremove.sheriff_id]
old_status = cmd.status()
status_changes.append((cmd, old_status, None))
del self._commands[toremove.sheriff_id]
self.last_update_utime = _now_utime()
self.cpu_load = dep_info_msg.cpu_load
self.phys_mem_total_bytes = dep_info_msg.phys_mem_total_bytes
self.phys_mem_free_bytes = dep_info_msg.phys_mem_free_bytes
return status_changes
def _update_from_deputy_orders2(self, orders_msg):
status_changes = []
for cmd_msg in orders_msg.cmds:
if cmd_msg.sheriff_id in self._commands:
cmd = self._commands[cmd_msg.sheriff_id]
old_status = cmd.status()
else:
cmd = SheriffDeputyCommand()
cmd.sheriff_id = cmd_msg.sheriff_id
cmd.exec_str = cmd_msg.cmd.exec_str
cmd.command_id = cmd_msg.cmd.command_name
cmd.group = cmd_msg.cmd.group
cmd.auto_respawn = cmd_msg.cmd.auto_respawn
cmd.stop_signal = cmd_msg.cmd.stop_signal
cmd.stop_time_allowed = cmd_msg.cmd.stop_time_allowed
cmd.desired_runid = cmd_msg.desired_runid
self._add_command(cmd)
old_status = None
cmd._update_from_cmd_order2(cmd_msg)
new_status = cmd.status()
if old_status != new_status:
status_changes.append((cmd, old_status, new_status))
updated_ids = set([ cmd_msg.sheriff_id for cmd_msg in orders_msg.cmds ])
for cmd in self._commands.values():
if cmd.sheriff_id not in updated_ids:
old_status = cmd.status()
cmd.scheduled_for_removal = True
new_status = cmd.status()
if old_status != new_status:
status_changes.append((cmd, old_status, new_status))
return status_changes
def _add_command(self, newcmd):
assert newcmd.sheriff_id != 0
assert isinstance(newcmd, SheriffDeputyCommand)
self._commands[newcmd.sheriff_id] = newcmd
def _schedule_for_removal(self, cmd):
if not self.owns_command(cmd):
raise KeyError("invalid command")
old_status = cmd.status()
cmd.scheduled_for_removal = True
if not self.last_update_utime:
del self._commands[cmd.sheriff_id]
new_status = None
else:
new_status = cmd.status()
return ((cmd, old_status, new_status),)
def _make_orders_message(self, sheriff_name):
orders = orders_t()
orders.utime = _now_utime()
orders.host = self.name
orders.ncmds = len(self._commands)
orders.sheriff_name = sheriff_name
for cmd in self._commands.values():
if cmd.scheduled_for_removal:
orders.ncmds -= 1
continue
cmd_msg = sheriff_cmd_t()
cmd_msg.name = cmd.exec_str
cmd_msg.nickname = cmd.command_id
cmd_msg.sheriff_id = cmd.sheriff_id
cmd_msg.desired_runid = cmd.desired_runid
cmd_msg.force_quit = cmd.force_quit
cmd_msg.group = cmd.group
cmd_msg.auto_respawn = cmd.auto_respawn
orders.cmds.append(cmd_msg)
orders.nvars = 0
orders.varnames = []
orders.varvals = []
return orders
def _make_orders2_message(self, sheriff_name):
msg = orders2_t()
msg.utime = _now_utime()
msg.host = self.name
msg.ncmds = len(self._commands)
msg.sheriff_name = sheriff_name
for cmd in self._commands.values():
if cmd.scheduled_for_removal:
msg.ncmds -= 1
continue
cmd_msg = sheriff_cmd2_t()
cmd_msg.cmd = command2_t()
cmd_msg.cmd.exec_str = cmd.exec_str
cmd_msg.cmd.command_name = cmd.command_id
cmd_msg.cmd.group = cmd.group
cmd_msg.cmd.auto_respawn = cmd.auto_respawn
cmd_msg.cmd.stop_signal = cmd.stop_signal
cmd_msg.cmd.stop_time_allowed = cmd.stop_time_allowed
cmd_msg.cmd.num_options = 0
cmd_msg.cmd.option_names = []
cmd_msg.cmd.option_values = []
cmd_msg.sheriff_id = cmd.sheriff_id
cmd_msg.desired_runid = cmd.desired_runid
cmd_msg.force_quit = cmd.force_quit
msg.cmds.append(cmd_msg)
msg.num_options = 0
msg.option_names = []
msg.option_values = []
return msg
class ScriptExecutionContext(object):
def __init__(self, sheriff, script):
assert(script is not None)
self.script = script
self.current_action = -1
self.subscript_context = None
self.sheriff = sheriff
def get_next_action(self):
if self.subscript_context:
# if we're recursing into a called script, return its next action
action = self.subscript_context.get_next_action()
if action:
return action
else:
# unless it's done, in which case fall through to our next
# action
self.subscript_context = None
self.current_action += 1
if self.current_action >= len(self.script.actions):
# no more actions
return None
action = self.script.actions[self.current_action]
if action.action_type == "run_script":
subscript = self.sheriff.get_script(action.script_name)
self.subscript_context = ScriptExecutionContext(self.sheriff,
subscript)
return self.get_next_action()
else:
return action
class Sheriff(object):
"""Controls deputies and processes.
\ingroup python_api
The Sheriff class provides the primary interface for controlling processes
using the Procman Python API. It requires a GLib event loop to run.
example usage:
\code
import bot_procman
import gobject
lc = lcm.LCM()
sheriff = bot_procman.Sheriff(lc)
# add commands or load a config file
mainloop = gobject.MainLoop()
gobject.io_add_watch(lc, gobject.IO_IN, lambda *s: lc.handle() or True)
gobject.timeout_add(1000, lambda *s: sheriff.send_orders() or True)
mainloop.run()
\endcode
## Signals ##
The Sheriff exposes a number of signals that you can use to
register a callback function that gets called when a particular event
happens.
For example, to be notified when the status of a command changes:
\code
def on_command_status_changed(cmd_object, old_status, new_status):
print("Command %s status changed from %s -> %s" % (cmd_obj.command_id,
old_status, new_status)
sheriff.command_status_changed.connect(on_command_status_changed)
\endcode
"""
def __init__ (self, lcm_obj = None):
"""Initialize a new Sheriff object.
\param lcm_obj the LCM object to use for communication. If None, then
the sheriff creates a new lcm.LCM() instance.
"""
self._lcm = lcm_obj
if self._lcm is None:
self._lcm = lcm.LCM()
self._lcm.subscribe("PMD_INFO", self._on_pmd_info)
self._lcm.subscribe("PMD_INFO2", self._on_pmd_info2)
self._lcm.subscribe("PMD_ORDERS", self._on_pmd_orders)
self._lcm.subscribe("PMD_ORDERS2", self._on_pmd_orders2)
self._deputies = {}
self._is_observer = False
self._name = platform.node() + ":" + str(os.getpid()) + \
":" + str(_now_utime())
# variables for scripts
self._scripts = []
self._active_script_context = None
self._waiting_on_commands = []
self._waiting_for_status = None
self._last_script_action_time = None
# publish a discovery message to query for existing deputies
discover_msg = discovery_t()
discover_msg.utime = _now_utime()
discover_msg.host = ""
discover_msg.nonce = 0
self._lcm.publish("PMD_DISCOVER", discover_msg.encode())
# signals
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when
# information from a deputy is received and processed.
# `deputy_info_received(deputy_object)`
#
# \param deputy_object is a SheriffDeputy corresponding to the updated deputy.
self.deputy_info_received = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a new
# command is added to the sheriff.
#
# \param deputy_object is a SheriffDeputy for the deputy that owns the
# command.
# \param cmd_object is a SheriffDeputyCommand for the new command.
self.command_added = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a command
# is removed from the sheriff.
# `command_removed(deputy_object, cmd_object)`
#
# \param deputy_object is a SheriffDeputy for the deputy that owned the command.
# \param cmd_object is a SheriffDeputyCommand for the removed command.
self.command_removed = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when the
# status of a command changes (e.g., running, stopped, etc.).
# `command_status_changed(cmd_object, old_status, new_status)`
#
# \param cmd_object is a SheriffDeputyCommand for the command.
# \param old_status indicates the old command status.
# \param new_status indicates the new command status.
self.command_status_changed = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a command
# is moved into a different group.
#
# \param cmd_object the command whose group changes.
self.command_group_changed = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a script
# is added.
#
# \param script_object a [SheriffScript](\ref bot_procman.sheriff_script.SheriffScript) object.
self.script_added = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a script
# is removed.
#
# \param script_object a [SheriffScript](\ref bot_procman.sheriff_script.SheriffScript) object.
self.script_removed = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a script
# begins executing.
# `script_started(script_object)`
#
# \param script_object a [SheriffScript](\ref bot_procman.sheriff_script.SheriffScript) object.
self.script_started = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a single
# action in a script begins to run. (e.g., start a command)
# `script_action_executing(script_object, action)`
#
# \param script_object a [SheriffScript](\ref bot_procman.sheriff_script.SheriffScript) object
# \param action one of: [StartStopRestartAction](\ref bot_procman.sheriff_script.StartStopRestartAction),
# [WaitMsAction](\ref bot_procman.sheriff_script.WaitMsAction),
# [WaitStatusAction](\ref bot_procman.sheriff_script.WaitStatusAction),
# [RunScriptAction](\ref bot_procman.sheriff_script.RunScriptAction)
self.script_action_executing = Signal()
## [Signal](\ref bot_procman.signal_slot.Signal) emitted when a script
# finishes execution.
# `script_finished(script_object)`
#
# \param script_object a SheriffScript object
self.script_finished = Signal()
def _get_or_make_deputy(self, deputy_name):
if deputy_name not in self._deputies:
self._deputies[deputy_name] = SheriffDeputy(deputy_name)
return self._deputies[deputy_name]
def _maybe_emit_status_change_signals(self, deputy, status_changes):
for cmd, old_status, new_status in status_changes:
if old_status == new_status:
continue
if old_status is None:
self.command_added(deputy, cmd)
elif new_status is None:
self.command_removed(deputy, cmd)
else:
self._check_wait_action_status()
self.command_status_changed(cmd, old_status, new_status)
def _get_command_deputy(self, cmd):
for deputy in self._deputies.values():
if deputy.owns_command(cmd):
return deputy
raise KeyError()
def _handle_info2_t(self, info_msg, version):
now = _now_utime()
if (now - info_msg.utime) * 1e-6 > 30 and not self._is_observer:
# ignore old messages
return
_dbg("received pmd info from [%s]" % info_msg.host)
deputy = self._get_or_make_deputy(info_msg.host)
# If this is the first time we've heard from the deputy and we already
# have a desired state for the deputy, then try to reconcile the stored
# desired state with the deputy's reported state.
if not deputy.last_update_utime and deputy._commands:
_dbg("First update from [%s]" % info_msg.host)
# for each command we already have lined up in the deputy, check to
# see if the deputy is already managing that command. If the
# deputy is already managing that command, then reassign the
# internal ID for the command to match what the deputy is
# reporting.
for cmd in deputy._commands.values():
for cmd_msg in info_msg.cmds:
matched = cmd.exec_str == cmd_msg.cmd.exec_str and \
cmd.command_id == cmd_msg.cmd.command_name and \
cmd.group == cmd_msg.cmd.group and \
cmd.auto_respawn == cmd_msg.cmd.auto_respawn
if not matched:
continue
collision = False
for other_deputy in self._deputies.values():
if other_deputy._commands.get(cmd_msg.sheriff_id, cmd) \
is not cmd:
collision = True
break
if collision:
continue
# found a command managed by the deputy that looks
# exactly like the command the sheriff wants the
# deputy to run. Reassign the sheriff ID to match
# what the deputy is reporting.
del deputy._commands[cmd.sheriff_id]
cmd.sheriff_id = cmd_msg.sheriff_id
deputy._commands[cmd.sheriff_id] = cmd
_dbg("Merging command [%s] with command reported by deputy" \
% cmd.command_id)
break
deputy._orders_version = version
status_changes = deputy._update_from_deputy_info2(info_msg)
self.deputy_info_received(deputy)
self._maybe_emit_status_change_signals(deputy, status_changes)
def _on_pmd_info2(self, _, data):
try:
info_msg = info2_t.decode(data)
except ValueError:
print("invalid info2_t message")
return
self._handle_info2_t(info_msg, 2)
def _on_pmd_info(self, _, data):
try:
dep_info = info_t.decode(data)
except ValueError:
print("invalid info_t message")
return
new_info_msg = info2_t()
new_info_msg.utime = dep_info.utime
new_info_msg.host = dep_info.host
new_info_msg.cpu_load = dep_info.cpu_load
new_info_msg.phys_mem_total_bytes = dep_info.phys_mem_total_bytes
new_info_msg.phys_mem_free_bytes = dep_info.phys_mem_free_bytes
new_info_msg.swap_total_bytes = dep_info.swap_total_bytes
new_info_msg.swap_free_bytes = dep_info.swap_free_bytes
new_info_msg.ncmds = dep_info.ncmds
new_info_msg.num_options = 0
new_info_msg.option_names = []
new_info_msg.option_values = []
new_info_msg.cmds = []
for cmd_index, cmd_info in enumerate(dep_info.cmds):
cmd_msg = dep_info.cmds[cmd_index]
new_cmd_msg = deputy_cmd2_t()
new_cmd_msg.cmd = command2_t()
new_cmd_msg.cmd.exec_str = cmd_msg.name
new_cmd_msg.cmd.command_name = cmd_msg.nickname
new_cmd_msg.cmd.group = cmd_msg.group
new_cmd_msg.cmd.auto_respawn = cmd_msg.auto_respawn
new_cmd_msg.cmd.stop_signal = DEFAULT_STOP_SIGNAL
new_cmd_msg.cmd.stop_time_allowed = DEFAULT_STOP_TIME_ALLOWED
new_cmd_msg.cmd.num_options = 0
new_cmd_msg.cmd.option_names = []
new_cmd_msg.cmd.option_values = []
new_cmd_msg.pid = cmd_msg.pid
new_cmd_msg.actual_runid = cmd_msg.actual_runid
new_cmd_msg.exit_code = cmd_msg.exit_code
new_cmd_msg.cpu_usage = cmd_msg.cpu_usage
new_cmd_msg.mem_vsize_bytes = cmd_msg.mem_vsize_bytes
new_cmd_msg.mem_rss_bytes = cmd_msg.mem_rss_bytes
new_cmd_msg.sheriff_id = cmd_msg.sheriff_id
new_info_msg.cmds.append(new_cmd_msg)
self._handle_info2_t(new_info_msg, 1)
def _handle_orders2_t(self, orders_msg):
if not self._is_observer:
return
deputy = self._get_or_make_deputy(orders_msg.host)
status_changes = deputy._update_from_deputy_orders2(orders_msg)
self._maybe_emit_status_change_signals(deputy, status_changes)
def _on_pmd_orders2(self, _, data):
orders_msg = orders2_t.decode(data)
self._handle_orders2_t(orders_msg)
def _on_pmd_orders(self, _, data):
dep_orders = orders_t.decode(data)
new_orders = orders2_t()
new_orders.utime = dep_orders.utime
new_orders.host = dep_orders.host
new_orders.sheriff_name = dep_orders.sheriff_name
new_orders.num_options = 0
new_orders.option_names = []
new_orders.option_values = []
new_orders.ncmds = dep_orders.ncmds
for cmd_index, cmd_order in enumerate(dep_orders.cmds):
cmd_msg = dep_orders.cmds[cmd_index]
new_cmd_msg = sheriff_cmd2_t()
new_cmd_msg.cmd = command2_t()
new_cmd_msg.cmd.exec_str = cmd_msg.name
new_cmd_msg.cmd.command_name = cmd_msg.nickname
new_cmd_msg.cmd.group = cmd_msg.group
new_cmd_msg.cmd.auto_respawn = cmd_msg.auto_respawn
new_cmd_msg.cmd.stop_signal = DEFAULT_STOP_SIGNAL
new_cmd_msg.cmd.stop_time_allowed = DEFAULT_STOP_TIME_ALLOWED
new_cmd_msg.cmd.num_options = 0
new_cmd_msg.cmd.option_names = []
new_cmd_msg.cmd.option_values = []
new_cmd_msg.desired_runid = cmd_msg.desired_runid
new_cmd_msg.force_quit = cmd_msg.force_quit
new_cmd_msg.sheriff_id = cmd_msg.sheriff_id
new_orders.cmds.append(new_cmd_msg)
self._handle_orders2_t(new_orders)
def __get_free_sheriff_id(self):
for _ in range(1 << 16):
id_to_try = random.randint(0, (1 << 31) - 1)
collision = False
for deputy in self._deputies.values():
if id_to_try in deputy._commands:
collision = True
break
if not collision:
return id_to_try
raise RuntimeError("no available sheriff id")
def get_name(self):
"""Retrieve the sheriff name, as self reported to deputies.
The sheriff name is automatically set to a combination of the
hostname, current PID, and the time the sheriff was created."""
return self._name
def send_orders(self):
"""Transmit orders to all deputies. Call this method for the sheriff
to send updated orders to its deputies. This method is automatically
called when you call other sheriff methods such as add_command(),
start_command(), etc. In general, you should only need to explicitly
call this method for a periodic transmission to be robust against
network failures and dropped messages.
@note Orders will only be sent to a deputy if the sheriff has received at
least one update from the deputy.
"""
if self._is_observer:
raise ValueError("Can't send orders in Observer mode")
for deputy in self._deputies.values():
# only send orders to a deputy if we've heard from it.
if deputy.last_update_utime > 0:
version = deputy._orders_version
if version == 1:
msg = deputy._make_orders_message(self._name)
self._lcm.publish("PMD_ORDERS", msg.encode())
else:
msg = deputy._make_orders2_message(self._name)
self._lcm.publish("PMD_ORDERS2", msg.encode())
def add_command(self, spec):
"""Add a new command.
@param spec a SheriffCommandSpec that describes the new command to add
@return a SheriffDeputyCommand object representing the command.
"""
if self._is_observer:
raise ValueError("Can't add commands in Observer mode")
if not spec.exec_str:
raise ValueError("Invalid command")
if not spec.command_id:
raise ValueError("Invalid command id")
if self.get_commands_by_deputy_and_id(spec.deputy_name, spec.command_id):
_warn("Duplicate command id %s in group [%s]" % (spec.command_id, spec.group_name))
if not spec.deputy_name:
raise ValueError("Invalid deputy")
dep = self._get_or_make_deputy(spec.deputy_name)
newcmd = SheriffDeputyCommand()
newcmd.exec_str = spec.exec_str
newcmd.command_id = spec.command_id
newcmd.group = spec.group_name
newcmd.sheriff_id = self.__get_free_sheriff_id()
newcmd.auto_respawn = spec.auto_respawn
newcmd.stop_signal = spec.stop_signal
newcmd.stop_time_allowed = spec.stop_time_allowed
dep._add_command(newcmd)
self.command_added(dep, newcmd)
self.send_orders()
return newcmd
def start_command(self, cmd):
"""Sets a command's desired status to running. If the command is not
running, then the deputy will start it. If the command is already
running, then no action is taken.
This method calls send_orders().
@param cmd a SheriffDeputyCommand object specifying the command to run.
"""
if self._is_observer:
raise ValueError("Can't modify commands in Observer mode")
old_status = cmd.status()
cmd._start()
new_status = cmd.status()
deputy = self.get_command_deputy(cmd)
self._maybe_emit_status_change_signals(deputy,
((cmd, old_status, new_status),))
self.send_orders()
def restart_command(self, cmd):
"""Starts a command if it's not running, or stop and then start it if it's
already running. If the command is not running, then the deputy will
start it. If the command is already running, then the deputy will
terminate it and then start it again.
This method calls send_orders().
@param cmd a SheriffDeputyCommand object specifying the command to
restart.
"""
if self._is_observer:
raise ValueError("Can't modify commands in Observer mode")
old_status = cmd.status()
cmd._restart()
new_status = cmd.status()
deputy = self.get_command_deputy(cmd)
self._maybe_emit_status_change_signals(deputy,
((cmd, old_status, new_status),))
self.send_orders()
def stop_command(self, cmd):
"""Sets a command's desired status to stopped. If the command is
running, then the deputy will stop it. If the command is not running,
then no action is taken. This method calls send_orders().
@param cmd a SheriffDeputyCommand object specifying the command to stop.
"""
if self._is_observer:
raise ValueError("Can't modify commands in Observer mode")
old_status = cmd.status()
cmd._stop()
new_status = cmd.status()
deputy = self.get_command_deputy(cmd)
self._maybe_emit_status_change_signals(deputy,
((cmd, old_status, new_status),))
self.send_orders()
def set_command_exec(self, cmd, exec_str):
"""Set the executable string for a command. Calling this will not
terminate the command if it's already running, and the new execution
command will not take effect until the next time the command is run by
the deputy.
This method does not call send_orders()
@param cmd a SheriffDeputyCommand object.
@param exec_str the actual command string to execute.
"""
cmd.exec_str = exec_str
def set_command_id(self, cmd, new_id):
"""Set the command id.
@param cmd a SheriffDeputyCommand object.
@param new_id the new id to identify a command with.
"""
if not new_id.strip():
raise ValueError("Empty command id not allowed")
if self.get_commands_by_id(new_id):
_warn("Duplicate command id [%s]" % new_id)
cmd.command_id = new_id
def set_command_group(self, cmd, group_name):
"""Set the command group.
@param cmd a SheriffDeputyCommand object.
@param group_name the new group name for the command.
"""
group_name = group_name.strip("/")
while group_name.find("//") >= 0:
group_name = group_name.replace("//", "/")
if self._is_observer:
raise ValueError("Can't modify commands in Observer mode")
# deputy = self._get_command_deputy(cmd)
old_group = cmd.group
if old_group != group_name:
cmd._set_group(group_name)
self.command_group_changed(cmd)
def set_auto_respawn(self, cmd, newauto_respawn):
"""Set if a deputy should auto-respawn the command when the command
terminates.
@param cmd a SheriffDeputyCommand object.
@param newauto_respawn True if the command should be automatically
restarted.
"""
cmd.auto_respawn = newauto_respawn
def set_command_stop_signal(self, cmd, new_stop_signal):
"""Set the OS signal that is sent to a command when requesting it to
stop cleanly. If the command doesn't cleanly exit within the stop time
allowed, then it is sent a SIGKILL."""
cmd.stop_signal = new_stop_signal
def set_command_stop_time_allowed(self, cmd, new_stop_time_allowed):
"""Set how much time (seconds) to wait for a command to exit cleanly when
stopping the command, before sending it a SIGKILL. Integer values only.
"""
cmd.stop_time_allowed = int(new_stop_time_allowed)
def schedule_command_for_removal(self, cmd):
"""Remove a command. This starts the process of purging a command from
the sheriff and deputies. It is not instantaneous, because the sheriff
needs to wait for removal confirmation from the deputy.
This method calls send_orders()
@param cmd a SheriffDeputyCommand object to remove.
"""
if self._is_observer:
raise ValueError("Can't remove commands in Observer mode")
deputy = self.get_command_deputy(cmd)
status_changes = deputy._schedule_for_removal(cmd)
self._maybe_emit_status_change_signals(deputy, status_changes)
self.send_orders()
def move_command_to_deputy(self, cmd, newdeputy_name):
"""Move a command from one deputy to another. This removes the command
from one deputy, and creates it in another. This method calls
send_orders(). On return, the passed in command object is no longer
valid and should not be used.
@param cmd a SheriffDeputyCommand object to move. This object is invalidated by this method.
@param newdeputy_name the name of the new deputy for the command.
@return the newly created command
"""
self.schedule_command_for_removal(cmd)
spec = SheriffCommandSpec()
spec.deputy_name = newdeputy_name
spec.exec_str = cmd.exec_str
spec.command_id = cmd.command_id
spec.group_name = cmd.group
spec.auto_respawn = cmd.auto_respawn
spec.stop_signal = cmd.stop_signal
spec.stop_time_allowed = cmd.stop_time_allowed
return self.add_command(spec)
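# Usage sketch (added): moving a command invalidates the old handle, so
# callers should rebind to the returned object. The deputy name below is
# hypothetical.
#
# cmd = sheriff.move_command_to_deputy(cmd, "robot02")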
def set_observer(self, is_observer):
"""Set the sheriff into observation mode, or remove it from observation
mode.
@param is_observer True if the sheriff should enter observation mode,
False if it should leave it.
"""
self._is_observer = is_observer
def is_observer(self):
"""Check if the sheriff is in observer mode.
@return True if the sheriff is in observer mode, False if not.
"""
return self._is_observer
def get_deputies(self):
"""Retrieve a list of known deputies.
@return a list of SheriffDeputy objects.
"""
return self._deputies.values()
def find_deputy(self, name):
"""Retrieve the SheriffDeputy object by deputy name.
@param name the name of the desired deputy.
@return a SheriffDeputy object.
"""
return self._deputies[name]
def purge_useless_deputies(self):
"""Clean up the Sheriff internal state.
This method is meant to be called when a deputy process has no more
commands and terminates. It purges the Sheriff's internal representation
of deputies that don't have any commands.
"""
for deputy_name, deputy in list(self._deputies.items()):
cmds = deputy._commands.values()
if not deputy._commands or \
all([ cmd.scheduled_for_removal for cmd in cmds ]):
del self._deputies[deputy_name]
def get_command_by_sheriff_id(self, sheriff_id):
"""Retrieve a command by its sheriff ID.
The sheriff ID is assigned and managed by the sheriff automatically. It
is not the same as the user-assigned command ID. You generally should
not need to use this function.
"""
for deputy in self._deputies.values():
if sheriff_id in deputy._commands:
return deputy._commands[sheriff_id]
raise KeyError("No such command")
def get_command_deputy(self, command):
"""Retrieve the SheriffDeputy that manages the specified command.
@param command a SheriffDeputyCommand object
@return a SheriffDeputy object corresponding to the deputy that manages
the specified command.
"""
for deputy in self._deputies.values():
if command.sheriff_id in deputy._commands:
return deputy
raise KeyError("No such command")
def get_all_commands(self):
"""Retrieve all commands managed by all deputies.
@return a list of SheriffDeputyCommand objects.
"""
cmds = []
for dep in self._deputies.values():
cmds.extend(dep._commands.values())
return cmds
def get_commands_by_deputy_and_id(self, deputy_name, cmd_id):
"""Search for commands with the specified deputy name and command
id. This should return at most one command.
@param deputy_name the desired deputy name
@param cmd_id the desired command id.
@return a list of SheriffDeputyCommand objects matching the query, or an
empty list if none are found.
"""
if deputy_name not in self._deputies:
return []
result = []
for cmd in self._deputies[deputy_name]._commands.values():
if cmd.command_id == cmd_id:
result.append(cmd)
return result
def get_commands_by_id(self, cmd_id):
"""Retrieve all commands with the specified id. This should only
return one command.
@param cmd_id the desired command id.
@return a list of SheriffDeputyCommand objects matching the query, or an
empty list if none are found.
"""
result = []
for deputy in self._deputies.values():
for cmd in deputy._commands.values():
if cmd.command_id == cmd_id:
result.append(cmd)
return result
def get_commands_by_group(self, group_name):
"""Retrieve a list of all commands in the specified group. Use this
method to find out what commands are in a group. Commands in subgroups
of the specified group are also included.
@param group_name the name of the desired group
@return a list of SheriffDeputyCommand objects.
"""
result = []
group_name = group_name.strip("/")
while group_name.find("//") >= 0:
group_name = group_name.replace("//", "/")
group_parts = group_name.split("/")
for deputy in self._deputies.values():
for cmd in deputy._commands.values():
cmd_group_parts = cmd.group.split("/")
if len(group_parts) <= len(cmd_group_parts) and \
all([ cgp == gp for cgp, gp in zip(group_parts,
cmd_group_parts)]):
result.append(cmd)
return result
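# Example (added): with commands in groups "vision", "vision/stereo", and
# "nav", get_commands_by_group("vision") matches the commands in both
# "vision" and "vision/stereo", since the query only requires the leading
# path components to agree.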
def get_active_script(self):
"""Retrieve the currently executing script
@return the SheriffScript object corresponding to the active script, or
None if there is no active script.
"""
if self._active_script_context:
return self._active_script_context.script
return None
def get_script(self, name):
"""Look up a script by name
@param name the name of the script
@return a SheriffScript object, or None if no such script is found.
"""
for script in self._scripts:
if script.name == name:
return script
return None
def get_scripts(self):
"""Retrieve a list of all scripts
@return a list of SheriffScript objects
"""
return self._scripts
def add_script(self, script):
"""Add a new script to the sheriff.
@param script a SheriffScript object.
"""
if self.get_script(script.name) is not None:
raise ValueError("Script [%s] already exists", script.name)
self._scripts.append(script)
self.script_added(script)
def remove_script(self, script):
"""Remove a script.
@param script the SheriffScript object to remove.
"""
if self._active_script_context is not None:
raise RuntimeError("Script removal is not allowed while a script is running.")
if script in self._scripts:
self._scripts.remove(script)
self.script_removed(script)
else:
raise ValueError("Unknown script [%s]", script.name)
def _get_action_commands(self, ident_type, ident):
if ident_type == "cmd":
return self.get_commands_by_id(ident)
elif ident_type == "group":
return self.get_commands_by_group(ident)
elif ident_type == "everything":
return self.get_all_commands()
else:
raise ValueError("Invalid ident_type %s" % ident_type)
def check_script_for_errors(self, script, path_to_root=None):
"""Check a script object for errors that would prevent its
execution. Possible errors include a command or group mentioned in the
script not being found by the sheriff.
@param script a SheriffScript object to inspect
@return a list of error messages. If the list is not empty, then each
error message indicates a problem with the script. Otherwise, the script
can be executed.
"""
if path_to_root is None:
path_to_root = []
err_msgs = []
check_subscripts = True
if path_to_root and script in path_to_root:
err_msgs.append("Infinite loop: script %s eventually calls itself" % script.name)
check_subscripts = False
for action in script.actions:
if action.action_type in \
[ "start", "stop", "restart", "wait_status" ]:
if action.ident_type == "cmd":
if not self.get_commands_by_id(action.ident):
err_msgs.append("No such command: %s" % action.ident)
elif action.ident_type == "group":
if not self.get_commands_by_group(action.ident):
err_msgs.append("No such group: %s" % action.ident)
elif action.action_type == "wait_ms":
if action.delay_ms < 0:
err_msgs.append("Wait times must be nonnegative")
elif action.action_type == "run_script":
# action is to run another script.
subscript = self.get_script(action.script_name)
if subscript is None:
# couldn't find that script. error out
err_msgs.append("Unknown script \"%s\"" % \
action.script_name)
elif check_subscripts:
# Recursively check the called script for errors.
path = path_to_root + [script]
sub_messages = self.check_script_for_errors(subscript,
path)
parstr = "->".join([s.name for s in (path + [subscript])])
for msg in sub_messages:
err_msgs.append("%s - %s" % (parstr, msg))
else:
err_msgs.append("Unrecognized action %s" % action.action_type)
return err_msgs
def _finish_script_execution(self):
script = self._active_script_context.script
self._active_script_context = None
self._waiting_on_commands = []
self._waiting_for_status = None
if script:
self.script_finished(script)
def _check_wait_action_status(self):
if not self._waiting_on_commands:
return
# hack.. don't execute actions faster than 10 Hz
time_elapsed_ms = (_now_utime() - self._last_script_action_time) / 1000
if time_elapsed_ms < 100:
return
if self._waiting_for_status == "running":
acceptable_statuses = [ RUNNING ]
elif self._waiting_for_status == "stopped":
acceptable_statuses = [ STOPPED_OK, STOPPED_ERROR ]
else:
raise ValueError("Invalid desired status %s" % \
self._waiting_for_status)
for cmd in self._waiting_on_commands:
if cmd.status() not in acceptable_statuses:
return
# all commands passed the status check. schedule the next action
self._waiting_on_commands = []
self._waiting_for_status = None
gobject.timeout_add(0, self._execute_next_script_action)
def _execute_next_script_action(self):
# make sure there's an active script
if not self._active_script_context:
return False
action = self._active_script_context.get_next_action()
if action is None:
# no more actions, script is done.
self._finish_script_execution()
return False
assert action.action_type != "run_script"
self.script_action_executing(self._active_script_context.script, action)
# fixed time wait -- just set a GObject timer to call this function
# again
if action.action_type == "wait_ms":
gobject.timeout_add(action.delay_ms,
self._execute_next_script_action)
return False
# find the commands that we're operating on
cmds = self._get_action_commands(action.ident_type, action.ident)
self._last_script_action_time = _now_utime()
# execute an immediate action if applicable
if action.action_type == "start":
for cmd in cmds:
self.start_command(cmd)
elif action.action_type == "stop":
for cmd in cmds:
self.stop_command(cmd)
elif action.action_type == "restart":
for cmd in cmds:
self.restart_command(cmd)
# do we need to wait for the commands to achieve a desired status?
if action.wait_status:
# yes
self._waiting_on_commands = cmds
self._waiting_for_status = action.wait_status
self._check_wait_action_status()
else:
# no. Just move on
gobject.timeout_add(0, self._execute_next_script_action)
return False
def execute_script(self, script):
"""Starts executing a script. If another script is executing, then
that script is aborted first. Calling this method executes the first
action in the script. Other actions will be invoked during LCM message
handling and by the GLib event loop (e.g., timers).
@param script a sheriff_script.SheriffScript object to execute
@sa get_script()
@return a list of error messages. If the list is not empty, then each
error message indicates a problem with the script and execution does
not start. If the list is empty, the script has started executing.
"""
if self._active_script_context:
self.abort_script()
errors = self.check_script_for_errors(script)
if errors:
return errors
self._active_script_context = ScriptExecutionContext(self, script)
self.script_started(script)
self._execute_next_script_action()
def abort_script(self):
"""Cancels execution of the active script."""
self._finish_script_execution()
def load_config(self, config_node, merge_with_existing):
"""
config_node should be an instance of sheriff_config.ConfigNode
"""
if self._is_observer:
raise ValueError("Can't load config in Observer mode")
# always replace scripts.
for script in self._scripts[:]:
self.remove_script(script)
current_command_strs = set()
if merge_with_existing:
# if merging new config with existing commands, then build an index
# of the existing commands.
for dep in self._deputies.values():
for cmd in dep._commands.values():
cmdstr = "%s!%s!%s!%s!%s" % (dep.name, cmd.exec_str, cmd.command_id, cmd.group, cmd.auto_respawn)
current_command_strs.add(cmdstr)
else:
# remove all current commands if we're not merging.
for dep in self._deputies.values():
for cmd in dep._commands.values():
self.schedule_command_for_removal(cmd)
commands_to_add = []
def add_group_commands(group_node, name_prefix):
for cmd_node in group_node.commands:
auto_respawn = cmd_node.attributes.get("auto_respawn", "").lower() in [ "true", "yes" ]
assert group_node.name == cmd_node.attributes["group"]
add_command = True
# if merging is enabled, then only add this command if we don't
# have an entry for it already.
if merge_with_existing:
cmdstr = "%s!%s!%s!%s!%s" % (cmd_node.attributes["host"], cmd_node.attributes["exec"],
cmd_node.attributes["nickname"], name_prefix + group_node.name, str(auto_respawn))
if cmdstr in current_command_strs:
add_command = False
if add_command:
spec = SheriffCommandSpec()
spec.deputy_name = cmd_node.attributes["host"]
spec.exec_str = cmd_node.attributes["exec"]
spec.command_id = cmd_node.attributes["nickname"]
spec.group_name = name_prefix + group_node.name
spec.auto_respawn = auto_respawn
spec.stop_signal = cmd_node.attributes["stop_signal"]
spec.stop_time_allowed = cmd_node.attributes["stop_time_allowed"]
if spec.stop_signal == 0:
spec.stop_signal = DEFAULT_STOP_SIGNAL
if spec.stop_time_allowed == 0:
spec.stop_time_allowed = DEFAULT_STOP_TIME_ALLOWED
commands_to_add.append(spec)
for subgroup in group_node.subgroups.values():
if group_node.name:
add_group_commands(subgroup, name_prefix + group_node.name + "/")
else:
add_group_commands(subgroup, "")
add_group_commands(config_node.root_group, "")
for spec in commands_to_add:
self.add_command(spec)
# _dbg("[%s] %s (%s) -> %s" % (newcmd.group, newcmd.exec_str, newcmd.nickname, cmd.attributes["host"]))
for script_node in config_node.scripts.values():
self.add_script(SheriffScript.from_script_node(script_node))
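# Note (added): a minimal config accepted by load_config() looks roughly
# like the sketch below (the exact syntax is defined by sheriff_config; the
# attribute names match those referenced above, and the values are
# hypothetical):
#
# group "sensors" {
#     cmd "my-node" {
#         exec = "python my_node.py";
#         host = "localhost";
#     }
# }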
def save_config(self, file_obj):
"""Write the current sheriff configuration to the specified file
object. The current sheriff configuration consists of all commands
managed by all deputies along with their settings, and all scripts as
well. This information is written out to the specified file object,
which can then be loaded into the sheriff again at a later point in
time.
@param file_obj a file object for saving the current sheriff configuration
"""
config_node = sheriff_config.ConfigNode()
for deputy in self._deputies.values():
for cmd in deputy._commands.values():
cmd_node = sheriff_config.CommandNode()
cmd_node.attributes["exec"] = cmd.exec_str
cmd_node.attributes["nickname"] = cmd.command_id
cmd_node.attributes["host"] = deputy.name
if cmd.auto_respawn:
cmd_node.attributes["auto_respawn"] = "true"
group = config_node.get_group(cmd.group, True)
group.add_command(cmd_node)
for script in self._scripts:
config_node.add_script(script.toScriptNode())
file_obj.write(str(config_node))
def main():
def usage():
print "usage: sheriff.py [config_file [script_name]]"
args = sys.argv[:]
if args:
usage()
cfg = None
script_name = None
if len(args) > 0:
cfg_fname = args[0]
cfg = sheriff_config.config_from_filename(cfg_fname)
if len(args) > 1:
script_name = args[1]
comms = lcm.LCM()
sheriff = Sheriff(comms)
if cfg is not None:
sheriff.load_config(cfg, False)
if script_name is not None:
script = sheriff.get_script(script_name)
if not script:
print "ERROR! Uknown script %s" % script_name
sys.exit(1)
errors = sheriff.execute_script(script)
if errors:
print "ERROR! Unable to execute script:\n%s" % "\n ".join(errors)
sys.exit(1)
sheriff.deputy_info_received.connect(\
lambda s, dep: sys.stdout.write("deputy info received from %s\n" %
dep.name))
mainloop = gobject.MainLoop()
gobject.io_add_watch(comms, gobject.IO_IN, lambda *s: comms.handle() or True)
gobject.timeout_add(1000, lambda *s: sheriff.send_orders() or True)
mainloop.run()
if __name__ == "__main__":
main()
| lgpl-3.0 |
OSSOS/MOP | src/ossos/core/scripts/process.py | 1 | 1397 |
from ossos.pipeline import mk_mopheader, mkpsf, step1, slow
from ossos import util, storage
import logging
import sys
import os
import shutil
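# Usage (added): this script expects a single argument, a text file with one
# exposure number per line, e.g.:
#
#   python process.py expnums.txt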
util.config_logging(logging.INFO)
version='p'
force=False
dry_run=False
prefix=''
lines = open(sys.argv[1]).readlines()
basedir=os.getcwd()
for line in lines:
expnum = int(line.strip())
for ccd in storage.get_ccdlist(expnum):
try:
os.chdir(basedir)
if not os.access(str(expnum),os.F_OK):
os.mkdir(str(expnum))
os.chdir(str(expnum))
if not os.access(str(ccd), os.F_OK):
os.mkdir(str(ccd))
os.chdir(str(ccd))
try:
print(os.getcwd())
mk_mopheader.run(expnum, ccd=ccd, version=version, dry_run=dry_run, prefix='', force=force, ignore_dependency=False)
mkpsf.run(expnum, ccd=ccd, version=version, dry_run=dry_run, prefix=prefix, force=force)
step1.run(expnum, ccd=ccd, version=version, dry_run=dry_run, prefix=prefix, force=force)
slow.run(expnum, ccd, version=version, dry_run=dry_run, prefix=prefix, force=force)
except Exception as ex:
print(ex)
except Exception as ex:
print(ex)
finally:
os.chdir(basedir)
shutil.rmtree("{}/{}".format(expnum, ccd), ignore_errors=True)
| gpl-3.0 |
grpc/grpc | src/python/grpcio/grpc/aio/_call.py | 8 | 23750 | # Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Asyncio Python."""
import asyncio
import enum
import inspect
import logging
from functools import partial
from typing import AsyncIterable, Optional, Tuple
import grpc
from grpc import _common
from grpc._cython import cygrpc
from . import _base_call
from ._metadata import Metadata
from ._typing import (DeserializingFunction, DoneCallbackType, MetadatumType,
RequestIterableType, RequestType, ResponseType,
SerializingFunction)
__all__ = 'AioRpcError', 'Call', 'UnaryUnaryCall', 'UnaryStreamCall'
_LOCAL_CANCELLATION_DETAILS = 'Locally cancelled by application!'
_GC_CANCELLATION_DETAILS = 'Cancelled upon garbage collection!'
_RPC_ALREADY_FINISHED_DETAILS = 'RPC already finished.'
_RPC_HALF_CLOSED_DETAILS = 'RPC is half closed after calling "done_writing".'
_API_STYLE_ERROR = 'The iterator and read/write APIs may not be mixed on a single RPC.'
_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'>')
_NON_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'\tdebug_error_string = "{}"\n'
'>')
_LOGGER = logging.getLogger(__name__)
class AioRpcError(grpc.RpcError):
"""An implementation of RpcError to be used by the asynchronous API.
The raised RpcError is a snapshot of the final status of the RPC; its values
are already determined. Hence, its methods no longer need to be coroutines.
"""
_code: grpc.StatusCode
_details: Optional[str]
_initial_metadata: Optional[Metadata]
_trailing_metadata: Optional[Metadata]
_debug_error_string: Optional[str]
def __init__(self,
code: grpc.StatusCode,
initial_metadata: Metadata,
trailing_metadata: Metadata,
details: Optional[str] = None,
debug_error_string: Optional[str] = None) -> None:
"""Constructor.
Args:
code: The status code with which the RPC has been finalized.
details: Optional details explaining the reason of the error.
initial_metadata: Optional initial metadata that could be sent by the
Server.
trailing_metadata: Optional metadata that could be sent by the Server.
"""
super().__init__(self)
self._code = code
self._details = details
self._initial_metadata = initial_metadata
self._trailing_metadata = trailing_metadata
self._debug_error_string = debug_error_string
def code(self) -> grpc.StatusCode:
"""Accesses the status code sent by the server.
Returns:
The `grpc.StatusCode` status code.
"""
return self._code
def details(self) -> Optional[str]:
"""Accesses the details sent by the server.
Returns:
The description of the error.
"""
return self._details
def initial_metadata(self) -> Metadata:
"""Accesses the initial metadata sent by the server.
Returns:
The initial metadata received.
"""
return self._initial_metadata
def trailing_metadata(self) -> Metadata:
"""Accesses the trailing metadata sent by the server.
Returns:
The trailing metadata received.
"""
return self._trailing_metadata
def debug_error_string(self) -> str:
"""Accesses the debug error string sent by the server.
Returns:
The debug error string received.
"""
return self._debug_error_string
def _repr(self) -> str:
"""Assembles the error string for the RPC error."""
return _NON_OK_CALL_REPRESENTATION.format(self.__class__.__name__,
self._code, self._details,
self._debug_error_string)
def __repr__(self) -> str:
return self._repr()
def __str__(self) -> str:
return self._repr()
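# Illustrative sketch, added for exposition and not part of the original
# module: how a hypothetical caller might inspect an AioRpcError after a
# failed RPC. The `stub` argument and its `SomeUnaryCall` method are
# assumptions, not a real API.
async def _example_handle_rpc_error(stub, request):
    try:
        return await stub.SomeUnaryCall(request)
    except AioRpcError as rpc_error:
        # The snapshot accessors are plain methods, not coroutines.
        _LOGGER.warning('RPC failed: code=%s details=%s',
                        rpc_error.code(), rpc_error.details())
        raise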
def _create_rpc_error(initial_metadata: Metadata,
status: cygrpc.AioRpcStatus) -> AioRpcError:
return AioRpcError(
_common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()],
Metadata.from_tuple(initial_metadata),
Metadata.from_tuple(status.trailing_metadata()),
details=status.details(),
debug_error_string=status.debug_error_string(),
)
class Call:
"""Base implementation of client RPC Call object.
Implements logic around final status, metadata and cancellation.
"""
_loop: asyncio.AbstractEventLoop
_code: grpc.StatusCode
_cython_call: cygrpc._AioCall
_metadata: Tuple[MetadatumType]
_request_serializer: SerializingFunction
_response_deserializer: DeserializingFunction
def __init__(self, cython_call: cygrpc._AioCall, metadata: Metadata,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._cython_call = cython_call
self._metadata = tuple(metadata)
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __del__(self) -> None:
# The '_cython_call' object might be destructed before Call object
if hasattr(self, '_cython_call'):
if not self._cython_call.done():
self._cancel(_GC_CANCELLATION_DETAILS)
def cancelled(self) -> bool:
return self._cython_call.cancelled()
def _cancel(self, details: str) -> bool:
"""Forwards the application cancellation reasoning."""
if not self._cython_call.done():
self._cython_call.cancel(details)
return True
else:
return False
def cancel(self) -> bool:
return self._cancel(_LOCAL_CANCELLATION_DETAILS)
def done(self) -> bool:
return self._cython_call.done()
def add_done_callback(self, callback: DoneCallbackType) -> None:
cb = partial(callback, self)
self._cython_call.add_done_callback(cb)
def time_remaining(self) -> Optional[float]:
return self._cython_call.time_remaining()
async def initial_metadata(self) -> Metadata:
raw_metadata_tuple = await self._cython_call.initial_metadata()
return Metadata.from_tuple(raw_metadata_tuple)
async def trailing_metadata(self) -> Metadata:
raw_metadata_tuple = (await
self._cython_call.status()).trailing_metadata()
return Metadata.from_tuple(raw_metadata_tuple)
async def code(self) -> grpc.StatusCode:
cygrpc_code = (await self._cython_call.status()).code()
return _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[cygrpc_code]
async def details(self) -> str:
return (await self._cython_call.status()).details()
async def debug_error_string(self) -> str:
return (await self._cython_call.status()).debug_error_string()
async def _raise_for_status(self) -> None:
if self._cython_call.is_locally_cancelled():
raise asyncio.CancelledError()
code = await self.code()
if code != grpc.StatusCode.OK:
raise _create_rpc_error(await self.initial_metadata(), await
self._cython_call.status())
def _repr(self) -> str:
return repr(self._cython_call)
def __repr__(self) -> str:
return self._repr()
def __str__(self) -> str:
return self._repr()
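# Illustrative sketch (exposition only): the base Call surface as a caller
# would use it -- done callbacks, deadline checks and local cancellation.
# `call` is assumed to be any concrete Call subclass instance.
def _example_observe_call(call):
    call.add_done_callback(
        lambda finished: _LOGGER.debug('RPC finished, cancelled=%s',
                                       finished.cancelled()))
    remaining = call.time_remaining()
    if remaining is not None and remaining < 0.1:
        # Locally cancel an RPC that is about to hit its deadline.
        call.cancel()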
class _APIStyle(enum.IntEnum):
UNKNOWN = 0
ASYNC_GENERATOR = 1
READER_WRITER = 2
class _UnaryResponseMixin(Call):
_call_response: asyncio.Task
def _init_unary_response_mixin(self, response_task: asyncio.Task):
self._call_response = response_task
def cancel(self) -> bool:
if super().cancel():
self._call_response.cancel()
return True
else:
return False
def __await__(self) -> ResponseType:
"""Wait till the ongoing RPC request finishes."""
try:
response = yield from self._call_response
except asyncio.CancelledError:
# Even if we caught all other CancelledError, there is still
# this corner case. If the application cancels immediately after
# the Call object is created, we will observe this
# `CancelledError`.
if not self.cancelled():
self.cancel()
raise
# NOTE(lidiz) If we raise RpcError in the task and the user doesn't
# 'await' it, AsyncIO will log 'Task exception was never retrieved'.
# Instead, if we move the exception raising here, the spam stops.
# Unfortunately, there can only be one 'yield from' in '__await__'. So,
# we need to access the private instance variable.
if response is cygrpc.EOF:
if self._cython_call.is_locally_cancelled():
raise asyncio.CancelledError()
else:
raise _create_rpc_error(self._cython_call._initial_metadata,
self._cython_call._status)
else:
return response
class _StreamResponseMixin(Call):
_message_aiter: AsyncIterable[ResponseType]
_preparation: asyncio.Task
_response_style: _APIStyle
def _init_stream_response_mixin(self, preparation: asyncio.Task):
self._message_aiter = None
self._preparation = preparation
self._response_style = _APIStyle.UNKNOWN
def _update_response_style(self, style: _APIStyle):
if self._response_style is _APIStyle.UNKNOWN:
self._response_style = style
elif self._response_style is not style:
raise cygrpc.UsageError(_API_STYLE_ERROR)
def cancel(self) -> bool:
if super().cancel():
self._preparation.cancel()
return True
else:
return False
async def _fetch_stream_responses(self) -> ResponseType:
message = await self._read()
while message is not cygrpc.EOF:
yield message
message = await self._read()
# If the read operation failed, Core should explain why.
await self._raise_for_status()
def __aiter__(self) -> AsyncIterable[ResponseType]:
self._update_response_style(_APIStyle.ASYNC_GENERATOR)
if self._message_aiter is None:
self._message_aiter = self._fetch_stream_responses()
return self._message_aiter
async def _read(self) -> ResponseType:
# Wait for the request being sent
await self._preparation
# Reads response message from Core
try:
raw_response = await self._cython_call.receive_serialized_message()
except asyncio.CancelledError:
if not self.cancelled():
self.cancel()
await self._raise_for_status()
if raw_response is cygrpc.EOF:
return cygrpc.EOF
else:
return _common.deserialize(raw_response,
self._response_deserializer)
async def read(self) -> ResponseType:
if self.done():
await self._raise_for_status()
return cygrpc.EOF
self._update_response_style(_APIStyle.READER_WRITER)
response_message = await self._read()
if response_message is cygrpc.EOF:
# If the read operation failed, Core should explain why.
await self._raise_for_status()
return response_message
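# Illustrative sketch (exposition only): the two mutually exclusive ways to
# consume a response stream. Mixing them on a single RPC raises
# cygrpc.UsageError with _API_STYLE_ERROR. `call` is an assumed
# UnaryStreamCall instance.
async def _example_consume_stream(call, use_iterator=True):
    if use_iterator:
        # ASYNC_GENERATOR style.
        async for response in call:
            _LOGGER.debug('received %r', response)
    else:
        # READER_WRITER style.
        response = await call.read()
        while response is not cygrpc.EOF:
            _LOGGER.debug('received %r', response)
            response = await call.read()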
class _StreamRequestMixin(Call):
_metadata_sent: asyncio.Event
_done_writing_flag: bool
_async_request_poller: Optional[asyncio.Task]
_request_style: _APIStyle
def _init_stream_request_mixin(
self, request_iterator: Optional[RequestIterableType]):
self._metadata_sent = asyncio.Event(loop=self._loop)
self._done_writing_flag = False
# If user passes in an async iterator, create a consumer Task.
if request_iterator is not None:
self._async_request_poller = self._loop.create_task(
self._consume_request_iterator(request_iterator))
self._request_style = _APIStyle.ASYNC_GENERATOR
else:
self._async_request_poller = None
self._request_style = _APIStyle.READER_WRITER
def _raise_for_different_style(self, style: _APIStyle):
if self._request_style is not style:
raise cygrpc.UsageError(_API_STYLE_ERROR)
def cancel(self) -> bool:
if super().cancel():
if self._async_request_poller is not None:
self._async_request_poller.cancel()
return True
else:
return False
def _metadata_sent_observer(self):
self._metadata_sent.set()
async def _consume_request_iterator(
self, request_iterator: RequestIterableType) -> None:
try:
if inspect.isasyncgen(request_iterator) or hasattr(
request_iterator, '__aiter__'):
async for request in request_iterator:
await self._write(request)
else:
for request in request_iterator:
await self._write(request)
await self._done_writing()
except AioRpcError as rpc_error:
# The RPC status should be exposed through the other APIs. Exceptions
# raised within this Task won't be retrieved by another coroutine. It's
# better to suppress the error than to spam the user's screen.
_LOGGER.debug('Exception while consuming the request_iterator: %s',
rpc_error)
async def _write(self, request: RequestType) -> None:
if self.done():
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
if self._done_writing_flag:
raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS)
if not self._metadata_sent.is_set():
await self._metadata_sent.wait()
if self.done():
await self._raise_for_status()
serialized_request = _common.serialize(request,
self._request_serializer)
try:
await self._cython_call.send_serialized_message(serialized_request)
except asyncio.CancelledError:
if not self.cancelled():
self.cancel()
await self._raise_for_status()
async def _done_writing(self) -> None:
if self.done():
# If the RPC is finished, do nothing.
return
if not self._done_writing_flag:
# If the done writing is not sent before, try to send it.
self._done_writing_flag = True
try:
await self._cython_call.send_receive_close()
except asyncio.CancelledError:
if not self.cancelled():
self.cancel()
await self._raise_for_status()
async def write(self, request: RequestType) -> None:
self._raise_for_different_style(_APIStyle.READER_WRITER)
await self._write(request)
async def done_writing(self) -> None:
"""Signal peer that client is done writing.
This method is idempotent.
"""
self._raise_for_different_style(_APIStyle.READER_WRITER)
await self._done_writing()
async def wait_for_connection(self) -> None:
await self._metadata_sent.wait()
if self.done():
await self._raise_for_status()
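# Illustrative sketch (exposition only): reader/writer-style request
# streaming for a call created without a request iterator. `call` is an
# assumed StreamUnaryCall and `requests` an assumed iterable of messages.
async def _example_write_requests(call, requests):
    await call.wait_for_connection()
    for request in requests:
        await call.write(request)
    # Half-close the request stream; done_writing() is idempotent.
    await call.done_writing()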
class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
"""Object for managing unary-unary RPC calls.
Returned when an instance of `UnaryUnaryMultiCallable` object is called.
"""
_request: RequestType
_invocation_task: asyncio.Task
# pylint: disable=too-many-arguments
def __init__(self, request: RequestType, deadline: Optional[float],
metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
super().__init__(
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._request = request
self._invocation_task = loop.create_task(self._invoke())
self._init_unary_response_mixin(self._invocation_task)
async def _invoke(self) -> ResponseType:
serialized_request = _common.serialize(self._request,
self._request_serializer)
# NOTE(lidiz) asyncio.CancelledError is not a good transport for status,
# because the asyncio.Task class does not cache the exception object.
# https://github.com/python/cpython/blob/edad4d89e357c92f70c0324b937845d652b20afd/Lib/asyncio/tasks.py#L785
try:
serialized_response = await self._cython_call.unary_unary(
serialized_request, self._metadata)
except asyncio.CancelledError:
if not self.cancelled():
self.cancel()
if self._cython_call.is_ok():
return _common.deserialize(serialized_response,
self._response_deserializer)
else:
return cygrpc.EOF
async def wait_for_connection(self) -> None:
await self._invocation_task
if self.done():
await self._raise_for_status()
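# Illustrative sketch (exposition only): a UnaryUnaryCall is awaitable, so a
# caller can fire the RPC and await the single response. The method path and
# the way the multicallable is obtained are assumptions for the example.
async def _example_unary_unary(channel, request):
    multicallable = channel.unary_unary('/pkg.Service/Method')
    call = multicallable(request)
    # Raises AioRpcError if the RPC terminates with a non-OK status.
    response = await call
    return response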
class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall):
"""Object for managing unary-stream RPC calls.
Returned when an instance of `UnaryStreamMultiCallable` object is called.
"""
_request: RequestType
_send_unary_request_task: asyncio.Task
# pylint: disable=too-many-arguments
def __init__(self, request: RequestType, deadline: Optional[float],
metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
super().__init__(
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._request = request
self._send_unary_request_task = loop.create_task(
self._send_unary_request())
self._init_stream_response_mixin(self._send_unary_request_task)
async def _send_unary_request(self) -> ResponseType:
serialized_request = _common.serialize(self._request,
self._request_serializer)
try:
await self._cython_call.initiate_unary_stream(
serialized_request, self._metadata)
except asyncio.CancelledError:
if not self.cancelled():
self.cancel()
raise
async def wait_for_connection(self) -> None:
await self._send_unary_request_task
if self.done():
await self._raise_for_status()
class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call,
_base_call.StreamUnaryCall):
"""Object for managing stream-unary RPC calls.
Returned when an instance of `StreamUnaryMultiCallable` object is called.
"""
# pylint: disable=too-many-arguments
def __init__(self, request_iterator: Optional[RequestIterableType],
deadline: Optional[float], metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
super().__init__(
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._init_stream_request_mixin(request_iterator)
self._init_unary_response_mixin(loop.create_task(self._conduct_rpc()))
async def _conduct_rpc(self) -> ResponseType:
try:
serialized_response = await self._cython_call.stream_unary(
self._metadata, self._metadata_sent_observer)
except asyncio.CancelledError:
if not self.cancelled():
self.cancel()
if self._cython_call.is_ok():
return _common.deserialize(serialized_response,
self._response_deserializer)
else:
return cygrpc.EOF
class StreamStreamCall(_StreamRequestMixin, _StreamResponseMixin, Call,
_base_call.StreamStreamCall):
"""Object for managing stream-stream RPC calls.
Returned when an instance of `StreamStreamMultiCallable` object is called.
"""
_initializer: asyncio.Task
# pylint: disable=too-many-arguments
def __init__(self, request_iterator: Optional[RequestIterableType],
deadline: Optional[float], metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
super().__init__(
channel.call(method, deadline, credentials, wait_for_ready),
metadata, request_serializer, response_deserializer, loop)
self._initializer = self._loop.create_task(self._prepare_rpc())
self._init_stream_request_mixin(request_iterator)
self._init_stream_response_mixin(self._initializer)
async def _prepare_rpc(self):
"""This method prepares the RPC for receiving/sending messages.
All other operations around the stream should only happen after the
completion of this method.
"""
try:
await self._cython_call.initiate_stream_stream(
self._metadata, self._metadata_sent_observer)
except asyncio.CancelledError:
if not self.cancelled():
self.cancel()
# No need to raise RpcError here, because no one will `await` this task.
| apache-2.0 |
vlachoudis/sl4a | python/src/Tools/framer/framer/function.py | 48 | 4246 | """Functions."""
from framer import template
from framer.util import cstring, unindent
METH_O = "METH_O"
METH_NOARGS = "METH_NOARGS"
METH_VARARGS = "METH_VARARGS"
def parsefmt(fmt):
for c in fmt:
if c == '|':
continue
yield c
class Argument:
def __init__(self, name):
self.name = name
self.ctype = "PyObject *"
self.default = None
def __str__(self):
return "%s%s" % (self.ctype, self.name)
def setfmt(self, code):
self.ctype = self._codes[code]
if self.ctype[-1] != "*":
self.ctype += " "
_codes = {"O": "PyObject *",
"i": "int",
}
def decl(self):
if self.default is None:
return str(self) + ";"
else:
return "%s = %s;" % (self, self.default)
class _ArgumentList(object):
# these instance variables should be initialized by subclasses
ml_meth = None
fmt = None
def __init__(self, args):
self.args = map(Argument, args)
def __len__(self):
return len(self.args)
def __getitem__(self, i):
return self.args[i]
def dump_decls(self, f):
pass
class NoArgs(_ArgumentList):
def __init__(self, args):
assert len(args) == 0
super(NoArgs, self).__init__(args)
self.ml_meth = METH_NOARGS
def c_args(self):
return "PyObject *self"
class OneArg(_ArgumentList):
def __init__(self, args):
assert len(args) == 1
super(OneArg, self).__init__(args)
self.ml_meth = METH_O
def c_args(self):
return "PyObject *self, %s" % self.args[0]
class VarArgs(_ArgumentList):
def __init__(self, args, fmt=None):
super(VarArgs, self).__init__(args)
self.ml_meth = METH_VARARGS
if fmt is not None:
self.fmt = fmt
i = 0
for code in parsefmt(fmt):
self.args[i].setfmt(code)
i += 1
def c_args(self):
return "PyObject *self, PyObject *args"
def targets(self):
return ", ".join(["&%s" % a.name for a in self.args])
def dump_decls(self, f):
for a in self.args:
print >> f, " %s" % a.decl()
def ArgumentList(func, method):
code = func.func_code
args = code.co_varnames[:code.co_argcount]
if method:
args = args[1:]
pyarg = getattr(func, "pyarg", None)
if pyarg is not None:
args = VarArgs(args, pyarg)
if func.func_defaults:
L = list(func.func_defaults)
ndefault = len(L)
i = len(args) - ndefault
while L:
    args[i].default = L.pop(0)
    i += 1
return args
else:
if len(args) == 0:
return NoArgs(args)
elif len(args) == 1:
return OneArg(args)
else:
return VarArgs(args)
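# Illustrative sketch (exposition only, not part of the original module):
# how ArgumentList classifies plain Python functions. Zero arguments map to
# METH_NOARGS, exactly one to METH_O, anything else to METH_VARARGS.
def _example_classify():
    def no_args():
        "example"
    def one_arg(obj):
        "example"
    def many_args(a, b, c):
        "example"
    assert ArgumentList(no_args, method=False).ml_meth == METH_NOARGS
    assert ArgumentList(one_arg, method=False).ml_meth == METH_O
    assert ArgumentList(many_args, method=False).ml_meth == METH_VARARGS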
class Function:
method = False
def __init__(self, func, parent):
self._func = func
self._parent = parent
self.analyze()
self.initvars()
def dump(self, f):
def p(templ, vars=None): # helper function to generate output
if vars is None:
vars = self.vars
print >> f, templ % vars
if self.__doc__:
p(template.docstring)
d = {"name" : self.vars["CName"],
"args" : self.args.c_args(),
}
p(template.funcdef_start, d)
self.args.dump_decls(f)
if self.args.ml_meth == METH_VARARGS:
p(template.varargs)
p(template.funcdef_end)
def analyze(self):
self.__doc__ = self._func.__doc__
self.args = ArgumentList(self._func, self.method)
def initvars(self):
v = self.vars = {}
v["PythonName"] = self._func.__name__
s = v["CName"] = "%s_%s" % (self._parent.name, self._func.__name__)
v["DocstringVar"] = s + "_doc"
v["MethType"] = self.args.ml_meth
if self.__doc__:
v["Docstring"] = cstring(unindent(self.__doc__))
if self.args.fmt is not None:
v["ArgParse"] = self.args.fmt
v["ArgTargets"] = self.args.targets()
class Method(Function):
method = True
| apache-2.0 |
akaariai/django | django/db/backends/base/base.py | 103 | 17962 | import time
import warnings
from collections import deque
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import utils
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper
from django.utils.functional import cached_property
from django.utils.six.moves import _thread as thread
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = 'unknown'
SchemaEditorClass = None
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes.
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = thread.get_ident()
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen))
return list(self.queries_log)
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')
def get_new_connection(self, conn_params):
"""Opens a connection to the database."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')
def init_connection_state(self):
"""Initializes the database connection settings."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')
def create_cursor(self):
"""Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
# ##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self):
self.ensure_connection()
with self.wrap_database_errors:
return self.create_cursor()
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""
Creates a cursor, opening a connection if necessary.
"""
self.validate_thread_sharing()
if self.queries_logged:
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = self.make_cursor(self._cursor())
return cursor
def commit(self):
"""
Commits a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
def rollback(self):
"""
Rolls back a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
def close(self):
"""
Closes the connection to the database.
"""
self.validate_thread_sharing()
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
def savepoint(self):
"""
Creates a savepoint inside the current transaction. Returns an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back to a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Releases a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
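# Illustrative sketch (exposition only, not part of Django): the generic
# savepoint API as code inside an 'atomic' block might use it. `connection`
# is an assumed BaseDatabaseWrapper instance; savepoint() returns None when
# savepoints are unsupported or autocommit is on.
def _example_savepoints(connection, risky_operation):
    sid = connection.savepoint()
    try:
        risky_operation()
    except Exception:
        if sid is not None:
            connection.savepoint_rollback(sid)
        raise
    else:
        if sid is not None:
            connection.savepoint_commit(sid)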
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""
Check the autocommit state.
"""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit):
"""
Enable or disable autocommit.
"""
self.validate_no_atomic_block()
self.ensure_connection()
self._set_autocommit(autocommit)
self.autocommit = autocommit
def get_rollback(self):
"""
Get the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""
Raise an error if an atomic block is active.
"""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
# ##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Context manager that disables foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Tests if the database connection is usable.
This function may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method")
def close_if_unusable_or_obsolete(self):
"""
Closes the current connection if unrecoverable errors have occurred,
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by a thread other than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if not (self.allow_thread_sharing
or self._thread_ident == thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def make_debug_cursor(self, cursor):
"""
Creates a cursor that logs all queries in self.queries_log.
"""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""
Creates a cursor without debug logging.
"""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provides a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
cursor = self.cursor()
try:
yield cursor
finally:
cursor.close()
if must_close:
self.close()
@cached_property
def _nodb_connection(self):
"""
Alternative connection to be used when there is no need to access
the main database, specifically for test db creation/deletion.
This also prevents the production database from being exposed to
potential child threads while (or after) the test database is destroyed.
Refs #10868, #17786, #16969.
"""
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = None
nodb_connection = self.__class__(
settings_dict,
alias=NO_DB_ALIAS,
allow_thread_sharing=False)
return nodb_connection
def _start_transaction_under_autocommit(self):
"""
Only required when autocommits_when_autocommit_is_off = True.
"""
raise NotImplementedError(
'subclasses of BaseDatabaseWrapper may require a '
'_start_transaction_under_autocommit() method'
)
def schema_editor(self, *args, **kwargs):
"""
Returns a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
'The SchemaEditorClass attribute of this database wrapper is still None')
return self.SchemaEditorClass(self, *args, **kwargs)
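# Illustrative sketch (exposition only, not part of Django): running a
# one-off query via temporary_connection() so no dangling connection is left
# behind. `connection` is an assumed BaseDatabaseWrapper instance and the
# SQL is an arbitrary example.
def _example_temporary_connection(connection):
    with connection.temporary_connection() as cursor:
        cursor.execute('SELECT 1')
        return cursor.fetchone()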
| bsd-3-clause |
rrahmati/roboinstruct-2 | demonstrate_ros_package/scripts/record_demonstration.py | 1 | 15168 | #! /usr/bin/python
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
import sys
import os
import errno  # needed below to guard against directory-creation races
from os.path import expanduser
import signal
import threading
from multiprocessing import Pool
import time
from random import randint
from std_msgs.msg import Float32MultiArray
from leap_client.msg import HandInfoList
def signal_handler(signal, frame):
global record_demonstration
record_demonstration.end_thread = True
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
class RecordDemonstration(object):
def __init__(self):
# parameters
self.task = 3006
# person controlling the robot: 1-Rouhollah, 2-Pooya
self.user_id = 1
self.image_shape = (540, 540)
self.recordDelay = .03
self.camera1 = True
self.camera2 = False
self.camera3 = False
self.al5d = True
self.mico = False
self.task_description = {
5000: "Human demonstrations",
3001: "Grab a bubble wrap and put it into plate",
3002: "Push the plate to the left",
3003: "Push the box towards the robot's base",
3004: "Push and roll the bottle towards the robot's base",
3005: "Pick up the towel and clean the screwdriver box",
3006: "rotate the pliers wrench to a perpendicular orientation",
# first camera calibration:
1001: "Put three small objects into the container",
1002: "Grab a pen and put it into user's hand",
1003: "Take the stirring bar from the user, stir a coffee cup, give it back to the user",
1004: "Grab capsules from the table and put them into their bottle",
1005: "Grab a paper cup and pour its content into a plate",
1006: "Push all small cubes and gather them in the middle of table",
1007: "The small towel is already folded. fold it one more time",
1008: "Grab a paper cup and put it into a tea cup",
1009: "Grab the spoon and fork and put them into the plate, spoon on right, fork on left",
1010: "Pick up a thick marker and put it into upright position",
1011: "Push and rotate the markers and gather them close to the robot base",
1012: "Stay in the middle position. Don't move!",
1013: "Pick up a mug and place it on the table where the user is pointing",
1014: "scoop ...",
# second camera calibration:
1501: "Grab 6 small cubes in a cluttered situation and put them into a plate",
1502: "Grab a marker and put it into the cup. Then, put it back on the table.",
# second camera calibration, each task 5 minutes, 10,000 waypoints
2001: "Grab 3 small markers and arrange them vertically on the right side",
2002: "Grab 3 small markers and arrange them horizontally on the right side",
2003: "Grab 3 small markers and arrange them vertically on the left side",
2004: "Grab 3 small markers and arrange them horizontally on the left side",
2005: "Grab 3 small markers and make a triangle with them",
2006: "Grab 3 small markers, put one on the left, one on the right, and one in the middle",
2007: "Grab 3 small markers and make a horizontal line with them",
2008: "Grab 3 small markers and write the character Y with them",
2009: "Grab 3 small markers and write the character U with them",
2010: "Grab 3 small markers and write the character H with them",
2011: "Grab 3 small markers and write the character N with them",
2012: "Grab 3 small markers and write the character T with them",
2013: "Grab 3 small markers and write the reversed character N with them",
2014: "Grab 3 small markers and write the reversed character Y with them",
2015: "Grab 3 small markers and write the reversed character U with them",
2016: "Grab 3 small markers and write the 90 degree rotated character H with them",
2017: "Grab 3 small markers and write the reversed character T with them",
2018: "Grab 3 small markers and write the character K with them",
2019: "Grab 3 small markers, put one vertically on the right, and two vertically on the left",
2020: "Grab 3 small markers, put one vertically on the left, and two vertically on the right",
2021: "Grab 3 small markers, put one horizontally on the right, and two horizontally on the left",
2022: "Grab 3 small markers, put one horizontally on the left, and two horizontally on the right",
2023: "Grab 3 small markers, put one vertically on the right, and two horizontally on the left",
2024: "Grab 3 small markers, put one horizontally on the left, and two vertically on the right",
2025: "Grab 3 small markers, put one vertically on the right, and make a vertical line with the other two",
2026: "Grab 3 small markers, put one vertically on the left, and make a vertical line with the other two",
2027: "Grab 3 small markers, put one vertically on the right, and make a horizontal line with the other two",
2028: "Grab 3 small markers, put one vertically on the left, and make a horizontal line with the other two",
2029: "Grab 3 small markers and put them into the coffee cup on the right",
2030: "Grab 3 small markers that are inside a coffee cup on the right and put them on the desk",
2031: "Grab 3 small markers and put them into the coffee cup on the left",
2032: "Grab 3 small markers that are inside a coffee cup on the left and put them on the desk",
2033: "Grab 3 small markers, put one into the coffee cup on the left, and the others into the coffee cup on the right",
2034: "Grab 3 small markers, put one into the coffee cup on the right, and the others into the coffee cup on the left",
2035: "Grab 2 small markers, put one into the coffee cup on the right, and the other into the coffee cup on the left",
2036: "Grab 2 small markers, put one into the coffee cup on the left, and the other into the coffee cup on the right",
2037: "Grab one small marker from each coffee cup and put them on the desk",
2038: "Grab one small marker from the coffee cup on the right and put it into the coffee cup on the left",
2039: "Grab one small marker from the coffee cup on the left and put it into the coffee cup on the right",
2040: "Grab 4 small markers and make a square with them",
2041: "Grab 4 small markers and make a cross with them",
2042: "Grab 4 small markers and make a 45 degree rotated square with them",
2043: "Grab 4 small markers and make a plus with them",
2044: "Grab 4 small markers, put one vertically on the right and three vertically on the left",
2045: "Grab 4 small markers, put one horizontally on the right and three vertically on the left",
2046: "Grab 4 small markers, put one vertically on the right and three horizontally on the left",
2047: "Grab 4 small markers, put one horizontally on the right and three horizontally on the left",
2048: "Grab 4 small markers, put two vertically on the right and two vertically on the left",
2049: "Grab 4 small markers, put two horizontally on the right and two vertically on the left",
2050: "Grab 4 small markers, put two vertically on the right and two horizontally on the left",
2051: "Grab 4 small markers, put two horizontally on the right and two horizontally on the left",
2052: "Grab 4 small markers and draw the bottom half of a star with them",
2053: "Grab 4 small markers and draw the upper half of a star with them",
2054: "Grab 4 small markers and draw the character '=' with them",
2055: "Grab 4 small markers and draw the 90 degree rotated character '=' with them",
2056: "Grab 4 small markers and draw the character 'W' with them",
2057: "Grab 4 small markers and draw the character 'M' with them",
2058: "Grab 4 small markers and draw the character 'E' with them",
2059: "Grab 4 small markers and draw the reversed character 'E' with them",
2060: "Grab 4 small markers and draw the character 'm' with them",
2061: "Grab 4 small markers and draw the reversed character 'm' with them",
}
# initialization
self.filepath = expanduser("~") + '/t/task-' + str(self.task) + '/' + str(randint(0,1000000))
rospy.init_node('record_demonstration')
if self.camera1:
self.create_folders(self.filepath + '/camera-' + str(1) + '/')
# self.create_folders(self.filepath + '/camera-' + str(1) + '-depth/')
rospy.Subscriber("/kinect2/qhd/image_color_rect", Image, self.camera1_callback)
# rospy.Subscriber("/kinect2/hd/image_depth_rect", Image, self.camera1_depth_callback)
if self.camera2:
self.create_folders(self.filepath + '/camera-' + str(2) + '/')
rospy.Subscriber("/usb_cam/image_raw", Image, self.camera2_callback)
if self.camera3:
self.create_folders(self.filepath + '/camera-' + str(3) + '/')
rospy.Subscriber("/kinect2/qhd/image_color_rect", Image, self.camera3_callback)
if self.al5d:
self.write_file_header()
rospy.Subscriber("/leap_al5d_info", Float32MultiArray, self.leap_al5d_callback)
if self.mico:
self.write_file_header()
rospy.Subscriber("/leap_mico_info", Float32MultiArray, self.leap_mico_callback)
self.bridge = CvBridge()
self.timestep = 0
self.task_complete_count = 0
self.rate = rospy.Rate(self.recordDelay*1000)
self.last_reward_time = 0
self.last_robot_msg = 0
self.start_time = rospy.get_time()
self.end_thread = False
self.pause = False
# self.pool = Pool(2)
self.thread = threading.Thread(target= self._update_thread)
self.thread.start()
def save_image(self, img_msg, camera):
try:
img = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
img = np.array(img, dtype=np.float)
except CvBridgeError, e:
print(e)
else:
img = img[0:540, 250:840]
img = cv2.resize(img, self.image_shape)
cv2.imwrite(self.filepath + '/camera-' + str(camera) + '/' + str(self.timestep) +
'.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
def save_image_depth(self, img_msg, camera):
try:
img = self.bridge.imgmsg_to_cv2(img_msg, "16UC1")
img = np.array(img, dtype=np.float32)
cv2.normalize(img, img, 0, 1, cv2.NORM_MINMAX)
except CvBridgeError, e:
print(e)
else:
img = cv2.resize(img, self.image_shape)
cv2.imwrite(self.filepath + '/camera-' + str(camera) + '-depth/' + str(self.timestep) +
'.jpg', img*255.0, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
def camera1_callback(self, msg):
self.camera1_msg = msg
def camera1_depth_callback(self, msg):
self.camera1_depth_msg = msg
def camera2_callback(self, msg):
self.camera2_msg = msg
def camera3_callback(self, msg):
self.camera3_msg = msg
def leap_al5d_callback(self, msg):
self.leap_al5d_msg = msg
self.last_robot_msg = rospy.get_time()
def leap_mico_callback(self, msg):
self.leap_mico_msg = msg
def create_folders(self, foldername):
if not os.path.exists(foldername):
try:
os.makedirs(foldername)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def write_file_header(self):
with open(self.filepath + '.txt', 'w') as f:
f.write(str(time.strftime('%l:%M%p %z on %b %d, %Y')) + '\n' + str(self.task_description[self.task]) + '\n')
f.write('time,task,user,robot,reward,human,gripper,joint1,joint2,joint3,joint4,joint5,joint6')
def append_to_file(self, robot):
with open(self.filepath + '.txt', 'a') as f:
str_to_append = '\n' + str(rospy.get_time() - self.start_time) + ',' + str(self.task) + ',' + str(self.user_id) + ','
if robot == 'al5d':
str_to_append = str_to_append + str(1) + ','
data = [x for x in self.leap_al5d_msg.data]
elif robot == 'mico':
str_to_append = str_to_append + str(2) + ','
data = [x for x in self.leap_mico_msg.data]
if abs(data[0] - 1) < .01: # got reward
if rospy.get_time() - self.last_reward_time > 1:
self.task_complete_count += 1
self.last_reward_time = rospy.get_time()
else:
data[0] = 0
sys.stdout.write('\rTimestep: ' + str(self.timestep) + ' Task done: ' + str(self.task_complete_count))
sys.stdout.flush()
str_to_append = str_to_append + ','.join(str(e) for e in data)
f.write(str_to_append)
def _update_thread(self):
while not rospy.is_shutdown() and not self.end_thread:
if self.pause or rospy.get_time() - self.start_time < 1 or rospy.get_time() - self.last_robot_msg > .1:
continue
save_files = (self.camera1 == hasattr(self, 'camera1_msg') and self.camera2 == hasattr(self, 'camera2_msg')
and self.camera3 == hasattr(self, 'camera3_msg') and self.al5d == hasattr(self, 'leap_al5d_msg')
and self.mico == hasattr(self, 'leap_mico_msg'))
if save_files:
if self.camera1:
# # self.pool.map(self.save_image, [(self.camera1_msg, 1)])
self.save_image(self.camera1_msg, 1)
# self.save_image_depth(self.camera1_depth_msg, 1)
if self.camera2:
# self.pool.map(self.save_image, [(self.camera2_msg, 2)])
self.save_image(self.camera2_msg, 2)
if self.camera3:
self.save_image(self.camera2_msg, 3)
if self.al5d:
self.append_to_file('al5d')
if self.mico:
self.append_to_file('mico')
self.timestep += 1
self.rate.sleep()
def main():
global record_demonstration
record_demonstration = RecordDemonstration()
rospy.spin()
# while not rospy.is_shutdown() and not record_demonstration.end_thread:
# input = raw_input(">>>")
# record_demonstration.pause = not record_demonstration.pause
if __name__ == '__main__':
main()
| mit |
meletakis/collato | lib/python2.7/site-packages/endless_pagination/tests/integration/test_twitter.py | 9 | 1615 | """Twitter-style pagination integration tests."""
from __future__ import unicode_literals
from endless_pagination.tests.integration import SeleniumTestCase
class TwitterPaginationTest(SeleniumTestCase):
view_name = 'twitter'
def test_new_elements_loaded(self):
# Ensure a new page is loaded on click.
self.get()
with self.assertNewElements('object', range(1, 11)):
self.click_link(self.MORE)
def test_url_not_changed(self):
# Ensure the request is done using Ajax (the page does not refresh).
self.get()
with self.assertSameURL():
self.click_link(self.MORE)
def test_direct_link(self):
# Ensure direct links work.
self.get(page=4)
self.assertElements('object', range(16, 21))
self.assertIn('page=4', self.selenium.current_url)
def test_subsequent_page(self):
# Ensure next page is correctly loaded in a subsequent page.
self.get(page=2)
with self.assertNewElements('object', range(6, 16)):
self.click_link(self.MORE)
def test_multiple_show_more(self):
# Ensure new pages are loaded again and again.
self.get()
for page in range(2, 5):
expected_range = range(1, 5 * page + 1)
with self.assertSameURL():
with self.assertNewElements('object', expected_range):
self.click_link(self.MORE)
def test_no_more_link_in_last_page(self):
# Ensure there is no more link on the last page.
self.get(page=10)
self.asserLinksEqual(0, self.MORE)
| gpl-2.0 |
jied83/kernel_back | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
andreparrish/python-for-android | python-modules/twisted/twisted/test/time_helpers.py | 59 | 2005 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Helper class for writing deterministic time-based unit tests.
Do not use this module. It is a lie. See L{twisted.internet.task.Clock}
instead.
"""
import warnings
warnings.warn(
"twisted.test.time_helpers is deprecated since Twisted 10.0. "
"See twisted.internet.task.Clock instead.",
category=DeprecationWarning, stacklevel=2)
class Clock(object):
"""
A utility that monkey-patches various parts of Twisted to use a
simulated timing mechanism. DO NOT use this class. Use
L{twisted.internet.task.Clock}.
"""
rightNow = 0.0
def __call__(self):
"""
Return the current simulated time.
"""
return self.rightNow
def install(self):
"""
Monkeypatch L{twisted.internet.reactor.seconds} to use
L{__call__} as a time source
"""
# Violation is fun.
from twisted.internet import reactor
self.reactor_original = reactor.seconds
reactor.seconds = self
def uninstall(self):
"""
Remove the monkeypatching of L{twisted.internet.reactor.seconds}.
"""
from twisted.internet import reactor
reactor.seconds = self.reactor_original
def adjust(self, amount):
"""
Adjust the current simulated time upward by the given C{amount}.
Note that this does not cause any scheduled calls to be run.
"""
self.rightNow += amount
def pump(self, reactor, timings):
"""
Iterate the given C{reactor} with increments of time specified
by C{timings}.
For each timing, the simulated time will be L{adjust}ed and
the reactor will be iterated twice.
"""
timings = list(timings)
timings.reverse()
self.adjust(timings.pop())
while timings:
self.adjust(timings.pop())
reactor.iterate()
reactor.iterate()
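# Illustrative sketch (exposition only): driving a reactor with the
# deprecated Clock above. As the module docstring says, new code should use
# twisted.internet.task.Clock instead.
def _example_clock_pump(reactor):
    clock = Clock()
    clock.install()
    try:
        clock.pump(reactor, [0.1, 0.5, 1.0])
    finally:
        clock.uninstall()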
| apache-2.0 |
Dryra/SosAnimauxWeb | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | 3506 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
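# Illustrative sketch (exposition only): the reST markup this directive
# consumes. Each literal block inside the directive is labelled with its
# language name from the `formats` table above. The YAML/XML content is an
# arbitrary example.
#
#   .. configuration-block::
#
#       .. code-block:: yaml
#
#           doctrine:
#               dbal: ~
#
#       .. code-block:: xml
#
#           <doctrine:config />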
| mit |
krishnazure/Flask | Work/Trivia - Module 5/env/Lib/site-packages/jinja2/ext.py | 603 | 25078 | # -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow adding custom tags, similar to the way Django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import next, with_metaclass, string_types, iteritems
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non-unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(with_metaclass(ExtensionRegistry, object)):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because the
environment then ends up acting as central configuration storage, the
attributes may clash, which is why extensions have to ensure that the
names they choose for configuration are not too generic. ``prefix`` for
example is a terrible name, while ``fragment_cache_prefix`` is a good
name as it includes the name of the extension (fragment cache).
"""
#: if this extension parses, this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
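# A minimal sketch of a concrete extension built on this base class; the
# tag name, method name, and environment attribute are illustrative:
#
# class FragmentCacheExtension(Extension):
#     tags = set(['cache'])
#
#     def __init__(self, environment):
#         Extension.__init__(self, environment)
#         # per the docstring above, configuration lives on the environment
#         environment.extend(fragment_cache_prefix='')
#
#     def parse(self, parser):
#         lineno = next(parser.stream).lineno
#         body = parser.parse_statements(['name:endcache'], drop_needle=True)
#         return nodes.CallBlock(self.call_method('_render_cached'),
#                                [], [], body).set_lineno(lineno)
#
#     def _render_cached(self, caller):
#         return caller()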
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, string_types):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
plural_expr_assignment = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
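# A hedged usage sketch for this extension (the `translations` object is
# an assumption -- anything exposing gettext/ngettext works, e.g. a
# babel.support.Translations instance):
#
# env = Environment(extensions=['jinja2.ext.i18n'])
# env.install_gettext_translations(translations, newstyle=True)
# env.from_string('{% trans %}Hello {{ user }}!{% endtrans %}').render(user='x')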
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
class AutoEscapeExtension(Extension):
"""Changes auto escape rules for a scope."""
tags = set(['autoescape'])
def parse(self, parser):
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
node.options = [
nodes.Keyword('autoescape', parser.parse_expression())
]
node.body = parser.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
This extraction function operates on the AST and is therefore unable
to extract any comments. For comment support you have to use the Babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in \
('1', 'on', 'yes', 'true')
silent = getbool(options, 'silent', True)
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError as e:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
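# A hedged babel.cfg sketch wiring this extractor up (the template path
# is illustrative; option names match those parsed above):
#
# [jinja2: **/templates/**.html]
# encoding = utf-8
# extensions = jinja2.ext.autoescape, jinja2.ext.with_
# silent = false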
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
| apache-2.0 |
nuigroup/kivy | kivy/modules/touchring.py | 3 | 2215 | '''
Touchring
=========
Shows a ring around every touch on the table. You can use this module to
check whether you have any calibration trouble with touches.
Configuration
-------------
:Parameters:
`image`: str, defaults to 'atlas://data/images/defaulttheme/ring'
Filename of the image to use.
`scale`: float, defaults to 1.
Scale of the image.
`alpha`: float, defaults to 1.
Opacity of the image.
Example
-------
In your configuration (`~/.kivy/config.ini`), you can write something like
this::
[modules]
touchring = image=mypointer.png,scale=.3,alpha=.7
'''
__all__ = ('start', 'stop')
from kivy.core.image import Image
from kivy.graphics import Color, Rectangle
pointer_image = None
pointer_scale = 1.0
pointer_alpha = 0.7
def _touch_down(win, touch):
ud = touch.ud
touch.scale_for_screen(win.width, win.height)
with win.canvas.after:
ud['tr.color'] = Color(1, 1, 1, pointer_alpha)
iw, ih = pointer_image.size
ud['tr.rect'] = Rectangle(
pos=(
touch.x - (pointer_image.width / 2. * pointer_scale),
touch.y - (pointer_image.height / 2. * pointer_scale)),
size=(iw * pointer_scale, ih * pointer_scale),
texture=pointer_image.texture)
def _touch_move(win, touch):
ud = touch.ud
ud['tr.rect'].pos = (
touch.x - (pointer_image.width / 2. * pointer_scale),
touch.y - (pointer_image.height / 2. * pointer_scale))
def _touch_up(win, touch):
ud = touch.ud
win.canvas.after.remove(ud['tr.color'])
win.canvas.after.remove(ud['tr.rect'])
def start(win, ctx):
# XXX use ctx !
global pointer_image, pointer_scale, pointer_alpha
pointer_fn = ctx.config.get('image',
'atlas://data/images/defaulttheme/ring')
pointer_scale = float(ctx.config.get('scale', 1.0))
pointer_alpha = float(ctx.config.get('alpha', 1.0))
pointer_image = Image(pointer_fn)
win.bind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
def stop(win, ctx):
win.unbind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
| lgpl-3.0 |
pulilab/rapidpro | temba/orgs/migrations/0032_fix_org_with_nexmo_config.py | 4 | 4386 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 09:17
from __future__ import unicode_literals
import json
from uuid import uuid4
import nexmo as nx
import six
from django.conf import settings
from django.core.cache import cache
from django.db import migrations
from django.urls import reverse
from temba.ivr.clients import NexmoClient
from temba.orgs.models import NEXMO_KEY, NEXMO_SECRET, NEXMO_UUID, NEXMO_APP_ID, NEXMO_APP_PRIVATE_KEY
def update_nexmo_config(Org):
if settings.IS_PROD:
nexmo_orgs = Org.objects.filter(config__icontains='NEXMO_KEY')
updated_orgs = set()
failed_orgs = set()
for org in nexmo_orgs:
try:
config = json.loads(org.config) if org.config else {}
nexmo_api_key = config.get(NEXMO_KEY, None)
nexmo_secret = config.get(NEXMO_SECRET, None)
nexmo_uuid = str(uuid4())
nx_client = nx.Client(key=nexmo_api_key, secret=nexmo_secret)
app_name = "%s/%s" % (settings.TEMBA_HOST.lower(), nexmo_uuid)
answer_url = reverse('handlers.nexmo_call_handler', args=['answer', nexmo_uuid])
event_url = reverse('handlers.nexmo_call_handler', args=['event', nexmo_uuid])
params = dict(name=app_name, type='voice', answer_url=answer_url, answer_method='POST',
event_url=event_url, event_method='POST')
response = nx_client.create_application(params=params)
app_id = response.get('id', None)
private_key = response.get("keys", dict()).get("private_key", None)
config[NEXMO_APP_ID] = app_id
config[NEXMO_APP_PRIVATE_KEY] = private_key
config[NEXMO_UUID] = nexmo_uuid
org.config = json.dumps(config)
org.save()
org_channels = org.channels.exclude(channel_type='A')
# clear all our channel configurations
for channel in org_channels:
key = 'channel_config:%d' % channel.id
cache.delete(key)
# for NX channels update the roles according to features available on Nexmo
nexmo_client = NexmoClient(nexmo_api_key, nexmo_secret, app_id, private_key, org=org)
org_nexmo_channels = org.channels.filter(channel_type='NX')
for channel in org_nexmo_channels:
mo_path = reverse('handlers.nexmo_handler', args=['receive', nexmo_uuid])
nexmo_client.update_nexmo_number(six.text_type(channel.country), channel.address,
'https://%s%s' % (settings.TEMBA_HOST, mo_path),
app_id)
nexmo_phones = nexmo_client.get_numbers(channel.address)
features = [elt.upper() for elt in nexmo_phones[0]['features']]
role = ''
if 'SMS' in features:
role += 'S' + 'R' # Channel.ROLE_SEND + Channel.ROLE_RECEIVE
if 'VOICE' in features:
role += 'A' + 'C' # Channel.ROLE_ANSWER + Channel.ROLE_CALL
channel.role = role
channel.save()
updated_orgs.add(org.pk)
print("Migrations successfully updated nexmo config for Org %d" % org.pk)
except Exception as e:
print("Migrations failed to update nexmo config for org %d with error %s" % (org.pk, e.message))
failed_orgs.add(org.pk)
print("Migrations finished updating nexmo config UPDATED: %d orgs , FAILED: %d orgs" % (len(updated_orgs), len(failed_orgs)))
print("=" * 80)
print("Updated orgs: %s" % updated_orgs)
print("Failed orgs: %s" % failed_orgs)
def apply_as_migration(apps, schema_editor):
Org = apps.get_model('orgs', 'Org')
update_nexmo_config(Org)
def apply_manual():
from temba.orgs.models import Org
update_nexmo_config(Org)
def noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('orgs', '0031_is_squashed'),
]
operations = [
migrations.RunPython(apply_as_migration, noop)
]
| agpl-3.0 |
nloopa/linux-reiser4 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/encodings/cp1258.py | 593 | 13620 | """ Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1258',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
u'\u0303' # 0xDE -> COMBINING TILDE
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\u0323' # 0xF2 -> COMBINING DOT BELOW
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
u'\u20ab' # 0xFE -> DONG SIGN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
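# A quick sanity check of the tables above, assuming the codec has been
# registered through the standard `encodings` package search (Python 2
# string semantics):
#
# assert u'\u20ab'.encode('cp1258') == '\xfe' # DONG SIGN
# assert '\xfe'.decode('cp1258') == u'\u20ab'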
| gpl-2.0 |
mistercrunch/airflow | docs/exts/redirects.py | 5 | 2708 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Based on: https://github.com/sphinx-contrib/redirects"""
import os
from sphinx.builders import html as builders
from sphinx.util import logging
TEMPLATE = '<html><head><meta http-equiv="refresh" content="0; url={}"/></head></html>'
log = logging.getLogger(__name__)
def generate_redirects(app):
"""Generate redirects files."""
redirect_file_path = os.path.join(app.srcdir, app.config.redirects_file)
if not os.path.exists(redirect_file_path):
log.info("Could not found the redirect file: %s", redirect_file_path)
return
in_suffix = next(iter(app.config.source_suffix.keys()))
if not isinstance(app.builder, builders.StandaloneHTMLBuilder):
return
with open(redirect_file_path) as redirects:
for line in redirects.readlines():
# Skip empty line
if not line.strip():
continue
# Skip comments
if line.startswith("#"):
continue
from_path, _, to_path = line.rstrip().partition(" ")
log.debug("Redirecting '%s' to '%s'", from_path, to_path)
from_path = from_path.replace(in_suffix, '.html')
to_path = to_path.replace(in_suffix, ".html")
to_path_prefix = "..%s" % os.path.sep * (len(from_path.split(os.path.sep)) - 1)
to_path = to_path_prefix + to_path
log.debug("Resolved redirect '%s' to '%s'", from_path, to_path)
redirected_filename = os.path.join(app.builder.outdir, from_path)
redirected_directory = os.path.dirname(redirected_filename)
os.makedirs(redirected_directory, exist_ok=True)
with open(redirected_filename, "w") as f:
f.write(TEMPLATE.format(to_path))
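# A hedged sketch of the redirects file consumed above: one
# "<from> <to>" pair per line, paths in the source suffix (file names
# are illustrative); blank lines and '#' comments are skipped:
#
# # old path                     new path
# howto/old-page.rst             concepts/new-page.rst
# project.rst                    index.rst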
def setup(app):
"""Setup plugin"""
app.add_config_value("redirects_file", "redirects", "env")
app.connect("builder-inited", generate_redirects)
| apache-2.0 |
hobson/pug-invest | pug/invest/bin/fit-test.py | 1 | 4498 | from statsmodels.tsa import arima_model
import numpy as np
from pug.invest import util
y = util.simulate(poly=100, sinusoids=(10, 100, -20)).values
hr = np.arange(365*96)*.25
t = hr * 3600
sinusoids = [
np.random.normal(0.0, 0.1, 365*96)+10 + 3*np.sin(hr*2*np.pi/96/.25),
np.random.normal(0.0, 0.1, 365*96)+15 + 3*np.sin(hr*2*np.pi/96/.25) + 3*np.cos(t*2*np.pi/96./.25/365.),
np.random.normal(0.0, 1.0, 365*96)+15 + 3*np.sin(hr*2*np.pi/96/.25) + 3*np.cos(t*2*np.pi/96./.25/365.)+np.random.normal(0.0,1e-5,365*96).cumsum()]
arma20 = arima_model.ARMA(y, (2,0)).fit()
y2 = arma20.predict(start=10*96, end=12*96)  # was 'arma', an undefined name
y1 = y[10*96-1:12*96]
plt.plot(t[10*96-1:12*96],zip(*[y1,y2]))
plt.show()
y2 = arma30.predict(start=10*96, end=12*96)
plt.plot(t[10*96-1:12*96],zip(*[y1,y2]))
plt.show()
arma30.resid.plot()
plt.plot(arma30.resid)
plt.show()
plt.plot(arma30.resid/y2)
plt.plot(arma30.resid/y)
plt.show()
plt.plot(arma30.resid/y)
plt.show()
arma30 = arima_model.ARMA(y[:-96*30], (2,0)).fit()
y1 = y[-32*96:]
y2 = arma30.predict(start=N-32*96, end=N-28*96)
N=len(y)
y2 = arma30.predict(start=N-32*96, end=N-28*96)
plt.plot(t[-32*96-1:-28*96],zip(*[y1,y2]))
plt.show()
plt.plot(t[-32*96-1:-28*96],zip(*[y1,y2]))
plt.show()
N
arma30 = arima_model.ARMA(y[:-96*30], (3,0)).fit()
N_predict=len(y[:-96*30])
y_predict=y[:-96*30]
y2 = arma30.predict(start=N_predict,end=N_predict+96)
y1 = y[N_predict:N_predict+96]
y1-y2
y2 = arma30.predict(start=N_predict,end=N_predict+95)
plt.plot(zip(*[y1,y2]))
plt.plot(zip(*[y1,y2]))
plt.show()
arma41 = arima_model.ARMA(y_train, (4,1)).fit()
y_train=y[:-96*30]
arma41 = arima_model.ARMA(y_train, (4,1)).fit()
arma296 = arima_model.ARMA(y_train, (2,96)).fit()
arma296 = arima_model.ARMA(y_train.diff(), (2,96)).fit()
arma296 = arima_model.ARMA(pd.Series(y_train).diff(), (2,96)).fit()
import pandas as pd
y_diff = pd.Series(y).diff().values()
y_diff = pd.Series(y).diff().values
y_train=y_diff[:-96*30]
arma296 = arima_model.ARMA(y_train, (2,96)).fit()
arma296 = arima_model.ARMA(y_train, (2,0)).fit()
arma296 = arima_model.ARMA(y_train[1:], (2,0)).fit()
arma296 = arima_model.ARMA(y_train[-96*14:], (2,96)).fit()
arma296 = arima_model.ARMA(y_train[-96*7:], (2,96)).fit()
arma296 = arima_model.ARMA(y_train[-96*2:], (2,96)).fit()
arma296 = arima_model.ARMA(y_train[-96*3:], (2,96)).fit()
arma296 = arima_model.ARMA(y_train[-96*4:], (2,96)).fit()
arma296 = arima_model.ARMA(y_train[-96*14:], (2,48)).fit()
arma296 = arima_model.ARMA(y_train[-96*14:], (2,24)).fit()
arma296 = arima_model.ARMA(y_train[-96*14:], (0,96)).fit()
arma296 = arima_model.ARMA(y_train[-96*14:], (1,96)).fit()
arma296 = arima_model.ARMA(y_train[-96*14:], (1,96)).fit(meth='mle')
arma296 = arima_model.ARMA(y_train[-96*14:], (1,96)).fit(meth='css')
arma296 = arima_model.ARMA(np.diff(y_train[-96*14:]).dropna(), (1,96)).fit(meth='css')
arma296 = arima_model.ARMA(np.diff(y_train[-96*14:])[1:], (2,96)).fit(meth='css')
arma296 = arima_model.ARMA(np.diff(y_train[-96*14:])[1:], (2,96)).fit(meth='mle')
arma296 = arima_model.ARMA(np.diff(y_train[-96*14:])[1:], (2,96))
arma296.fit(trend='c',solver='bfgs')
arma296.fit(trend='c',solver='bfgs',transparams=True)
arma296.fit(trend='c',solver='bfgs',transparams=False)
arma296._fit_start_params
arma296._fit_start_params()
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=False)
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=True)
q = np.zeros(96)
q[0] = 1
q[-1]=1
q[-1]=.5
q[0] = .1
q[-1]=.9
p=[10, 1.2, -.2]
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=True,startparams=[p,q])
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=True,start_params=[p,q])
np.log
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=False,start_params=[p,q])
p=np.array([10, 1.2, -.2])
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=False,start_params=[p,q])
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=False,start_params=np.array([p,q]))
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=False,start_params=q)
q.shape
q = np.zeros(93)
q[-1]=.9
q[0]=.1
arma296.fit(meth='css-mle',trend='c',solver='bfgs',transparams=False,start_params=q)
arma296.fit(trend='c',solver='bfgs',transparams=False,start_params=q)
arma296.fit(trend='c',transparams=False,start_params=q)
arma296.fit(transparams=False,start_params=q)
len(q)
p=np.array([10, 1.2, -.2])
q = np.zeros(99)
q[0]=.1
q[0]=10
q[1]=1
q[2]=-.2
q[-1]=.95
arma296.fit(transparams=False,start_params=q)
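# A cleaner sketch of the fits attempted above, under the same old
# statsmodels ARMA API (order and horizon values are illustrative):
#
# model = arima_model.ARMA(y_train, order=(2, 0)).fit()
# forecast = model.predict(start=len(y_train), end=len(y_train) + 95)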
| mit |
romain-dartigues/ansible | lib/ansible/plugins/filter/urlsplit.py | 146 | 1136 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils import helpers
def split_url(value, query='', alias='urlsplit'):
results = helpers.object_to_dict(urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
# If a query is supplied, make sure it's valid then return the results.
# If no option is supplied, return the entire dictionary.
if query:
if query not in results:
raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
return results[query]
else:
return results
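# A hedged playbook usage sketch (the URL is illustrative):
#
# {{ 'http://user:pw@www.acme.com:9000/dir/index.html?a=b#frag' | urlsplit('hostname') }}
# # => 'www.acme.com'
# # With no query argument the filter returns the whole component dict.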
# ---- Ansible filters ----
class FilterModule(object):
''' URI filter '''
def filters(self):
return {
'urlsplit': split_url
}
| gpl-3.0 |
gmacchi93/serverInfoParaguay | apps/venv/lib/python2.7/site-packages/setuptools/extension.py | 284 | 1404 | import sys
import distutils.core
import distutils.extension
from setuptools.dist import _get_unpatched
_Extension = _get_unpatched(distutils.core.Extension)
def have_pyrex():
"""
Return True if Cython or Pyrex can be imported.
"""
pyrex_impls = 'Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext'
for pyrex_impl in pyrex_impls:
try:
# from (pyrex_impl) import build_ext
__import__(pyrex_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
def __init__(self, *args, **kw):
_Extension.__init__(self, *args, **kw)
if not have_pyrex():
self._convert_pyx_sources_to_c()
def _convert_pyx_sources_to_c(self):
"convert .pyx extensions to .c"
def pyx_to_c(source):
if source.endswith('.pyx'):
source = source[:-4] + '.c'
return source
self.sources = list(map(pyx_to_c, self.sources))
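# A hedged setup.py sketch (package and file names are illustrative):
# with Cython/Pyrex absent, the '.pyx' source below is transparently
# rewritten to its pre-generated '.c' counterpart:
#
# Extension('mypkg._speedups', sources=['mypkg/_speedups.pyx'])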
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = Extension
| apache-2.0 |
ChromiumWebApps/chromium | mojo/public/bindings/generators/mojom_js_generator.py | 1 | 7742 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates JavaScript source files from a mojom.Module."""
from generate import mojom
from generate import mojom_pack
from generate import mojom_generator
from generate.template_expander import UseJinja
_kind_to_javascript_default_value = {
mojom.BOOL: "false",
mojom.INT8: "0",
mojom.UINT8: "0",
mojom.INT16: "0",
mojom.UINT16: "0",
mojom.INT32: "0",
mojom.UINT32: "0",
mojom.FLOAT: "0",
mojom.HANDLE: "core.kInvalidHandle",
mojom.DCPIPE: "core.kInvalidHandle",
mojom.DPPIPE: "core.kInvalidHandle",
mojom.MSGPIPE: "core.kInvalidHandle",
mojom.INT64: "0",
mojom.UINT64: "0",
mojom.DOUBLE: "0",
mojom.STRING: '""',
}
def JavaScriptDefaultValue(field):
if field.default:
raise Exception("Default values should've been handled in jinja.")
if field.kind in mojom.PRIMITIVES:
return _kind_to_javascript_default_value[field.kind]
if isinstance(field.kind, mojom.Struct):
return "null"
if isinstance(field.kind, mojom.Array):
return "[]"
if isinstance(field.kind, mojom.Interface):
return _kind_to_javascript_default_value[mojom.MSGPIPE]
def JavaScriptPayloadSize(packed):
packed_fields = packed.packed_fields
if not packed_fields:
return 0
last_field = packed_fields[-1]
offset = last_field.offset + last_field.size
pad = mojom_pack.GetPad(offset, 8)
return offset + pad
_kind_to_javascript_type = {
mojom.BOOL: "codec.Uint8",
mojom.INT8: "codec.Int8",
mojom.UINT8: "codec.Uint8",
mojom.INT16: "codec.Int16",
mojom.UINT16: "codec.Uint16",
mojom.INT32: "codec.Int32",
mojom.UINT32: "codec.Uint32",
mojom.FLOAT: "codec.Float",
mojom.HANDLE: "codec.Handle",
mojom.DCPIPE: "codec.Handle",
mojom.DPPIPE: "codec.Handle",
mojom.MSGPIPE: "codec.Handle",
mojom.INT64: "codec.Int64",
mojom.UINT64: "codec.Uint64",
mojom.DOUBLE: "codec.Double",
mojom.STRING: "codec.String",
}
def GetJavaScriptType(kind):
if kind in mojom.PRIMITIVES:
return _kind_to_javascript_type[kind]
if isinstance(kind, mojom.Struct):
return "new codec.PointerTo(%s)" % GetJavaScriptType(kind.name)
if isinstance(kind, mojom.Array):
return "new codec.ArrayOf(%s)" % GetJavaScriptType(kind.kind)
if isinstance(kind, mojom.Interface):
return GetJavaScriptType(mojom.MSGPIPE)
return kind
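# For example (type names are illustrative), an array of structs 'Foo'
# resolves recursively to:
#   "new codec.ArrayOf(new codec.PointerTo(Foo))"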
_kind_to_javascript_decode_snippet = {
mojom.BOOL: "read8() & 1",
mojom.INT8: "read8()",
mojom.UINT8: "read8()",
mojom.INT16: "read16()",
mojom.UINT16: "read16()",
mojom.INT32: "read32()",
mojom.UINT32: "read32()",
mojom.FLOAT: "decodeFloat()",
mojom.HANDLE: "decodeHandle()",
mojom.DCPIPE: "decodeHandle()",
mojom.DPPIPE: "decodeHandle()",
mojom.MSGPIPE: "decodeHandle()",
mojom.INT64: "read64()",
mojom.UINT64: "read64()",
mojom.DOUBLE: "decodeDouble()",
mojom.STRING: "decodeStringPointer()",
}
def JavaScriptDecodeSnippet(kind):
if kind in mojom.PRIMITIVES:
return _kind_to_javascript_decode_snippet[kind]
if isinstance(kind, mojom.Struct):
return "decodeStructPointer(%s)" % GetJavaScriptType(kind.name);
if isinstance(kind, mojom.Array):
return "decodeArrayPointer(%s)" % GetJavaScriptType(kind.kind);
if isinstance(kind, mojom.Interface):
return JavaScriptDecodeSnippet(mojom.MSGPIPE)
_kind_to_javascript_encode_snippet = {
mojom.BOOL: "write8(1 & ",
mojom.INT8: "write8(",
mojom.UINT8: "write8(",
mojom.INT16: "write16(",
mojom.UINT16: "write16(",
mojom.INT32: "write32(",
mojom.UINT32: "write32(",
mojom.FLOAT: "encodeFloat(",
mojom.HANDLE: "encodeHandle(",
mojom.DCPIPE: "encodeHandle(",
mojom.DPPIPE: "encodeHandle(",
mojom.MSGPIPE: "encodeHandle(",
mojom.INT64: "write64(",
mojom.UINT64: "write64(",
mojom.DOUBLE: "encodeDouble(",
mojom.STRING: "encodeStringPointer(",
}
def JavaScriptEncodeSnippet(kind):
if kind in mojom.PRIMITIVES:
return _kind_to_javascript_encode_snippet[kind]
if isinstance(kind, mojom.Struct):
return "encodeStructPointer(%s, " % GetJavaScriptType(kind.name);
if isinstance(kind, mojom.Array):
return "encodeArrayPointer(%s, " % GetJavaScriptType(kind.kind);
if isinstance(kind, mojom.Interface):
return JavaScriptEncodeSnippet(mojom.MSGPIPE)
def GetConstants(module):
"""Returns a generator that enumerates all constants that can be referenced
from this module."""
class Constant:
pass
for enum in module.enums:
for field in enum.fields:
constant = Constant()
constant.namespace = module.namespace
constant.is_current_namespace = True
constant.import_item = None
constant.name = (enum.name, field.name)
yield constant
for each in module.imports:
for enum in each["module"].enums:
for field in enum.fields:
constant = Constant()
constant.namespace = each["namespace"]
constant.is_current_namespace = constant.namespace == module.namespace
constant.import_item = each
constant.name = (enum.name, field.name)
yield constant
def TranslateConstants(value, module):
# We're assuming we're dealing with an identifier, but that may not be
# the case. If we're not, we just won't find any matches.
if value.find(".") != -1:
namespace, identifier = value.split(".")
else:
namespace, identifier = "", value
for constant in GetConstants(module):
if namespace == constant.namespace or (
namespace == "" and constant.is_current_namespace):
if constant.name[1] == identifier:
if constant.import_item:
return "%s.%s.%s" % (constant.import_item["unique_name"],
constant.name[0], constant.name[1])
else:
return "%s.%s" % (constant.name[0], constant.name[1])
return value
def ExpressionToText(value, module):
if value[0] != "EXPRESSION":
raise Exception("Expected EXPRESSION, got" + value)
return "".join(mojom_generator.ExpressionMapper(value,
lambda token: TranslateConstants(token, module)))
def JavascriptType(kind):
if kind.imported_from:
return kind.imported_from["unique_name"] + "." + kind.name
return kind.name
class Generator(mojom_generator.Generator):
js_filters = {
"camel_to_underscores": mojom_generator.CamelToUnderscores,
"default_value": JavaScriptDefaultValue,
"payload_size": JavaScriptPayloadSize,
"decode_snippet": JavaScriptDecodeSnippet,
"encode_snippet": JavaScriptEncodeSnippet,
"expression_to_text": ExpressionToText,
"is_object_kind": mojom_generator.IsObjectKind,
"is_string_kind": mojom_generator.IsStringKind,
"is_array_kind": lambda kind: isinstance(kind, mojom.Array),
"js_type": JavascriptType,
"stylize_method": mojom_generator.StudlyCapsToCamel,
"verify_token_type": mojom_generator.VerifyTokenType,
}
@UseJinja("js_templates/module.js.tmpl", filters=js_filters)
def GenerateJsModule(self):
return {
"imports": self.GetImports(),
"kinds": self.module.kinds,
"enums": self.module.enums,
"module": self.module,
"structs": self.GetStructs() + self.GetStructsFromMethods(),
"interfaces": self.module.interfaces,
}
def GenerateFiles(self):
self.Write(self.GenerateJsModule(), "%s.js" % self.module.name)
def GetImports(self):
# Since each import is assigned a variable in JS, they need to have unique
# names.
counter = 1
for each in self.module.imports:
each["unique_name"] = "import" + str(counter)
counter += 1
return self.module.imports
| bsd-3-clause |
etashjian/ECE757-final | src/cpu/o3/O3CPU.py | 14 | 7870 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from BaseCPU import BaseCPU
from FUPool import *
from O3Checker import O3Checker
from BranchPredictor import BranchPredictor
class DerivO3CPU(BaseCPU):
type = 'DerivO3CPU'
cxx_header = 'cpu/o3/deriv.hh'
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def require_caches(cls):
return True
@classmethod
def support_take_over(cls):
return True
activity = Param.Unsigned(0, "Initial count")
cachePorts = Param.Unsigned(200, "Cache Ports")
decodeToFetchDelay = Param.Cycles(1, "Decode to fetch delay")
renameToFetchDelay = Param.Cycles(1 ,"Rename to fetch delay")
iewToFetchDelay = Param.Cycles(1, "Issue/Execute/Writeback to fetch "
"delay")
commitToFetchDelay = Param.Cycles(1, "Commit to fetch delay")
fetchWidth = Param.Unsigned(8, "Fetch width")
fetchBufferSize = Param.Unsigned(64, "Fetch buffer size in bytes")
fetchQueueSize = Param.Unsigned(32, "Fetch queue size in micro-ops "
"per-thread")
renameToDecodeDelay = Param.Cycles(1, "Rename to decode delay")
iewToDecodeDelay = Param.Cycles(1, "Issue/Execute/Writeback to decode "
"delay")
commitToDecodeDelay = Param.Cycles(1, "Commit to decode delay")
fetchToDecodeDelay = Param.Cycles(1, "Fetch to decode delay")
decodeWidth = Param.Unsigned(8, "Decode width")
iewToRenameDelay = Param.Cycles(1, "Issue/Execute/Writeback to rename "
"delay")
commitToRenameDelay = Param.Cycles(1, "Commit to rename delay")
decodeToRenameDelay = Param.Cycles(1, "Decode to rename delay")
renameWidth = Param.Unsigned(8, "Rename width")
commitToIEWDelay = Param.Cycles(1, "Commit to "
"Issue/Execute/Writeback delay")
renameToIEWDelay = Param.Cycles(2, "Rename to "
"Issue/Execute/Writeback delay")
issueToExecuteDelay = Param.Cycles(1, "Issue to execute delay (internal "
"to the IEW stage)")
dispatchWidth = Param.Unsigned(8, "Dispatch width")
issueWidth = Param.Unsigned(8, "Issue width")
wbWidth = Param.Unsigned(8, "Writeback width")
fuPool = Param.FUPool(DefaultFUPool(), "Functional Unit pool")
iewToCommitDelay = Param.Cycles(1, "Issue/Execute/Writeback to commit "
"delay")
renameToROBDelay = Param.Cycles(1, "Rename to reorder buffer delay")
commitWidth = Param.Unsigned(8, "Commit width")
squashWidth = Param.Unsigned(8, "Squash width")
trapLatency = Param.Cycles(13, "Trap latency")
fetchTrapLatency = Param.Cycles(1, "Fetch trap latency")
backComSize = Param.Unsigned(5, "Time buffer size for backwards communication")
forwardComSize = Param.Unsigned(5, "Time buffer size for forward communication")
LQEntries = Param.Unsigned(32, "Number of load queue entries")
SQEntries = Param.Unsigned(32, "Number of store queue entries")
LSQDepCheckShift = Param.Unsigned(4, "Number of places to shift addr before check")
LSQCheckLoads = Param.Bool(True,
"Should dependency violations be checked for loads & stores or just stores")
store_set_clear_period = Param.Unsigned(250000,
"Number of load/store insts before the dep predictor should be invalidated")
LFSTSize = Param.Unsigned(1024, "Last fetched store table size")
SSITSize = Param.Unsigned(1024, "Store set ID table size")
numRobs = Param.Unsigned(1, "Number of Reorder Buffers")
numPhysIntRegs = Param.Unsigned(256, "Number of physical integer registers")
numPhysFloatRegs = Param.Unsigned(256, "Number of physical floating point "
"registers")
# most ISAs don't use condition-code regs, so default is 0
_defaultNumPhysCCRegs = 0
if buildEnv['TARGET_ISA'] in ('arm','x86'):
# For x86, each CC reg is used to hold only a subset of the
# flags, so we need 4-5 times the number of CC regs as
# physical integer regs to be sure we don't run out. In
# typical real machines, CC regs are not explicitly renamed
# (it's a side effect of int reg renaming), so they should
# never be the bottleneck here.
_defaultNumPhysCCRegs = Self.numPhysIntRegs * 5
numPhysCCRegs = Param.Unsigned(_defaultNumPhysCCRegs,
"Number of physical cc registers")
numIQEntries = Param.Unsigned(64, "Number of instruction queue entries")
numROBEntries = Param.Unsigned(192, "Number of reorder buffer entries")
smtNumFetchingThreads = Param.Unsigned(1, "SMT Number of Fetching Threads")
smtFetchPolicy = Param.String('SingleThread', "SMT Fetch policy")
smtLSQPolicy = Param.String('Partitioned', "SMT LSQ Sharing Policy")
smtLSQThreshold = Param.Int(100, "SMT LSQ Threshold Sharing Parameter")
smtIQPolicy = Param.String('Partitioned', "SMT IQ Sharing Policy")
smtIQThreshold = Param.Int(100, "SMT IQ Threshold Sharing Parameter")
smtROBPolicy = Param.String('Partitioned', "SMT ROB Sharing Policy")
smtROBThreshold = Param.Int(100, "SMT ROB Threshold Sharing Parameter")
smtCommitPolicy = Param.String('RoundRobin', "SMT Commit Policy")
branchPred = Param.BranchPredictor(BranchPredictor(numThreads =
Parent.numThreads),
"Branch Predictor")
needsTSO = Param.Bool(buildEnv['TARGET_ISA'] == 'x86',
"Enable TSO Memory model")
def addCheckerCpu(self):
if buildEnv['TARGET_ISA'] in ['arm']:
from ArmTLB import ArmTLB
self.checker = O3Checker(workload=self.workload,
exitOnError=False,
updateOnError=True,
warnOnlyOnLoadError=True)
self.checker.itb = ArmTLB(size = self.itb.size)
self.checker.dtb = ArmTLB(size = self.dtb.size)
self.checker.cpu_id = self.cpu_id
else:
print "ERROR: Checker only supported under ARM ISA!"
exit(1)
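# Example (illustrative sketch, not part of the original file): a gem5 config
# script might tune the pipeline when instantiating this model, e.g.:
#
#   cpu = DerivO3CPU()
#   cpu.fetchWidth = 4        # narrow the front end
#   cpu.issueWidth = 4
#   cpu.numROBEntries = 128   # smaller reorder buffer
#
# The parameter names are defined in this class; the chosen values are
# assumptions for illustration only.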
| bsd-3-clause |
kmolab/kmolab.github.io | data/Brython-3.3.4/Lib/logging/brython_handlers.py | 1 | 1179 | import logging
from browser.ajax import ajax
class XMLHTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, url, method="GET"):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.url = url
self.method = method
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Override in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
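# Example (illustrative, not in the original module): a subclass could trim
# the payload to selected fields instead of sending the whole record dict:
#
#   class CompactXMLHTTPHandler(XMLHTTPHandler):
#       def mapLogRecord(self, record):
#           return {'level': record.levelname, 'msg': record.getMessage()}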
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary.
"""
try:
# 'async' became a reserved word in Python 3.7; this call only parses
# under the older Python grammar that Brython targeted at the time.
req = ajax.open(self.method, self.url, async=False)
req.send(self.mapLogRecord(record))
except Exception:  # avoid a bare except that would swallow SystemExit/KeyboardInterrupt
self.handleError(record)
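# Example usage (illustrative sketch, assuming a Brython page and a server
# endpoint at "/log" that accepts the percent-encoded record dict):
#
#   import logging
#   logger = logging.getLogger(__name__)
#   logger.addHandler(XMLHTTPHandler("/log", method="POST"))
#   logger.error("something went wrong")  # record is sent to /log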
| agpl-3.0 |
jmcarbo/openerp7 | openerp/addons/crm/crm_phonecall.py | 14 | 14638 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_status.base_state import base_state
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(base_state, osv.osv):
""" Model for CRM phonecalls """
_name = "crm.phonecall"
_description = "Phonecall"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
# base_state required fields
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'create_date': fields.datetime('Creation Date', readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.'),
'user_id': fields.many2one('res.users', 'Responsible'),
'partner_id': fields.many2one('res.partner', 'Contact'),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Description'),
'state': fields.selection([ ('draft', 'Draft'),
('open', 'Confirmed'),
('pending', 'Not Held'),
('cancel', 'Cancelled'),
('done', 'Held'),],
string='Status', size=16, readonly=True, track_visibility='onchange',
help='The status is set to \'Todo\' when a case is created.\
If the case is in progress, the status is set to \'Open\'.\
When the call is over, the status is set to \'Held\'.\
If the call still needs to be done, the status is set to \'Not Held\'.'),
'email_from': fields.char('Email', size=128, help="These people will receive email."),
'date_open': fields.datetime('Opened', readonly=True),
# phonecall fields
'name': fields.char('Call Summary', size=64, required=True),
'active': fields.boolean('Active', required=False),
'duration': fields.float('Duration', help="Duration in Minutes"),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',section_id),('section_id','=',False),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_phone': fields.char('Phone', size=32),
'partner_mobile': fields.char('Mobile', size=32),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Date'),
'opportunity_id': fields.many2one('crm.lead', 'Lead/Opportunity'),
}
def _get_default_state(self, cr, uid, context=None):
if context and context.get('default_state', False):
return context.get('default_state')
return 'open'
_defaults = {
'date': fields.datetime.now,
'priority': crm.AVAILABLE_PRIORITIES[2][0],
'state': _get_default_state,
'user_id': lambda self, cr, uid, ctx: uid,
'active': 1
}
def case_close(self, cr, uid, ids, context=None):
""" Overrides close for crm_case for setting duration """
res = True
for phone in self.browse(cr, uid, ids, context=context):
phone_id = phone.id
data = {}
if phone.duration <= 0:
duration = datetime.now() - datetime.strptime(phone.date, DEFAULT_SERVER_DATETIME_FORMAT)
data['duration'] = duration.seconds/float(60)
res = super(crm_phonecall, self).case_close(cr, uid, [phone_id], context=context)
self.write(cr, uid, [phone_id], data, context=context)
return res
def case_reset(self, cr, uid, ids, context=None):
"""Resets case as Todo
"""
res = super(crm_phonecall, self).case_reset(cr, uid, ids, context)
self.write(cr, uid, ids, {'duration': 0.0, 'state':'open'}, context=context)
return res
def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary, \
user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
"""
action: one of ('schedule', 'Schedule a call') or ('log', 'Log a call')
"""
model_data = self.pool.get('ir.model.data')
phonecall_dict = {}
if not categ_id:
try:
res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
for call in self.browse(cr, uid, ids, context=context):
if not section_id:
section_id = call.section_id and call.section_id.id or False
if not user_id:
user_id = call.user_id and call.user_id.id or False
if not schedule_time:
schedule_time = call.date
vals = {
'name' : call_summary,
'user_id' : user_id or False,
'categ_id' : categ_id or False,
'description' : call.description or False,
'date' : schedule_time,
'section_id' : section_id or False,
'partner_id': call.partner_id and call.partner_id.id or False,
'partner_phone' : call.partner_phone,
'partner_mobile' : call.partner_mobile,
'priority': call.priority,
}
new_id = self.create(cr, uid, vals, context=context)
if action == 'log':
self.case_close(cr, uid, [new_id])
phonecall_dict[call.id] = new_id
return phonecall_dict
def _call_create_partner(self, cr, uid, phonecall, context=None):
partner = self.pool.get('res.partner')
partner_id = partner.create(cr, uid, {
'name': phonecall.name,
'user_id': phonecall.user_id.id,
'comment': phonecall.description,
'address': []
})
return partner_id
def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
values = {}
if opportunity_id:
opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
values = {
'section_id' : opportunity.section_id and opportunity.section_id.id or False,
'partner_phone' : opportunity.phone,
'partner_mobile' : opportunity.mobile,
'partner_id' : opportunity.partner_id and opportunity.partner_id.id or False,
}
return {'value' : values}
def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
write_res = self.write(cr, uid, ids, {'partner_id' : partner_id}, context=context)
self._call_set_partner_send_note(cr, uid, ids, context)
return write_res
def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
address = self.pool.get('res.partner')
return address.create(cr, uid, {
'parent_id': partner_id,
'name': phonecall.name,
'phone': phonecall.partner_phone,
})
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
Handle partner assignation during a lead conversion.
If action is 'create', create a new partner with the contact and assign the lead to the new partner_id;
otherwise assign the lead to the specified partner_id.
:param list ids: phonecalls ids to process
:param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
:param int partner_id: partner to assign if any
:return dict: dictionary organized as follows: {lead_id: partner_assigned_id}
"""
# TODO: this is a duplication of the handle_partner_assignation method of crm_lead
partner_ids = {}
# If a partner_id is given, force this partner for all elements
force_partner_id = partner_id
for call in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if action == 'create':
partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
self._call_create_partner_address(cr, uid, call, partner_id, context=context)
self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
partner_ids[call.id] = partner_id
return partner_ids
def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
model_data = self.pool.get('ir.model.data')
# Select the view
tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
value = {
'name': _('Phone Call'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'crm.phonecall',
'res_id' : int(phonecall_id),
'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False,
}
return value
def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
partner = self.pool.get('res.partner')
opportunity = self.pool.get('crm.lead')
opportunity_dict = {}
default_contact = False
for call in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = call.partner_id and call.partner_id.id or False
if partner_id:
address_id = partner.address_get(cr, uid, [partner_id])['default']
if address_id:
default_contact = partner.browse(cr, uid, address_id, context=context)
opportunity_id = opportunity.create(cr, uid, {
'name': opportunity_summary or call.name,
'planned_revenue': planned_revenue,
'probability': probability,
'partner_id': partner_id or False,
'mobile': default_contact and default_contact.mobile,
'section_id': call.section_id and call.section_id.id or False,
'description': call.description or False,
'priority': call.priority,
'type': 'opportunity',
'phone': call.partner_phone or False,
'email_from': default_contact and default_contact.email,
})
vals = {
'partner_id': partner_id,
'opportunity_id' : opportunity_id,
}
self.write(cr, uid, [call.id], vals)
self.case_close(cr, uid, [call.id])
opportunity.case_open(cr, uid, [opportunity_id])
opportunity_dict[call.id] = opportunity_id
return opportunity_dict
def action_make_meeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule a meeting on current phonecall.
:return dict: dictionary value for created meeting view
"""
phonecall = self.browse(cr, uid, ids[0], context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'base_calendar', 'action_crm_meeting', context)
res['context'] = {
'default_phonecall_id': phonecall.id,
'default_partner_id': phonecall.partner_id and phonecall.partner_id.id or False,
'default_user_id': uid,
'default_email_from': phonecall.email_from,
'default_state': 'open',
'default_name': phonecall.name,
}
return res
def action_button_convert2opportunity(self, cr, uid, ids, context=None):
"""
Convert a phonecall into an opp and then redirect to the opp view.
:param list ids: list of calls ids to convert (typically contains a single id)
:return dict: containing view information
"""
if len(ids) != 1:
raise osv.except_osv(_('Warning!'), _('It\'s only possible to convert one phonecall at a time.'))
opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)
# ----------------------------------------
# OpenChatter
# ----------------------------------------
def _call_set_partner_send_note(self, cr, uid, ids, context=None):
return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
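# Example usage (illustrative sketch; follows the OpenERP 7 cr/uid calling
# convention, and the ids and values below are assumptions):
#
#   phonecall_obj = self.pool.get('crm.phonecall')
#   # log a completed follow-up call against phonecall id 42
#   phonecall_obj.schedule_another_phonecall(
#       cr, uid, [42], schedule_time=False,
#       call_summary='Follow-up', action='log')
#   # convert the same call into an opportunity
#   phonecall_obj.convert_opportunity(cr, uid, [42])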
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MakeHer/edx-platform | common/djangoapps/static_replace/test/test_static_replace.py | 4 | 20954 | """Tests for static_replace"""
from urllib import quote_plus
import ddt
import re
from PIL import Image
from cStringIO import StringIO
from nose.tools import assert_equals, assert_true, assert_false # pylint: disable=no-name-in-module
from static_replace import (
replace_static_urls,
replace_course_urls,
_url_replace_regex,
process_static_urls,
make_static_urls_absolute
)
from mock import patch, Mock
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.mongo import MongoModuleStore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls
from xmodule.modulestore.xml import XMLModuleStore
DATA_DIRECTORY = 'data_dir'
COURSE_KEY = SlashSeparatedCourseKey('org', 'course', 'run')
STATIC_SOURCE = '"/static/file.png"'
def test_multi_replace():
course_source = '"/course/file.png"'
assert_equals(
replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY),
replace_static_urls(replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY), DATA_DIRECTORY)
)
assert_equals(
replace_course_urls(course_source, COURSE_KEY),
replace_course_urls(replace_course_urls(course_source, COURSE_KEY), COURSE_KEY)
)
def test_process_url():
def processor(__, prefix, quote, rest): # pylint: disable=missing-docstring
return quote + 'test' + prefix + rest + quote
assert_equals('"test/static/file.png"', process_static_urls(STATIC_SOURCE, processor))
def test_process_url_data_dir_exists():
base = '"/static/{data_dir}/file.png"'.format(data_dir=DATA_DIRECTORY)
def processor(original, prefix, quote, rest): # pylint: disable=unused-argument,missing-docstring
return quote + 'test' + rest + quote
assert_equals(base, process_static_urls(base, processor, data_dir=DATA_DIRECTORY))
def test_process_url_no_match():
def processor(__, prefix, quote, rest): # pylint: disable=missing-docstring
return quote + 'test' + prefix + rest + quote
assert_equals('"test/static/file.png"', process_static_urls(STATIC_SOURCE, processor))
@patch('django.http.HttpRequest', autospec=True)
def test_static_urls(mock_request):
mock_request.build_absolute_uri = lambda url: 'http://' + url
result = make_static_urls_absolute(mock_request, STATIC_SOURCE)
assert_equals(result, '\"http:///static/file.png\"')
@patch('static_replace.staticfiles_storage', autospec=True)
def test_storage_url_exists(mock_storage):
mock_storage.exists.return_value = True
mock_storage.url.return_value = '/static/file.png'
assert_equals('"/static/file.png"', replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY))
mock_storage.exists.assert_called_once_with('file.png')
mock_storage.url.assert_called_once_with('file.png')
@patch('static_replace.staticfiles_storage', autospec=True)
def test_storage_url_not_exists(mock_storage):
mock_storage.exists.return_value = False
mock_storage.url.return_value = '/static/data_dir/file.png'
assert_equals('"/static/data_dir/file.png"', replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY))
mock_storage.exists.assert_called_once_with('file.png')
mock_storage.url.assert_called_once_with('data_dir/file.png')
@patch('static_replace.StaticContent', autospec=True)
@patch('static_replace.modulestore', autospec=True)
@patch('static_replace.AssetBaseUrlConfig.get_base_url')
def test_mongo_filestore(mock_get_base_url, mock_modulestore, mock_static_content):
mock_modulestore.return_value = Mock(MongoModuleStore)
mock_static_content.get_canonicalized_asset_path.return_value = "c4x://mock_url"
mock_get_base_url.return_value = u''
# No namespace => no change to path
assert_equals('"/static/data_dir/file.png"', replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY))
# Namespace => content url
assert_equals(
'"' + mock_static_content.get_canonicalized_asset_path.return_value + '"',
replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY, course_id=COURSE_KEY)
)
mock_static_content.get_canonicalized_asset_path.assert_called_once_with(COURSE_KEY, 'file.png', u'')
@patch('static_replace.settings', autospec=True)
@patch('static_replace.modulestore', autospec=True)
@patch('static_replace.staticfiles_storage', autospec=True)
def test_data_dir_fallback(mock_storage, mock_modulestore, mock_settings):
mock_modulestore.return_value = Mock(XMLModuleStore)
mock_storage.url.side_effect = Exception
mock_storage.exists.return_value = True
assert_equals('"/static/data_dir/file.png"', replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY))
mock_storage.exists.return_value = False
assert_equals('"/static/data_dir/file.png"', replace_static_urls(STATIC_SOURCE, DATA_DIRECTORY))
def test_raw_static_check():
"""
Make sure replace_static_urls leaves alone things that end in '.raw'
"""
path = '"/static/foo.png?raw"'
assert_equals(path, replace_static_urls(path, DATA_DIRECTORY))
text = 'text <tag a="/static/js/capa/protex/protex.nocache.js?raw"/><div class="'
assert_equals(text, replace_static_urls(text, DATA_DIRECTORY))
@patch('static_replace.staticfiles_storage', autospec=True)
@patch('static_replace.modulestore', autospec=True)
def test_static_url_with_query(mock_modulestore, mock_storage):
"""
Make sure that for urls with query params:
query params that contain "^/static/" are converted to full location urls
query params that do not contain "^/static/" are left unchanged
"""
mock_storage.exists.return_value = False
mock_modulestore.return_value = Mock(MongoModuleStore)
pre_text = 'EMBED src ="/static/LAlec04_controller.swf?csConfigFile=/static/LAlec04_config.xml&name1=value1&name2=value2"'
post_text = 'EMBED src ="/c4x/org/course/asset/LAlec04_controller.swf?csConfigFile=%2Fc4x%2Forg%2Fcourse%2Fasset%2FLAlec04_config.xml&name1=value1&name2=value2"'
assert_equals(post_text, replace_static_urls(pre_text, DATA_DIRECTORY, COURSE_KEY))
def test_regex():
yes = ('"/static/foo.png"',
'"/static/foo.png"',
"'/static/foo.png'")
no = ('"/not-static/foo.png"',
'"/static/foo', # no matching quote
)
regex = _url_replace_regex('/static/')
for s in yes:
print 'Should match: {0!r}'.format(s)
assert_true(re.match(regex, s))
for s in no:
print 'Should not match: {0!r}'.format(s)
assert_false(re.match(regex, s))
@ddt.ddt
class CanonicalContentTest(SharedModuleStoreTestCase):
"""
Tests the generation of canonical asset URLs for different types
of assets: c4x-style, opaque key style, locked, unlocked, CDN
set, CDN not set, etc.
"""
def setUp(self):
super(CanonicalContentTest, self).setUp()
@classmethod
def setUpClass(cls):
cls.courses = {}
super(CanonicalContentTest, cls).setUpClass()
names_and_prefixes = [(ModuleStoreEnum.Type.split, 'split'), (ModuleStoreEnum.Type.mongo, 'old')]
for store, prefix in names_and_prefixes:
with cls.store.default_store(store):
cls.courses[prefix] = CourseFactory.create(org='a', course='b', run=prefix)
# Create an unlocked image.
unlock_content = cls.create_image(prefix, (32, 32), 'blue', '{}_unlock.png')
# Create a locked image.
lock_content = cls.create_image(prefix, (32, 32), 'green', '{}_lock.png', locked=True)
# Create a thumbnail of the images.
contentstore().generate_thumbnail(unlock_content, dimensions=(16, 16))
contentstore().generate_thumbnail(lock_content, dimensions=(16, 16))
# Create an unlocked image in a subdirectory.
cls.create_image(prefix, (1, 1), 'red', 'special/{}_unlock.png')
# Create a locked image in a subdirectory.
cls.create_image(prefix, (1, 1), 'yellow', 'special/{}_lock.png', locked=True)
# Create an unlocked image with funky characters in the name.
cls.create_image(prefix, (1, 1), 'black', 'weird {}_unlock.png')
@classmethod
def create_image(cls, prefix, dimensions, color, name, locked=False):
"""
Creates an image.
Args:
prefix: the prefix to use e.g. split vs mongo
dimensions: tuple of (width, height)
color: the background color of the image
name: the name of the image; can be a format string
locked: whether or not the asset should be locked
Returns:
StaticContent: the StaticContent object for the created image
"""
new_image = Image.new('RGB', dimensions, color)
new_buf = StringIO()
new_image.save(new_buf, format='png')
new_buf.seek(0)
new_name = name.format(prefix)
new_key = StaticContent.compute_location(cls.courses[prefix].id, new_name)
new_content = StaticContent(new_key, new_name, 'image/png', new_buf.getvalue(), locked=locked)
contentstore().save(new_content)
return new_content
@ddt.data(
# No leading slash.
(u'', u'{prefix}_unlock.png', u'/{asset_key}@{prefix}_unlock.png', 1),
(u'', u'{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
(u'', u'weird {prefix}_unlock.png', u'/{asset_key}@weird_{prefix}_unlock.png', 1),
(u'dev', u'{prefix}_unlock.png', u'//dev/{asset_key}@{prefix}_unlock.png', 1),
(u'dev', u'{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
(u'dev', u'weird {prefix}_unlock.png', u'//dev/{asset_key}@weird_{prefix}_unlock.png', 1),
# No leading slash with subdirectory. This ensures we properly substitute slashes.
(u'', u'special/{prefix}_unlock.png', u'/{asset_key}@special_{prefix}_unlock.png', 1),
(u'', u'special/{prefix}_lock.png', u'/{asset_key}@special_{prefix}_lock.png', 1),
(u'dev', u'special/{prefix}_unlock.png', u'//dev/{asset_key}@special_{prefix}_unlock.png', 1),
(u'dev', u'special/{prefix}_lock.png', u'/{asset_key}@special_{prefix}_lock.png', 1),
# Leading slash.
(u'', u'/{prefix}_unlock.png', u'/{asset_key}@{prefix}_unlock.png', 1),
(u'', u'/{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
(u'dev', u'/{prefix}_unlock.png', u'//dev/{asset_key}@{prefix}_unlock.png', 1),
(u'dev', u'/{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
# Leading slash with subdirectory. This ensures we properly substitute slashes.
(u'', u'/special/{prefix}_unlock.png', u'/{asset_key}@special_{prefix}_unlock.png', 1),
(u'', u'/special/{prefix}_lock.png', u'/{asset_key}@special_{prefix}_lock.png', 1),
(u'dev', u'/special/{prefix}_unlock.png', u'//dev/{asset_key}@special_{prefix}_unlock.png', 1),
(u'dev', u'/special/{prefix}_lock.png', u'/{asset_key}@special_{prefix}_lock.png', 1),
# Static path.
(u'', u'/static/{prefix}_unlock.png', u'/{asset_key}@{prefix}_unlock.png', 1),
(u'', u'/static/{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
(u'', u'/static/weird {prefix}_unlock.png', u'/{asset_key}@weird_{prefix}_unlock.png', 1),
(u'dev', u'/static/{prefix}_unlock.png', u'//dev/{asset_key}@{prefix}_unlock.png', 1),
(u'dev', u'/static/{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
(u'dev', u'/static/weird {prefix}_unlock.png', u'//dev/{asset_key}@weird_{prefix}_unlock.png', 1),
# Static path with subdirectory. This ensures we properly substitute slashes.
(u'', u'/static/special/{prefix}_unlock.png', u'/{asset_key}@special_{prefix}_unlock.png', 1),
(u'', u'/static/special/{prefix}_lock.png', u'/{asset_key}@special_{prefix}_lock.png', 1),
(u'dev', u'/static/special/{prefix}_unlock.png', u'//dev/{asset_key}@special_{prefix}_unlock.png', 1),
(u'dev', u'/static/special/{prefix}_lock.png', u'/{asset_key}@special_{prefix}_lock.png', 1),
# Static path with query parameter.
(
u'',
u'/static/{prefix}_unlock.png?foo=/static/{prefix}_lock.png',
u'/{asset_key}@{prefix}_unlock.png?foo={encoded_asset_key}{prefix}_lock.png',
2
),
(
u'',
u'/static/{prefix}_lock.png?foo=/static/{prefix}_unlock.png',
u'/{asset_key}@{prefix}_lock.png?foo={encoded_asset_key}{prefix}_unlock.png',
2
),
(
u'dev',
u'/static/{prefix}_unlock.png?foo=/static/{prefix}_lock.png',
u'//dev/{asset_key}@{prefix}_unlock.png?foo={encoded_asset_key}{prefix}_lock.png',
2
),
(
u'dev',
u'/static/{prefix}_lock.png?foo=/static/{prefix}_unlock.png',
u'/{asset_key}@{prefix}_lock.png?foo={encoded_base_url}{encoded_asset_key}{prefix}_unlock.png',
2
),
# Already asset key.
(u'', u'/{asset_key}@{prefix}_unlock.png', u'/{asset_key}@{prefix}_unlock.png', 1),
(u'', u'/{asset_key}@{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
(u'dev', u'/{asset_key}@{prefix}_unlock.png', u'//dev/{asset_key}@{prefix}_unlock.png', 1),
(u'dev', u'/{asset_key}@{prefix}_lock.png', u'/{asset_key}@{prefix}_lock.png', 1),
# Old, c4x-style path.
(u'', u'/{c4x}/{prefix}_unlock.png', u'/{c4x}/{prefix}_unlock.png', 1),
(u'', u'/{c4x}/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'', u'/{c4x}/weird_{prefix}_lock.png', u'/{c4x}/weird_{prefix}_lock.png', 1),
(u'dev', u'/{c4x}/{prefix}_unlock.png', u'/{c4x}/{prefix}_unlock.png', 1),
(u'dev', u'/{c4x}/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'dev', u'/{c4x}/weird_{prefix}_unlock.png', u'/{c4x}/weird_{prefix}_unlock.png', 1),
# Thumbnails.
(u'', u'/{th_key}@{prefix}_unlock-{th_ext}', u'/{th_key}@{prefix}_unlock-{th_ext}', 1),
(u'', u'/{th_key}@{prefix}_lock-{th_ext}', u'/{th_key}@{prefix}_lock-{th_ext}', 1),
(u'dev', u'/{th_key}@{prefix}_unlock-{th_ext}', u'//dev/{th_key}@{prefix}_unlock-{th_ext}', 1),
(u'dev', u'/{th_key}@{prefix}_lock-{th_ext}', u'//dev/{th_key}@{prefix}_lock-{th_ext}', 1),
)
@ddt.unpack
def test_canonical_asset_path_with_new_style_assets(self, base_url, start, expected, mongo_calls):
prefix = 'split'
encoded_base_url = quote_plus('//' + base_url)
c4x = 'c4x/a/b/asset'
asset_key = 'asset-v1:a+b+{}+type@asset+block'.format(prefix)
encoded_asset_key = quote_plus('/asset-v1:a+b+{}+type@asset+block@'.format(prefix))
th_key = 'asset-v1:a+b+{}+type@thumbnail+block'.format(prefix)
th_ext = 'png-16x16.jpg'
start = start.format(
prefix=prefix,
c4x=c4x,
asset_key=asset_key,
encoded_base_url=encoded_base_url,
encoded_asset_key=encoded_asset_key,
th_key=th_key,
th_ext=th_ext
)
expected = expected.format(
prefix=prefix,
c4x=c4x,
asset_key=asset_key,
encoded_base_url=encoded_base_url,
encoded_asset_key=encoded_asset_key,
th_key=th_key,
th_ext=th_ext
)
with check_mongo_calls(mongo_calls):
asset_path = StaticContent.get_canonicalized_asset_path(self.courses[prefix].id, start, base_url)
self.assertEqual(asset_path, expected)
@ddt.data(
# No leading slash.
(u'', u'{prefix}_unlock.png', u'/{c4x}/{prefix}_unlock.png', 1),
(u'', u'{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'', u'weird {prefix}_unlock.png', u'/{c4x}/weird_{prefix}_unlock.png', 1),
(u'dev', u'{prefix}_unlock.png', u'//dev/{c4x}/{prefix}_unlock.png', 1),
(u'dev', u'{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'dev', u'weird {prefix}_unlock.png', u'//dev/{c4x}/weird_{prefix}_unlock.png', 1),
# No leading slash with subdirectory. This ensures we properly substitute slashes.
(u'', u'special/{prefix}_unlock.png', u'/{c4x}/special_{prefix}_unlock.png', 1),
(u'', u'special/{prefix}_lock.png', u'/{c4x}/special_{prefix}_lock.png', 1),
(u'dev', u'special/{prefix}_unlock.png', u'//dev/{c4x}/special_{prefix}_unlock.png', 1),
(u'dev', u'special/{prefix}_lock.png', u'/{c4x}/special_{prefix}_lock.png', 1),
# Leading slash.
(u'', u'/{prefix}_unlock.png', u'/{c4x}/{prefix}_unlock.png', 1),
(u'', u'/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'dev', u'/{prefix}_unlock.png', u'//dev/{c4x}/{prefix}_unlock.png', 1),
(u'dev', u'/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
# Leading slash with subdirectory. This ensures we properly substitute slashes.
(u'', u'/special/{prefix}_unlock.png', u'/{c4x}/special_{prefix}_unlock.png', 1),
(u'', u'/special/{prefix}_lock.png', u'/{c4x}/special_{prefix}_lock.png', 1),
(u'dev', u'/special/{prefix}_unlock.png', u'//dev/{c4x}/special_{prefix}_unlock.png', 1),
(u'dev', u'/special/{prefix}_lock.png', u'/{c4x}/special_{prefix}_lock.png', 1),
# Static path.
(u'', u'/static/{prefix}_unlock.png', u'/{c4x}/{prefix}_unlock.png', 1),
(u'', u'/static/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'', u'/static/weird {prefix}_unlock.png', u'/{c4x}/weird_{prefix}_unlock.png', 1),
(u'dev', u'/static/{prefix}_unlock.png', u'//dev/{c4x}/{prefix}_unlock.png', 1),
(u'dev', u'/static/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'dev', u'/static/weird {prefix}_unlock.png', u'//dev/{c4x}/weird_{prefix}_unlock.png', 1),
# Static path with subdirectory. This ensures we properly substitute slashes.
(u'', u'/static/special/{prefix}_unlock.png', u'/{c4x}/special_{prefix}_unlock.png', 1),
(u'', u'/static/special/{prefix}_lock.png', u'/{c4x}/special_{prefix}_lock.png', 1),
(u'dev', u'/static/special/{prefix}_unlock.png', u'//dev/{c4x}/special_{prefix}_unlock.png', 1),
(u'dev', u'/static/special/{prefix}_lock.png', u'/{c4x}/special_{prefix}_lock.png', 1),
# Static path with query parameter.
(
u'',
u'/static/{prefix}_unlock.png?foo=/static/{prefix}_lock.png',
u'/{c4x}/{prefix}_unlock.png?foo={encoded_c4x}{prefix}_lock.png',
2
),
(
u'',
u'/static/{prefix}_lock.png?foo=/static/{prefix}_unlock.png',
u'/{c4x}/{prefix}_lock.png?foo={encoded_c4x}{prefix}_unlock.png',
2
),
(
u'dev',
u'/static/{prefix}_unlock.png?foo=/static/{prefix}_lock.png',
u'//dev/{c4x}/{prefix}_unlock.png?foo={encoded_c4x}{prefix}_lock.png',
2
),
(
u'dev',
u'/static/{prefix}_lock.png?foo=/static/{prefix}_unlock.png',
u'/{c4x}/{prefix}_lock.png?foo={encoded_base_url}{encoded_c4x}{prefix}_unlock.png',
2
),
# Old, c4x-style path.
(u'', u'/{c4x}/{prefix}_unlock.png', u'/{c4x}/{prefix}_unlock.png', 1),
(u'', u'/{c4x}/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'', u'/{c4x}/weird_{prefix}_unlock.png', u'/{c4x}/weird_{prefix}_unlock.png', 1),
(u'dev', u'/{c4x}/{prefix}_unlock.png', u'//dev/{c4x}/{prefix}_unlock.png', 1),
(u'dev', u'/{c4x}/{prefix}_lock.png', u'/{c4x}/{prefix}_lock.png', 1),
(u'dev', u'/{c4x}/weird_{prefix}_unlock.png', u'//dev/{c4x}/weird_{prefix}_unlock.png', 1),
)
@ddt.unpack
def test_canonical_asset_path_with_c4x_style_assets(self, base_url, start, expected, mongo_calls):
prefix = 'old'
c4x_block = 'c4x/a/b/asset'
encoded_c4x_block = quote_plus('/' + c4x_block + '/')
encoded_base_url = quote_plus('//' + base_url)
start = start.format(
prefix=prefix,
encoded_base_url=encoded_base_url,
c4x=c4x_block,
encoded_c4x=encoded_c4x_block
)
expected = expected.format(
prefix=prefix,
encoded_base_url=encoded_base_url,
c4x=c4x_block,
encoded_c4x=encoded_c4x_block
)
with check_mongo_calls(mongo_calls):
asset_path = StaticContent.get_canonicalized_asset_path(self.courses[prefix].id, start, base_url)
self.assertEqual(asset_path, expected)
| agpl-3.0 |
fuziontech/sentry | tests/sentry/models/test_team.py | 24 | 1884 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.models import OrganizationMember, OrganizationMemberTeam
from sentry.testutils import TestCase
class TeamTest(TestCase):
def test_global_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
member = OrganizationMember.objects.get(
user=user,
organization=org,
)
assert list(team.member_set.all()) == [member]
def test_inactive_global_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
member = OrganizationMember.objects.get(
user=user,
organization=org,
)
OrganizationMemberTeam.objects.create(
organizationmember=member,
team=team,
is_active=False
)
assert list(team.member_set.all()) == []
def test_active_basic_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
user2 = self.create_user('foo@example.com')
member = self.create_member(
user=user2,
organization=org,
has_global_access=False,
teams=[team]
)
assert member in team.member_set.all()
def test_teamless_basic_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
user2 = self.create_user('foo@example.com')
member = self.create_member(
user=user2,
organization=org,
has_global_access=False,
)
assert member not in team.member_set.all()
| bsd-3-clause |
Mozhuowen/brython | www/src/Lib/test/test_builtin.py | 28 | 56161 | # Python test set -- built-in functions
import ast
import builtins
import collections
import io
import locale
import os
import pickle
import platform
import random
import sys
import traceback
import types
import unittest
import warnings
from operator import neg
from test.support import TESTFN, unlink, run_unittest, check_warnings
try:
import pty, signal
except ImportError:
pty = signal = None
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
class StrSquares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max:
raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(str(n*n))
n += 1
return self.sofar[i]
class BitBucket:
def write(self, line):
pass
test_conv_no_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
test_conv_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', ValueError),
('314 ', 314),
(' \t\t 314 \t\t ', ValueError),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', ValueError),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
class TestFailingBool:
def __bool__(self):
raise RuntimeError
class TestFailingIter:
def __iter__(self):
raise RuntimeError
def filter_char(arg):
return ord(arg) > ord("d")
def map_char(arg):
return chr(ord(arg)+1)
class BuiltinTest(unittest.TestCase):
# Helper to check picklability
def check_iter_pickle(self, it, seq):
itorg = it
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), seq)
#test the iterator after dropping one from it
it = pickle.loads(d)
try:
next(it)
except StopIteration:
return
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(list(it), seq[1:])
def test_import(self):
__import__('sys')
__import__('time')
__import__('string')
__import__(name='sys')
__import__(name='time', level=0)
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
self.assertRaises(ValueError, __import__, '')
self.assertRaises(TypeError, __import__, 'sys', name='sys')
def test_abs(self):
# int
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
self.assertTrue(abs(-sys.maxsize-1) > 0)
# float
self.assertEqual(abs(0.0), 0.0)
self.assertEqual(abs(3.14), 3.14)
self.assertEqual(abs(-3.14), 3.14)
# str
self.assertRaises(TypeError, abs, 'a')
# bool
self.assertEqual(abs(True), 1)
self.assertEqual(abs(False), 0)
# other
self.assertRaises(TypeError, abs)
self.assertRaises(TypeError, abs, None)
class AbsClass(object):
def __abs__(self):
return -5
self.assertEqual(abs(AbsClass()), -5)
def test_all(self):
self.assertEqual(all([2, 4, 6]), True)
self.assertEqual(all([2, None, 6]), False)
self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, all, 10) # Non-iterable
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
self.assertEqual(all([0, TestFailingBool()]), False) # Short-circuit
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
self.assertEqual(all(x > 42 for x in S), False)
def test_any(self):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
self.assertRaises(RuntimeError, any, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
self.assertEqual(any([1, TestFailingBool()]), True) # Short-circuit
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
self.assertEqual(any(x > 42 for x in S), False)
def test_ascii(self):
self.assertEqual(ascii(''), '\'\'')
self.assertEqual(ascii(0), '0')
self.assertEqual(ascii(()), '()')
self.assertEqual(ascii([]), '[]')
self.assertEqual(ascii({}), '{}')
a = []
a.append(a)
self.assertEqual(ascii(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(ascii(a), '{0: {...}}')
# Advanced checks for unicode strings
def _check_uni(s):
self.assertEqual(ascii(s), repr(s))
_check_uni("'")
_check_uni('"')
_check_uni('"\'')
_check_uni('\0')
_check_uni('\r\n\t .')
# Unprintable non-ASCII characters
_check_uni('\x85')
_check_uni('\u1fff')
_check_uni('\U00012fff')
# Lone surrogates
_check_uni('\ud800')
_check_uni('\udfff')
# Issue #9804: surrogates should be joined even for printable
# wide characters (UCS-2 builds).
self.assertEqual(ascii('\U0001d121'), "'\\U0001d121'")
# All together
s = "'\0\"\n\r\t abcd\x85é\U00012fff\uD800\U0001D121xxx."
self.assertEqual(ascii(s),
r"""'\'\x00"\n\r\t abcd\x85\xe9\U00012fff\ud800\U0001d121xxx.'""")
def test_neg(self):
x = -sys.maxsize-1
self.assertTrue(isinstance(x, int))
self.assertEqual(-x, sys.maxsize+1)
def test_callable(self):
self.assertTrue(callable(len))
self.assertFalse(callable("a"))
self.assertTrue(callable(callable))
self.assertTrue(callable(lambda x, y: x + y))
self.assertFalse(callable(__builtins__))
def f(): pass
self.assertTrue(callable(f))
class C1:
def meth(self): pass
self.assertTrue(callable(C1))
c = C1()
self.assertTrue(callable(c.meth))
self.assertFalse(callable(c))
# __call__ is looked up on the class, not the instance
c.__call__ = None
self.assertFalse(callable(c))
c.__call__ = lambda self: 0
self.assertFalse(callable(c))
del c.__call__
self.assertFalse(callable(c))
class C2(object):
def __call__(self): pass
c2 = C2()
self.assertTrue(callable(c2))
c2.__call__ = None
self.assertTrue(callable(c2))
class C3(C2): pass
c3 = C3()
self.assertTrue(callable(c3))
def test_chr(self):
self.assertEqual(chr(32), ' ')
self.assertEqual(chr(65), 'A')
self.assertEqual(chr(97), 'a')
self.assertEqual(chr(0xff), '\xff')
self.assertRaises(ValueError, chr, 1<<24)
self.assertEqual(chr(sys.maxunicode),
str('\\U0010ffff'.encode("ascii"), 'unicode-escape'))
self.assertRaises(TypeError, chr)
self.assertEqual(chr(0x0000FFFF), "\U0000FFFF")
self.assertEqual(chr(0x00010000), "\U00010000")
self.assertEqual(chr(0x00010001), "\U00010001")
self.assertEqual(chr(0x000FFFFE), "\U000FFFFE")
self.assertEqual(chr(0x000FFFFF), "\U000FFFFF")
self.assertEqual(chr(0x00100000), "\U00100000")
self.assertEqual(chr(0x00100001), "\U00100001")
self.assertEqual(chr(0x0010FFFE), "\U0010FFFE")
self.assertEqual(chr(0x0010FFFF), "\U0010FFFF")
self.assertRaises(ValueError, chr, -1)
self.assertRaises(ValueError, chr, 0x00110000)
self.assertRaises((OverflowError, ValueError), chr, 2**32)
def test_cmp(self):
self.assertTrue(not hasattr(builtins, "cmp"))
def test_compile(self):
compile('print(1)\n', '', 'exec')
bom = b'\xef\xbb\xbf'
compile(bom + b'print(1)\n', '', 'exec')
compile(source='pass', filename='?', mode='exec')
compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
compile('pass', '?', dont_inherit=1, mode='exec')
compile(memoryview(b"text"), "name", "exec")
self.assertRaises(TypeError, compile)
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'badmode')
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'single', 0xff)
self.assertRaises(TypeError, compile, chr(0), 'f', 'exec')
self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
mode='eval', source='0', filename='tmp')
compile('print("\xe5")\n', '', 'exec')
self.assertRaises(TypeError, compile, chr(0), 'f', 'exec')
self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
# test the optimize argument
codestr = '''def f():
"""doc"""
try:
assert False
except AssertionError:
return (True, f.__doc__)
else:
return (False, f.__doc__)
'''
def f(): """doc"""
values = [(-1, __debug__, f.__doc__),
(0, True, 'doc'),
(1, False, 'doc'),
(2, False, None)]
for optval, debugval, docstring in values:
# test both direct compilation and compilation via AST
codeobjs = []
codeobjs.append(compile(codestr, "<test>", "exec", optimize=optval))
tree = ast.parse(codestr)
codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
for code in codeobjs:
ns = {}
exec(code, ns)
rv = ns['f']()
self.assertEqual(rv, (debugval, docstring))
def test_delattr(self):
sys.spam = 1
delattr(sys, 'spam')
self.assertRaises(TypeError, delattr)
def test_dir(self):
# dir(wrong number of arguments)
self.assertRaises(TypeError, dir, 42, 42)
# dir() - local scope
local_var = 1
self.assertIn('local_var', dir())
# dir(module)
self.assertIn('exit', dir(sys))
# dir(module_with_invalid__dict__)
class Foo(types.ModuleType):
__dict__ = 8
f = Foo("foo")
self.assertRaises(TypeError, dir, f)
# dir(type)
self.assertIn("strip", dir(str))
self.assertNotIn("__mro__", dir(str))
# dir(obj)
class Foo(object):
def __init__(self):
self.x = 7
self.y = 8
self.z = 9
f = Foo()
self.assertIn("y", dir(f))
# dir(obj_no__dict__)
class Foo(object):
__slots__ = []
f = Foo()
self.assertIn("__repr__", dir(f))
# dir(obj_no__class__with__dict__)
# (an ugly trick to cause getattr(f, "__class__") to fail)
class Foo(object):
__slots__ = ["__class__", "__dict__"]
def __init__(self):
self.bar = "wow"
f = Foo()
self.assertNotIn("__repr__", dir(f))
self.assertIn("bar", dir(f))
# dir(obj_using __dir__)
class Foo(object):
def __dir__(self):
return ["kan", "ga", "roo"]
f = Foo()
self.assertTrue(dir(f) == ["ga", "kan", "roo"])
# dir(obj__dir__tuple)
class Foo(object):
def __dir__(self):
return ("b", "c", "a")
res = dir(Foo())
self.assertIsInstance(res, list)
self.assertTrue(res == ["a", "b", "c"])
# dir(obj__dir__not_sequence)
class Foo(object):
def __dir__(self):
return 7
f = Foo()
self.assertRaises(TypeError, dir, f)
# dir(traceback)
try:
raise IndexError
except:
self.assertEqual(len(dir(sys.exc_info()[2])), 4)
# test that object has a __dir__()
self.assertEqual(sorted([].__dir__()), dir([]))
def test_divmod(self):
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(-sys.maxsize-1, -1), (sys.maxsize+1, 0))
for num, denom, exp_result in [ (3.25, 1.0, (3.0, 0.25)),
(-3.25, 1.0, (-4.0, 0.75)),
(3.25, -1.0, (-4.0, -0.75)),
(-3.25, -1.0, (3.0, -0.25))]:
result = divmod(num, denom)
self.assertAlmostEqual(result[0], exp_result[0])
self.assertAlmostEqual(result[1], exp_result[1])
self.assertRaises(TypeError, divmod)
def test_eval(self):
self.assertEqual(eval('1+1'), 2)
self.assertEqual(eval(' 1+1\n'), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(eval('a', globals) , 1)
self.assertEqual(eval('a', globals, locals), 1)
self.assertEqual(eval('b', globals, locals), 200)
self.assertEqual(eval('c', globals, locals), 300)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
bom = b'\xef\xbb\xbf'
self.assertEqual(eval(bom + b'a', globals, locals), 1)
self.assertEqual(eval('"\xe5"', globals), "\xe5")
self.assertRaises(TypeError, eval)
self.assertRaises(TypeError, eval, ())
self.assertRaises(SyntaxError, eval, bom[:2] + b'a')
def test_general_eval(self):
# Tests that general mappings can be used for the locals argument
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def keys(self):
return list('xyz')
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, collections.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 1 # used to be 'a' but that's no longer an error
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
def test_exec(self):
g = {}
exec('z = 1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 1})
exec('z = 1+1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 2})
g = {}
l = {}
with check_warnings():
warnings.filterwarnings("ignore", "global statement",
module="<string>")
exec('global a; a = 1; b = 2', g, l)
if '__builtins__' in g:
del g['__builtins__']
if '__builtins__' in l:
del l['__builtins__']
self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
def test_exec_globals(self):
code = compile("print('Hello World!')", "", "exec")
# no builtin function
self.assertRaisesRegex(NameError, "name 'print' is not defined",
exec, code, {'__builtins__': {}})
# __builtins__ must be a mapping type
self.assertRaises(TypeError,
exec, code, {'__builtins__': 123})
# no __build_class__ function
code = compile("class A: pass", "", "exec")
self.assertRaisesRegex(NameError, "__build_class__ not found",
exec, code, {'__builtins__': {}})
class frozendict_error(Exception):
pass
class frozendict(dict):
def __setitem__(self, key, value):
raise frozendict_error("frozendict is readonly")
# read-only builtins
frozen_builtins = frozendict(__builtins__)
code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, {'__builtins__': frozen_builtins})
# read-only globals
namespace = frozendict({})
code = compile("x=1", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, namespace)
def test_exec_redirected(self):
savestdout = sys.stdout
sys.stdout = None # Whatever that cannot flush()
try:
# Used to raise SystemError('error return without exception set')
exec('a')
except NameError:
pass
finally:
sys.stdout = savestdout
def test_filter(self):
self.assertEqual(list(filter(lambda c: 'a' <= c <= 'z', 'Hello World')), list('elloorld'))
self.assertEqual(list(filter(None, [1, 'hello', [], [3], '', None, 9, 0])), [1, 'hello', [3], 9])
self.assertEqual(list(filter(lambda x: x > 0, [1, -3, 9, 0, 2])), [1, 9, 2])
self.assertEqual(list(filter(None, Squares(10))), [1, 4, 9, 16, 25, 36, 49, 64, 81])
self.assertEqual(list(filter(lambda x: x%2, Squares(10))), [1, 9, 25, 49, 81])
def identity(item):
return 1
filter(identity, Squares(5))
self.assertRaises(TypeError, filter)
class BadSeq(object):
def __getitem__(self, index):
if index<4:
return 42
raise ValueError
self.assertRaises(ValueError, list, filter(lambda x: x, BadSeq()))
def badfunc():
pass
self.assertRaises(TypeError, list, filter(badfunc, range(5)))
# test bltinmodule.c::filtertuple()
self.assertEqual(list(filter(None, (1, 2))), [1, 2])
self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4])
self.assertRaises(TypeError, list, filter(42, (1, 2)))
def test_filter_pickle(self):
f1 = filter(filter_char, "abcdeabcde")
f2 = filter(filter_char, "abcdeabcde")
self.check_iter_pickle(f1, list(f2))
def test_getattr(self):
self.assertTrue(getattr(sys, 'stdout') is sys.stdout)
self.assertRaises(TypeError, getattr, sys, 1)
self.assertRaises(TypeError, getattr, sys, 1, "foo")
self.assertRaises(TypeError, getattr)
self.assertRaises(AttributeError, getattr, sys, chr(sys.maxunicode))
# unicode surrogates are not encodable to the default encoding (utf8)
self.assertRaises(AttributeError, getattr, 1, "\uDAD1\uD51E")
def test_hasattr(self):
self.assertTrue(hasattr(sys, 'stdout'))
self.assertRaises(TypeError, hasattr, sys, 1)
self.assertRaises(TypeError, hasattr)
self.assertEqual(False, hasattr(sys, chr(sys.maxunicode)))
# Check that hasattr propagates all exceptions outside of
# AttributeError.
class A:
def __getattr__(self, what):
raise SystemExit
self.assertRaises(SystemExit, hasattr, A(), "b")
class B:
def __getattr__(self, what):
raise ValueError
self.assertRaises(ValueError, hasattr, B(), "b")
def test_hash(self):
hash(None)
self.assertEqual(hash(1), hash(1))
self.assertEqual(hash(1), hash(1.0))
hash('spam')
self.assertEqual(hash('spam'), hash(b'spam'))
hash((0,1,2,3))
def f(): pass
self.assertRaises(TypeError, hash, [])
self.assertRaises(TypeError, hash, {})
# Bug 1536021: Allow hash to return long objects
class X:
def __hash__(self):
return 2**100
self.assertEqual(type(hash(X())), int)
class Z(int):
def __hash__(self):
return self
self.assertEqual(hash(Z(42)), hash(42))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(-16), '-0x10')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, alphabetized as if it were raw_input
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
for l in lists:
i = iter(l)
self.assertEqual(next(i), '1')
self.assertEqual(next(i), '2')
self.assertRaises(StopIteration, next, i)
def test_isinstance(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(isinstance(c, C))
self.assertTrue(isinstance(d, C))
self.assertTrue(not isinstance(e, C))
self.assertTrue(not isinstance(c, D))
self.assertTrue(not isinstance('foo', E))
self.assertRaises(TypeError, isinstance, E, 'foo')
self.assertRaises(TypeError, isinstance)
def test_issubclass(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(issubclass(D, C))
self.assertTrue(issubclass(C, C))
self.assertTrue(not issubclass(C, D))
self.assertRaises(TypeError, issubclass, 'foo', E)
self.assertRaises(TypeError, issubclass, E, 'foo')
self.assertRaises(TypeError, issubclass)
def test_len(self):
self.assertEqual(len('123'), 3)
self.assertEqual(len(()), 0)
self.assertEqual(len((1, 2, 3, 4)), 4)
self.assertEqual(len([1, 2, 3, 4]), 4)
self.assertEqual(len({}), 0)
self.assertEqual(len({'a':1, 'b': 2}), 2)
class BadSeq:
def __len__(self):
raise ValueError
self.assertRaises(ValueError, len, BadSeq())
class InvalidLen:
def __len__(self):
return None
self.assertRaises(TypeError, len, InvalidLen())
class FloatLen:
def __len__(self):
return 4.5
self.assertRaises(TypeError, len, FloatLen())
class HugeLen:
def __len__(self):
return sys.maxsize + 1
self.assertRaises(OverflowError, len, HugeLen())
class NoLenMethod(object): pass
self.assertRaises(TypeError, len, NoLenMethod())
def test_map(self):
self.assertEqual(
list(map(lambda x: x*x, range(1,4))),
[1, 4, 9]
)
try:
from math import sqrt
except ImportError:
def sqrt(x):
return pow(x, 0.5)
self.assertEqual(
list(map(lambda x: list(map(sqrt, x)), [[16, 4], [81, 9]])),
[[4.0, 2.0], [9.0, 3.0]]
)
self.assertEqual(
list(map(lambda x, y: x+y, [1,3,2], [9,1,4])),
[10, 4, 6]
)
def plus(*v):
accu = 0
for i in v: accu = accu + i
return accu
self.assertEqual(
list(map(plus, [1, 3, 7])),
[1, 3, 7]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2])),
[1+4, 3+9, 7+2]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])),
[1+4+1, 3+9+1, 7+2+0]
)
self.assertEqual(
list(map(int, Squares(10))),
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
)
def Max(a, b):
if a is None:
return b
if b is None:
return a
return max(a, b)
self.assertEqual(
list(map(Max, Squares(3), Squares(2))),
[0, 1]
)
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, map, lambda x: x, 42)
class BadSeq:
def __iter__(self):
raise ValueError
yield None # unreachable, but its presence makes __iter__ a generator
self.assertRaises(ValueError, list, map(lambda x: x, BadSeq()))
def badfunc(x):
raise RuntimeError
self.assertRaises(RuntimeError, list, map(badfunc, range(5)))
def test_map_pickle(self):
m1 = map(map_char, "Is this the real life?")
m2 = map(map_char, "Is this the real life?")
self.check_iter_pickle(m1, list(m2))
def test_max(self):
self.assertEqual(max('123123'), '3')
self.assertEqual(max(1, 2, 3), 3)
self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
self.assertEqual(max(1, 2, 3.0), 3.0)
self.assertEqual(max(1, 2.0, 3), 3)
self.assertEqual(max(1.0, 2, 3), 3)
for stmt in (
"max(key=int)", # no args
"max(1, key=int)", # single arg not iterable
"max(1, 2, keystone=int)", # wrong keyword
"max(1, 2, key=int, abc=int)", # two many keywords
"max(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(max((1,), key=neg), 1) # one elem iterable
self.assertEqual(max((1,2), key=neg), 1) # two elem iterable
self.assertEqual(max(1, 2, key=neg), 1) # two elems
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(max(data, key=f),
sorted(reversed(data), key=f)[-1])
def test_min(self):
self.assertEqual(min('123123'), '1')
self.assertEqual(min(1, 2, 3), 1)
self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
self.assertEqual(min(1, 2, 3.0), 1)
self.assertEqual(min(1, 2.0, 3), 1)
self.assertEqual(min(1.0, 2, 3), 1.0)
self.assertRaises(TypeError, min)
self.assertRaises(TypeError, min, 42)
self.assertRaises(ValueError, min, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, min, BadSeq())
for stmt in (
"min(key=int)", # no args
"min(1, key=int)", # single arg not iterable
"min(1, 2, keystone=int)", # wrong keyword
"min(1, 2, key=int, abc=int)", # two many keywords
"min(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(min((1,), key=neg), 1) # one elem iterable
self.assertEqual(min((1,2), key=neg), 2) # two elem iterable
self.assertEqual(min(1, 2, key=neg), 2) # two elems
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(min(data, key=f),
sorted(data, key=f)[0])
def test_next(self):
it = iter(range(2))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
class Iter(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration
it = iter(Iter())
self.assertEqual(next(it, 42), 42)
self.assertRaises(StopIteration, next, it)
def gen():
yield 1
return
it = gen()
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
def test_oct(self):
self.assertEqual(oct(100), '0o144')
self.assertEqual(oct(-100), '-0o144')
self.assertRaises(TypeError, oct, ())
def write_testfile(self):
# NB the first 4 lines are also used to test input, below
fp = open(TESTFN, 'w')
try:
fp.write('1+1\n')
fp.write('The quick brown fox jumps over the lazy dog')
fp.write('.\n')
fp.write('Dear John\n')
fp.write('XXX'*100)
fp.write('YYY'*100)
finally:
fp.close()
def test_open(self):
self.write_testfile()
fp = open(TESTFN, 'r')
try:
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
self.assertEqual(fp.readline(4), 'Dear')
self.assertEqual(fp.readline(100), ' John\n')
self.assertEqual(fp.read(300), 'XXX'*100)
self.assertEqual(fp.read(1000), 'YYY'*100)
finally:
fp.close()
unlink(TESTFN)
def test_open_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that open() uses the current locale
# encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
self.write_testfile()
current_locale_encoding = locale.getpreferredencoding(False)
fp = open(TESTFN, 'w')
try:
self.assertEqual(fp.encoding, current_locale_encoding)
finally:
fp.close()
unlink(TESTFN)
finally:
os.environ.clear()
os.environ.update(old_environ)
def test_ord(self):
self.assertEqual(ord(' '), 32)
self.assertEqual(ord('A'), 65)
self.assertEqual(ord('a'), 97)
self.assertEqual(ord('\x80'), 128)
self.assertEqual(ord('\xff'), 255)
self.assertEqual(ord(b' '), 32)
self.assertEqual(ord(b'A'), 65)
self.assertEqual(ord(b'a'), 97)
self.assertEqual(ord(b'\x80'), 128)
self.assertEqual(ord(b'\xff'), 255)
self.assertEqual(ord(chr(sys.maxunicode)), sys.maxunicode)
self.assertRaises(TypeError, ord, 42)
self.assertEqual(ord(chr(0x10FFFF)), 0x10FFFF)
self.assertEqual(ord("\U0000FFFF"), 0x0000FFFF)
self.assertEqual(ord("\U00010000"), 0x00010000)
self.assertEqual(ord("\U00010001"), 0x00010001)
self.assertEqual(ord("\U000FFFFE"), 0x000FFFFE)
self.assertEqual(ord("\U000FFFFF"), 0x000FFFFF)
self.assertEqual(ord("\U00100000"), 0x00100000)
self.assertEqual(ord("\U00100001"), 0x00100001)
self.assertEqual(ord("\U0010FFFE"), 0x0010FFFE)
self.assertEqual(ord("\U0010FFFF"), 0x0010FFFF)
def test_pow(self):
self.assertEqual(pow(0,0), 1)
self.assertEqual(pow(0,1), 0)
self.assertEqual(pow(1,0), 1)
self.assertEqual(pow(1,1), 1)
self.assertEqual(pow(2,0), 1)
self.assertEqual(pow(2,10), 1024)
self.assertEqual(pow(2,20), 1024*1024)
self.assertEqual(pow(2,30), 1024*1024*1024)
self.assertEqual(pow(-2,0), 1)
self.assertEqual(pow(-2,1), -2)
self.assertEqual(pow(-2,2), 4)
self.assertEqual(pow(-2,3), -8)
self.assertAlmostEqual(pow(0.,0), 1.)
self.assertAlmostEqual(pow(0.,1), 0.)
self.assertAlmostEqual(pow(1.,0), 1.)
self.assertAlmostEqual(pow(1.,1), 1.)
self.assertAlmostEqual(pow(2.,0), 1.)
self.assertAlmostEqual(pow(2.,10), 1024.)
self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
self.assertAlmostEqual(pow(-2.,0), 1.)
self.assertAlmostEqual(pow(-2.,1), -2.)
self.assertAlmostEqual(pow(-2.,2), 4.)
self.assertAlmostEqual(pow(-2.,3), -8.)
for x in 2, 2.0:
for y in 10, 10.0:
for z in 1000, 1000.0:
if isinstance(x, float) or \
isinstance(y, float) or \
isinstance(z, float):
self.assertRaises(TypeError, pow, x, y, z)
else:
self.assertAlmostEqual(pow(x, y, z), 24.0)
self.assertAlmostEqual(pow(-1, 0.5), 1j)
self.assertAlmostEqual(pow(-1, 1/3), 0.5 + 0.8660254037844386j)
self.assertRaises(TypeError, pow, -1, -2, 3)
self.assertRaises(ValueError, pow, 1, 2, 0)
self.assertRaises(TypeError, pow)
def test_input(self):
self.write_testfile()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
savestdout = sys.stdout # Eats the echo
try:
sys.stdin = fp
sys.stdout = BitBucket()
self.assertEqual(input(), "1+1")
self.assertEqual(input(), 'The quick brown fox jumps over the lazy dog.')
self.assertEqual(input('testing\n'), 'Dear John')
# SF 1535165: don't segfault on closed stdin
# sys.stdout must be a regular file to trigger the bug
sys.stdout = savestdout
sys.stdin.close()
self.assertRaises(ValueError, input)
sys.stdout = BitBucket()
sys.stdin = io.StringIO("NULL\0")
self.assertRaises(TypeError, input, 42, 42)
sys.stdin = io.StringIO(" 'whitespace'")
self.assertEqual(input(), " 'whitespace'")
sys.stdin = io.StringIO()
self.assertRaises(EOFError, input)
del sys.stdout
self.assertRaises(RuntimeError, input, 'prompt')
del sys.stdin
self.assertRaises(RuntimeError, input, 'prompt')
finally:
sys.stdin = savestdin
sys.stdout = savestdout
fp.close()
unlink(TESTFN)
@unittest.skipUnless(pty, "the pty and signal modules must be available")
def check_input_tty(self, prompt, terminal_input, stdio_encoding=None):
if not sys.stdin.isatty() or not sys.stdout.isatty():
self.skipTest("stdin and stdout must be ttys")
r, w = os.pipe()
try:
pid, fd = pty.fork()
except (OSError, AttributeError) as e:
os.close(r)
os.close(w)
self.skipTest("pty.fork() raised {}".format(e))
if pid == 0:
# Child
try:
# Make sure we don't get stuck if there's a problem
signal.alarm(2)
os.close(r)
# Check the error handlers are accounted for
if stdio_encoding:
sys.stdin = io.TextIOWrapper(sys.stdin.detach(),
encoding=stdio_encoding,
errors='surrogateescape')
sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
encoding=stdio_encoding,
errors='replace')
with open(w, "w") as wpipe:
print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe)
print(ascii(input(prompt)), file=wpipe)
except:
traceback.print_exc()
finally:
# We don't want to return to unittest...
os._exit(0)
# Parent
os.close(w)
os.write(fd, terminal_input + b"\r\n")
# Get results from the pipe
with open(r, "r") as rpipe:
lines = []
while True:
line = rpipe.readline().strip()
if line == "":
# The other end was closed => the child exited
break
lines.append(line)
# Check the result was received and corresponds to the user's terminal input
if len(lines) != 2:
# Something went wrong, try to get at stderr
with open(fd, "r", encoding="ascii", errors="ignore") as child_output:
self.fail("got %d lines in pipe but expected 2, child output was:\n%s"
% (len(lines), child_output.read()))
os.close(fd)
# Check we did exercise the GNU readline path
self.assertIn(lines[0], {'tty = True', 'tty = False'})
if lines[0] != 'tty = True':
self.skipTest("standard IO in should have been a tty")
input_result = eval(lines[1]) # ascii() -> eval() roundtrip
if stdio_encoding:
expected = terminal_input.decode(stdio_encoding, 'surrogateescape')
else:
expected = terminal_input.decode(sys.stdin.encoding) # what else?
self.assertEqual(input_result, expected)
def test_input_tty(self):
# Test input() functionality when wired to a tty (the code path
# is different and invokes GNU readline if available).
self.check_input_tty("prompt", b"quux")
def test_input_tty_non_ascii(self):
# Check stdin/stdout encoding is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "utf-8")
def test_input_tty_non_ascii_unicode_errors(self):
# Check stdin/stdout error handler is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "ascii")
# test_int(): see test_int.py for tests of built-in function int().
def test_repr(self):
self.assertEqual(repr(''), '\'\'')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(()), '()')
self.assertEqual(repr([]), '[]')
self.assertEqual(repr({}), '{}')
a = []
a.append(a)
self.assertEqual(repr(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(repr(a), '{0: {...}}')
def test_round(self):
self.assertEqual(round(0.0), 0.0)
self.assertEqual(type(round(0.0)), int)
self.assertEqual(round(1.0), 1.0)
self.assertEqual(round(10.0), 10.0)
self.assertEqual(round(1000000000.0), 1000000000.0)
self.assertEqual(round(1e20), 1e20)
self.assertEqual(round(-1.0), -1.0)
self.assertEqual(round(-10.0), -10.0)
self.assertEqual(round(-1000000000.0), -1000000000.0)
self.assertEqual(round(-1e20), -1e20)
self.assertEqual(round(0.1), 0.0)
self.assertEqual(round(1.1), 1.0)
self.assertEqual(round(10.1), 10.0)
self.assertEqual(round(1000000000.1), 1000000000.0)
self.assertEqual(round(-1.1), -1.0)
self.assertEqual(round(-10.1), -10.0)
self.assertEqual(round(-1000000000.1), -1000000000.0)
self.assertEqual(round(0.9), 1.0)
self.assertEqual(round(9.9), 10.0)
self.assertEqual(round(999999999.9), 1000000000.0)
self.assertEqual(round(-0.9), -1.0)
self.assertEqual(round(-9.9), -10.0)
self.assertEqual(round(-999999999.9), -1000000000.0)
self.assertEqual(round(-8.0, -1), -10.0)
self.assertEqual(type(round(-8.0, -1)), float)
self.assertEqual(type(round(-8.0, 0)), float)
self.assertEqual(type(round(-8.0, 1)), float)
# Check even / odd rounding behaviour
self.assertEqual(round(5.5), 6)
self.assertEqual(round(6.5), 6)
self.assertEqual(round(-5.5), -6)
self.assertEqual(round(-6.5), -6)
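# (Python 3 rounds halfway cases to the nearest even result -- "banker's
# rounding" -- which is why 5.5 and 6.5 both round to 6 above.)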
# Check behavior on ints
self.assertEqual(round(0), 0)
self.assertEqual(round(8), 8)
self.assertEqual(round(-8), -8)
self.assertEqual(type(round(0)), int)
self.assertEqual(type(round(-8, -1)), int)
self.assertEqual(type(round(-8, 0)), int)
self.assertEqual(type(round(-8, 1)), int)
# test new kwargs
self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
self.assertRaises(TypeError, round)
# test generic rounding delegation for reals
class TestRound:
def __round__(self):
return 23
class TestNoRound:
pass
self.assertEqual(round(TestRound()), 23)
self.assertRaises(TypeError, round, 1, 2, 3)
self.assertRaises(TypeError, round, TestNoRound())
t = TestNoRound()
t.__round__ = lambda *args: args
self.assertRaises(TypeError, round, t)
self.assertRaises(TypeError, round, t, 0)
# Some versions of glibc for alpha have a bug that affects
# float -> integer rounding (floor, ceil, rint, round) for
# values in the range [2**52, 2**53). See:
#
# http://sources.redhat.com/bugzilla/show_bug.cgi?id=5350
#
# We skip this test on Linux/alpha if it would fail.
linux_alpha = (platform.system().startswith('Linux') and
platform.machine().startswith('alpha'))
system_round_bug = round(5e15+1) != 5e15+1
@unittest.skipIf(linux_alpha and system_round_bug,
"test will fail; failure is probably due to a "
"buggy system round function")
def test_round_large(self):
# Issue #1869: integral floats should remain unchanged
self.assertEqual(round(5e15-1), 5e15-1)
self.assertEqual(round(5e15), 5e15)
self.assertEqual(round(5e15+1), 5e15+1)
self.assertEqual(round(5e15+2), 5e15+2)
self.assertEqual(round(5e15+3), 5e15+3)
def test_setattr(self):
setattr(sys, 'spam', 1)
self.assertEqual(sys.spam, 1)
self.assertRaises(TypeError, setattr, sys, 1, 'spam')
self.assertRaises(TypeError, setattr)
# test_str(): see test_unicode.py and test_bytes.py for str() tests.
def test_sum(self):
self.assertEqual(sum([]), 0)
self.assertEqual(sum(list(range(2,8))), 27)
self.assertEqual(sum(iter(list(range(2,8)))), 27)
self.assertEqual(sum(Squares(10)), 285)
self.assertEqual(sum(iter(Squares(10))), 285)
self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
self.assertRaises(TypeError, sum)
self.assertRaises(TypeError, sum, 42)
self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, sum, [b'a', b'c'], b'')
values = [bytearray(b'a'), bytearray(b'b')]
self.assertRaises(TypeError, sum, values, bytearray(b''))
self.assertRaises(TypeError, sum, [[1], [2], [3]])
self.assertRaises(TypeError, sum, [{2:3}])
self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, sum, BadSeq())
empty = []
sum(([x] for x in range(10)), empty) # sum must not mutate its start value
self.assertEqual(empty, [])
def test_type(self):
self.assertEqual(type(''), type('123'))
self.assertNotEqual(type(''), type(()))
# We don't want self in vars(), so these are static methods
@staticmethod
def get_vars_f0():
return vars()
@staticmethod
def get_vars_f2():
BuiltinTest.get_vars_f0()
a = 1
b = 2
return vars()
class C_get_vars(object):
def getDict(self):
return {'a':2}
__dict__ = property(fget=getDict)
def test_vars(self):
self.assertEqual(set(vars()), set(dir()))
self.assertEqual(set(vars(sys)), set(dir(sys)))
self.assertEqual(self.get_vars_f0(), {})
self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
self.assertRaises(TypeError, vars, 42, 42)
self.assertRaises(TypeError, vars, 42)
self.assertEqual(vars(self.C_get_vars()), {'a':2})
def test_zip(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
self.assertEqual(list(zip(a, b)), t)
b = [4, 5, 6]
self.assertEqual(list(zip(a, b)), t)
b = (4, 5, 6, 7)
self.assertEqual(list(zip(a, b)), t)
class I:
def __getitem__(self, i):
if i < 0 or i > 2: raise IndexError
return i + 4
self.assertEqual(list(zip(a, I())), t)
self.assertEqual(list(zip()), [])
self.assertEqual(list(zip(*[])), [])
self.assertRaises(TypeError, zip, None)
class G:
pass
self.assertRaises(TypeError, zip, a, G())
self.assertRaises(RuntimeError, zip, a, TestFailingIter())
# Make sure zip doesn't try to allocate a billion elements for the
# result list when one of its arguments doesn't say how long it is.
# A MemoryError is the most likely failure mode.
class SequenceWithoutALength:
def __getitem__(self, i):
if i == 5:
raise IndexError
else:
return i
self.assertEqual(
list(zip(SequenceWithoutALength(), range(2**30))),
list(enumerate(range(5)))
)
class BadSeq:
def __getitem__(self, i):
if i == 5:
raise ValueError
else:
return i
self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq()))
def test_zip_pickle(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
z1 = zip(a, b)
self.check_iter_pickle(z1, t)
def test_format(self):
# Test the basic machinery of the format() builtin. Don't test
# the specifics of the various formatters
self.assertEqual(format(3, ''), '3')
# Returns some classes to use for various tests. Only the
# new-style version remains, since every class is new-style on Python 3.
def classes_new():
class A(object):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromA(A):
pass
class Simple(object): pass
class DerivedFromSimple(Simple):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromSimple2(DerivedFromSimple): pass
return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
def class_test(A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2):
self.assertEqual(format(A(3), 'spec'), '3spec')
self.assertEqual(format(DerivedFromA(4), 'spec'), '4spec')
self.assertEqual(format(DerivedFromSimple(5), 'abc'), '5abc')
self.assertEqual(format(DerivedFromSimple2(10), 'abcdef'),
'10abcdef')
class_test(*classes_new())
def empty_format_spec(value):
# test that:
# format(x, '') == str(x)
# format(x) == str(x)
self.assertEqual(format(value, ""), str(value))
self.assertEqual(format(value), str(value))
# for builtin types, format(x, "") == str(x)
empty_format_spec(17**13)
empty_format_spec(1.0)
empty_format_spec(3.1415e104)
empty_format_spec(-3.1415e104)
empty_format_spec(3.1415e-104)
empty_format_spec(-3.1415e-104)
empty_format_spec(object)
empty_format_spec(None)
# TypeError because self.__format__ returns the wrong type
class BadFormatResult:
def __format__(self, format_spec):
return 1.0
self.assertRaises(TypeError, format, BadFormatResult(), "")
# TypeError because format_spec is not unicode or str
self.assertRaises(TypeError, format, object(), 4)
self.assertRaises(TypeError, format, object(), object())
# tests for object.__format__ really belong elsewhere, but
# there's no good place to put them
x = object().__format__('')
self.assertTrue(x.startswith('<object object at'))
# first argument to object.__format__ must be string
self.assertRaises(TypeError, object().__format__, 3)
self.assertRaises(TypeError, object().__format__, object())
self.assertRaises(TypeError, object().__format__, None)
# --------------------------------------------------------------------
# Issue #7994: object.__format__ with a non-empty format string is
# deprecated
def test_deprecated_format_string(obj, fmt_str, should_raise_warning):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
format(obj, fmt_str)
if should_raise_warning:
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, DeprecationWarning)
self.assertIn('object.__format__ with a non-empty format '
'string', str(w[0].message))
else:
self.assertEqual(len(w), 0)
fmt_strs = ['', 's']
class A:
def __format__(self, fmt_str):
return format('', fmt_str)
for fmt_str in fmt_strs:
test_deprecated_format_string(A(), fmt_str, False)
class B:
pass
class C(object):
pass
for cls in [object, B, C]:
for fmt_str in fmt_strs:
test_deprecated_format_string(cls(), fmt_str, len(fmt_str) != 0)
# --------------------------------------------------------------------
# make sure we can take a subclass of str as a format spec
class DerivedFromStr(str): pass
self.assertEqual(format(0, DerivedFromStr('10')), ' 0')
def test_bin(self):
self.assertEqual(bin(0), '0b0')
self.assertEqual(bin(1), '0b1')
self.assertEqual(bin(-1), '-0b1')
self.assertEqual(bin(2**65), '0b1' + '0' * 65)
self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
def test_bytearray_translate(self):
x = bytearray(b"abc")
self.assertRaises(ValueError, x.translate, b"1", 1)
self.assertRaises(TypeError, x.translate, b"1"*256, 1)
def test_construct_singletons(self):
for const in None, Ellipsis, NotImplemented:
tp = type(const)
self.assertIs(tp(), const)
self.assertRaises(TypeError, tp, 1, 2)
self.assertRaises(TypeError, tp, a=1, b=2)
class TestSorted(unittest.TestCase):
def test_basic(self):
data = list(range(100))
copy = data[:]
random.shuffle(copy)
self.assertEqual(data, sorted(copy))
self.assertNotEqual(data, copy)
data.reverse()
random.shuffle(copy)
self.assertEqual(data, sorted(copy, key=lambda x: -x))
self.assertNotEqual(data, copy)
random.shuffle(copy)
self.assertEqual(data, sorted(copy, reverse=1))
self.assertNotEqual(data, copy)
def test_inputtypes(self):
s = 'abracadabra'
types = [list, tuple, str]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
s = ''.join(set(s)) # unique letters only
types = [str, set, frozenset, list, tuple, dict.fromkeys]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
def test_main(verbose=None):
test_classes = (BuiltinTest, TestSorted)
run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
| bsd-3-clause |
EvanK/ansible | lib/ansible/modules/cloud/google/gcp_iam_service_account_facts.py | 10 | 4246 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_iam_service_account_facts
description:
- Gather facts for GCP ServiceAccount
short_description: Gather facts for GCP ServiceAccount
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options: {}
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: gather facts for a service account
gcp_iam_service_account_facts:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- The name of the service account.
returned: success
type: str
projectId:
description:
- Id of the project that owns the service account.
returned: success
type: str
uniqueId:
description:
- Unique and stable id of the service account.
returned: success
type: str
email:
description:
- Email address of the service account.
returned: success
type: str
displayName:
description:
- User specified description of service account.
returned: success
type: str
oauth2ClientId:
description:
- OAuth2 client id for the service account.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict())
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/iam']
items = fetch_list(module, collection(module))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://iam.googleapis.com/v1/projects/{project}/serviceAccounts".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'iam')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 |
rizard/fast-failover-demo | example/packetStreamerClientExample.py | 148 | 3893 | #!/usr/bin/python
import urllib2
import json
import re
import os
import sys
from optparse import OptionParser
sys.path.append(os.path.expanduser('~/floodlight/target/gen-py'))
sys.path.append(os.path.expanduser('~/floodlight/thrift/lib/py'))
from packetstreamer import PacketStreamer
from packetstreamer.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
SESSIONID = 'sessionId'
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage, version="%prog 1.0")
parser.add_option("-c", "--controller", dest="controller", metavar="CONTROLLER_IP",
default="127.0.0.1", help="controller's IP address")
parser.add_option("-m", "--mac", dest="mac", metavar="HOST_MAC",
help="The host mac address to trace the OF packets")
(options, args) = parser.parse_args()
def validateIp(ip):
ipReg = ("(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
"\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
"\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
"\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)")
m = re.compile(ipReg).match(ip)
if m:
return True
else :
return False
def validateMac(mac):
macReg = '([a-fA-F0-9]{2}:){5}[a-fA-F0-9]{2}' # matches the xx:xx:xx:xx:xx:xx format
m = re.compile(macReg).match(mac)
if m:
return True
else :
return False
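# Illustrative examples (not part of the original script):
# validateMac('00:1b:2c:3d:4e:5f') -> True
# validateMac('00-1b-2c-3d-4e-5f') -> False (the separator must be ':')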
if not validateIp(options.controller):
parser.error("Invalid format for ip address.")
if not options.mac:
parser.error("-m or --mac option is required.")
if not validateMac(options.mac):
parser.error("Invalid format for mac address. Format: xx:xx:xx:xx:xx:xx")
controller = options.controller
host = options.mac
url = 'http://%s:8080/wm/core/packettrace/json' % controller
filter = {'mac':host, 'direction':'both', 'period':1000}
post_data = json.dumps(filter)
request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
response_text = None
def terminateTrace(sid):
global controller
filter = {SESSIONID:sid, 'period':-1}
post_data = json.dumps(filter)
url = 'http://%s:8080/wm/core/packettrace/json' % controller
request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
try:
response = urllib2.urlopen(request)
response_text = response.read()
except Exception, e:
# Floodlight may not be running, but we don't want that to be a fatal
# error, so we just ignore the exception in that case.
print "Exception:", e
try:
response = urllib2.urlopen(request)
response_text = response.read()
except Exception, e:
# Floodlight may not be running, but we don't want that to be a fatal
# error, so we just ignore the exception in that case.
print "Exception:", e
sys.exit()
if not response_text:
print "Failed to start a packet trace session"
sys.exit()
response_text = json.loads(response_text)
sessionId = None
if SESSIONID in response_text:
sessionId = response_text[SESSIONID]
else:
print "Failed to start a packet trace session"
sys.exit()
try:
# Make socket
transport = TSocket.TSocket('localhost', 9090)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = PacketStreamer.Client(protocol)
# Connect!
transport.open()
while 1:
packets = client.getPackets(sessionId)
for packet in packets:
print "Packet: %s"% packet
if "FilterTimeout" in packet:
sys.exit()
except Thrift.TException, e:
print '%s' % (e.message)
terminateTrace(sessionId)
except KeyboardInterrupt, e:
terminateTrace(sessionId)
# Close!
transport.close()
| apache-2.0 |
ville-k/tensorflow | tensorflow/python/saved_model/utils.py | 65 | 1207 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel utility functions.
Utility functions to assist with setup and construction of the SavedModel proto.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model.utils_impl import build_tensor_info
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["build_tensor_info",]
remove_undocumented(__name__, _allowed_symbols)
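# Minimal usage sketch (assumes TF 1.x graph mode; illustrative only):
# import tensorflow as tf
# x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
# info = build_tensor_info(x) # TensorInfo proto carrying name, dtype and shape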
| apache-2.0 |
pongem/python-bot-project | appengine/standard/botapp/env/lib/python2.7/site-packages/pip/wheel.py | 338 | 32010 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks,
)
from pip.utils.ui import open_spinner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However, the weaker hash makes no practical difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = canonicalize_name(package_name)
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path))
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
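# Illustrative note (not part of pip): rehash returns a RECORD-style pair,
# e.g. ('sha256=<43 urlsafe base64 chars, padding stripped>', <size in bytes>).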
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
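# Illustrative note (not part of pip): a script whose first line is b'#!python'
# is rewritten in place to b'#!' + sys.executable (e.g. '#!/usr/bin/python3');
# any other shebang leaves the file untouched and fix_script returns False.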
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = configparser.RawConfigParser()
cp.optionxform = lambda option: option
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
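# Illustrative sketch (not part of pip; mytool/mypkg are hypothetical names):
# an entry_points.txt containing
# [console_scripts]
# mytool = mypkg.cli:main
# would make get_entrypoints return ({'mytool': 'mypkg.cli:main'}, {}).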
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False, prefix=None):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated,
prefix=prefix,
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return os.path.relpath(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
canonicalize_name(s).startswith(
canonicalize_name(req.name))):
assert not info_dir, ('Multiple .dist-info directories: ' +
destsubdir + ', ' +
', '.join(info_dir))
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
if entry.suffix is None:
raise InstallationError(
"Invalid script entry point: %s for req: %s - A callable "
"suffix is required. Cf https://packaging.python.org/en/"
"latest/distributing.html#console-scripts for more "
"information." % (entry, req)
)
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add to the level of hack in this section of code: in order to support
# ensurepip, this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which controls which versioned scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
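# Illustrative sketch (assumes Python 2.7 and a wheel that defines a "pip"
# console script; not an exhaustive statement of behavior):
# ENSUREPIP_OPTIONS unset -> pip, pip2, pip2.7 wrappers are generated
# ENSUREPIP_OPTIONS=install -> pip2, pip2.7
# ENSUREPIP_OPTIONS=altinstall -> pip2.7 only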
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
# Record pip as the installer
installer = os.path.join(info_dir[0], 'INSTALLER')
temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
with open(temp_installer, 'wb') as installer_file:
installer_file.write(b'pip\n')
shutil.move(temp_installer, installer)
generated.append(installer)
# Record details of all files installed
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((normpath(f, lib_dir), h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
installing a version only minor version ahead (e.g 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
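# Illustrative sketch (not part of pip): parsing a universal wheel filename.
# w = Wheel('pip-8.1.2-py2.py3-none-any.whl')
# w.name == 'pip'; w.version == '8.1.2'
# w.file_tags == set([('py2', 'none', 'any'), ('py3', 'none', 'any')])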
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir, python_tag=None):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd, python_tag=python_tag):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
pass
# Ignore return, we can't do anything else useful.
self._clean_one(req)
return None
finally:
rmtree(tempd)
def _base_setup_args(self, req):
return [
sys.executable, "-u", '-c',
SETUPTOOLS_SHIM % req.setup_py
] + list(self.global_options)
def __build_one(self, req, tempd, python_tag=None):
base_args = self._base_setup_args(req)
spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
if python_tag is not None:
wheel_args += ["--python-tag", python_tag]
try:
call_subprocess(wheel_args, cwd=req.setup_py_dir,
show_stdout=False, spinner=spinner)
return True
except:
spinner.finish("error")
logger.error('Failed building wheel for %s', req.name)
return False
def _clean_one(self, req):
base_args = self._base_setup_args(req)
logger.info('Running setup.py clean for %s', req.name)
clean_args = base_args + ['clean', '--all']
try:
call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
:param autobuilding: If True, replace the sdist we built from with the
newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
# unpack sdists and constructs req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif autobuilding and req.editable:
pass
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to look up the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
canonicalize_name(req.name)):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
python_tag = None
if autobuilding:
python_tag = pep425tags.implementation_tag
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(
req, output_dir,
python_tag=python_tag,
)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
| apache-2.0 |
tylertian/Openstack | openstack F/python-glanceclient/tests/test_base.py | 4 | 2258 | # Copyright 2013 OpenStack Foundation
# Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import testtools
from glanceclient.common import base
class TestBase(testtools.TestCase):
def test_resource_repr(self):
r = base.Resource(None, dict(foo="bar", baz="spam"))
self.assertEqual(repr(r), "<Resource baz=spam, foo=bar>")
def test_getid(self):
self.assertEqual(base.getid(4), 4)
class TmpObject(object):
id = 4
self.assertEqual(base.getid(TmpObject), 4)
def test_two_resources_with_same_id_are_equal(self):
# Two resources of the same type with the same id: equal
r1 = base.Resource(None, {'id': 1, 'name': 'hi'})
r2 = base.Resource(None, {'id': 1, 'name': 'hello'})
self.assertEqual(r1, r2)
def test_two_resources_with_eq_info_are_equal(self):
# Two resources with no ID: equal if their info is equal
r1 = base.Resource(None, {'name': 'joe', 'age': 12})
r2 = base.Resource(None, {'name': 'joe', 'age': 12})
self.assertEqual(r1, r2)
def test_two_resources_with_diff_id_are_not_equal(self):
# Two resources with diff ID: not equal
r1 = base.Resource(None, {'id': 1, 'name': 'hi'})
r2 = base.Resource(None, {'id': 2, 'name': 'hello'})
self.assertNotEqual(r1, r2)
def test_two_resources_with_not_eq_info_are_not_equal(self):
# Two resources with no ID: not equal if their info is not equal
r1 = base.Resource(None, {'name': 'bill', 'age': 21})
r2 = base.Resource(None, {'name': 'joe', 'age': 12})
self.assertNotEqual(r1, r2)
| apache-2.0 |
rghe/ansible | lib/ansible/modules/packaging/os/rpm_key.py | 100 | 6840 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# Copyright: (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: rpm_key
author:
- Hector Acosta (@hacosta) <hector.acosta@gazzang.com>
short_description: Adds or removes a gpg key from the rpm db
description:
- Adds or removes (rpm --import) a gpg key to your rpm database.
version_added: "1.3"
options:
key:
description:
- Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.
required: true
state:
description:
- If the key will be imported or removed from the rpm db.
default: present
choices: [ absent, present ]
validate_certs:
description:
- If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Example action to import a key from a url
- rpm_key:
state: present
key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
# Example action to import a key from a file
- rpm_key:
state: present
key: /path/to/key.gpg
# Example action to ensure a key is not present in the db
- rpm_key:
state: absent
key: DEADB33F
'''
import re
import os.path
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
def is_pubkey(string):
"""Verifies if string is a pubkey"""
pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
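# Illustrative check (armor text abbreviated; values are hypothetical):
#   is_pubkey('-----BEGIN PGP PUBLIC KEY BLOCK-----\n...\n'
#             '-----END PGP PUBLIC KEY BLOCK-----')   # -> True
#   is_pubkey('not a key')                            # -> False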
class RpmKey(object):
def __init__(self, module):
# If the key is a url, we need to check if it's present to be idempotent;
# to do that, we check the keyid, which we can get from the armor.
keyfile = None
should_cleanup_keyfile = False
self.module = module
self.rpm = self.module.get_bin_path('rpm', True)
state = module.params['state']
key = module.params['key']
self.gpg = self.module.get_bin_path('gpg')
if not self.gpg:
self.gpg = self.module.get_bin_path('gpg2', required=True)
if '://' in key:
keyfile = self.fetch_key(key)
keyid = self.getkeyid(keyfile)
should_cleanup_keyfile = True
elif self.is_keyid(key):
keyid = key
elif os.path.isfile(key):
keyfile = key
keyid = self.getkeyid(keyfile)
else:
self.module.fail_json(msg="Not a valid key %s" % key)
keyid = self.normalize_keyid(keyid)
if state == 'present':
if self.is_key_imported(keyid):
module.exit_json(changed=False)
else:
if not keyfile:
self.module.fail_json(msg="When importing a key, a valid file must be given")
self.import_key(keyfile)
if should_cleanup_keyfile:
self.module.cleanup(keyfile)
module.exit_json(changed=True)
else:
if self.is_key_imported(keyid):
self.drop_key(keyid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def fetch_key(self, url):
"""Downloads a key from url, returns a valid path to a gpg key"""
rsp, info = fetch_url(self.module, url)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
key = rsp.read()
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
self.module.add_cleanup_file(tmpname)
tmpfile = os.fdopen(tmpfd, "w+b")
tmpfile.write(key)
tmpfile.close()
return tmpname
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase"""
ret = keyid.strip().upper()
if ret.startswith('0x'):
return ret[2:]
elif ret.startswith('0X'):
return ret[2:]
else:
return ret
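# Illustrative normalizations (hypothetical key ids):
#   normalize_keyid('0xdeadb33f ')  -> 'DEADB33F'
#   normalize_keyid(' DEADB33F')    -> 'DEADB33F'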
def getkeyid(self, keyfile):
stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
for line in stdout.splitlines():
line = line.strip()
if line.startswith('pub:'):
return line.split(':')[4]
self.module.fail_json(msg="Unexpected gpg output")
def is_keyid(self, keystr):
"""Verifies if a key, as provided by the user is a keyid"""
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg=stderr)
return stdout, stderr
def is_key_imported(self, keyid):
cmd = self.rpm + ' -q gpg-pubkey'
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0: # No key is installed on system
return False
cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
stdout, stderr = self.execute_command(cmd)
for line in stdout.splitlines():
if keyid in line.split(':')[4]:
return True
return False
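# The pipeline above dumps the armored key blocks stored in the rpm db as
# gpg-pubkey packages and re-parses them with gpg; the fifth colon-delimited
# field of each output record is where the key id is searched for.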
def import_key(self, keyfile):
if not self.module.check_mode:
self.execute_command([self.rpm, '--import', keyfile])
def drop_key(self, keyid):
if not self.module.check_mode:
self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
key=dict(type='str', required=True),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
RpmKey(module)
if __name__ == '__main__':
main()
| gpl-3.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py | 356 | 1555 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
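# Usage sketch (assumes html5lib is installed; variable names are
# illustrative, not part of this module):
#   import html5lib
#   doc = html5lib.parse("<p>hello</p>", treebuilder="etree")
#   walker = html5lib.getTreeWalker("etree")
#   stream = to_genshi(walker(doc))
#   for kind, data, pos in stream:
#       ...  # feed the events into a genshi serializer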
| gpl-2.0 |
kod3r/pyh3 | h3/node.py | 3 | 1583 | import collections
import json
from h3math import Point4d
"""
The node structure storing all attributes of a tree node.
"""
class Node(object):
"""
The node constructor.
:param int node_id: the id of the node in the node lookup table in the tree object
:param int parent_id: the id of the parent node in the node lookup table in the tree object, default None
:param int depth: the depth of this node in the tree, default 0
:param int tree_size: the subtree size of the node, i.e. the number of nodes below this node, default 1
:param float radius: the node's hemisphere radius, i.e. the distance to its children, default 0
:param float area: the node's hemisphere area, default 0
:param int band: the band in which the node's hemisphere is placed in its parent's hemisphere, default -1
:param float theta: the angle of the node's hemisphere rotating around the Z axis in spherical space, default 0
:param float phi: the angle between the node and the Z axis in spherical space, default 0
:param Point4d coord: the node's 3D coordinate in cartesian space, default Point4d(0, 0, 0, 0)
"""
def __init__(self, node_id, parent_id=None, depth=0, tree_size=1, radius=0, area=0):
self.node_id = node_id
self.children = set()
self.parent = parent_id
self.depth = depth
self.tree_size = tree_size
self.radius = radius
self.area = area
self.band = -1
self.theta = 0
self.phi = 0
self.coord = Point4d()
def __repr__(self):
return "<{0}>".format(self.node_id)
| mit |
jounex/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/sitemaps/views.py | 109 | 2968 | import warnings
from functools import wraps
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils import six
def x_robots_tag(func):
@wraps(func)
def inner(request, *args, **kwargs):
response = func(request, *args, **kwargs)
response['X-Robots-Tag'] = 'noindex, noodp, noarchive'
return response
return inner
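# Every view wrapped with @x_robots_tag carries the header set above, e.g. a
# response to a sitemap URL would include (illustrative):
#   X-Robots-Tag: noindex, noodp, noarchive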
@x_robots_tag
def index(request, sitemaps,
template_name='sitemap_index.xml', content_type='application/xml',
sitemap_url_name='django.contrib.sitemaps.views.sitemap',
mimetype=None):
if mimetype:
warnings.warn("The mimetype keyword argument is deprecated, use "
"content_type instead", DeprecationWarning, stacklevel=2)
content_type = mimetype
req_protocol = 'https' if request.is_secure() else 'http'
req_site = get_current_site(request)
sites = []
for section, site in sitemaps.items():
if callable(site):
site = site()
protocol = req_protocol if site.protocol is None else site.protocol
sitemap_url = urlresolvers.reverse(
sitemap_url_name, kwargs={'section': section})
absolute_url = '%s://%s%s' % (protocol, req_site.domain, sitemap_url)
sites.append(absolute_url)
for page in range(2, site.paginator.num_pages + 1):
sites.append('%s?p=%s' % (absolute_url, page))
return TemplateResponse(request, template_name, {'sitemaps': sites},
content_type=content_type)
@x_robots_tag
def sitemap(request, sitemaps, section=None,
template_name='sitemap.xml', content_type='application/xml',
mimetype=None):
if mimetype:
warnings.warn("The mimetype keyword argument is deprecated, use "
"content_type instead", DeprecationWarning, stacklevel=2)
content_type = mimetype
req_protocol = 'https' if request.is_secure() else 'http'
req_site = get_current_site(request)
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps = [sitemaps[section]]
else:
maps = list(six.itervalues(sitemaps))
page = request.GET.get("p", 1)
urls = []
for site in maps:
try:
if callable(site):
site = site()
urls.extend(site.get_urls(page=page, site=req_site,
protocol=req_protocol))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
return TemplateResponse(request, template_name, {'urlset': urls},
content_type=content_type)
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/tools/compatibility/tf_upgrade.py | 20 | 26502 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.batch_matmul": {
"adj_x": "adjoint_a",
"adj_y": "adjoint_b",
},
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
# Mapping from function to the new name of the function
self.function_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_fft2d": "tf.fft2d",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
"tf.op_scope": "tf.name_scope",
}
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"],
"tf.nn.sigmoid_cross_entropy_with_logits": [
"logits", "labels", "name"],
"tf.op_scope": ["values", "name", "default_name"],
}
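# For example (illustrative), a positional call such as
#   tf.split(0, 3, value)
# is rewritten by visit_Call below into
#   tf.split(axis=0, num_or_size_splits=3, value=value)
# using the keyword order listed above.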
# Specially handled functions.
self.function_handle = {"tf.reverse": self._reverse_handler}
@staticmethod
def _reverse_handler(file_edit_recorder, node):
# TODO(aselle): Could check for a literal list of bools and try to convert
# them to indices.
comment = ("ERROR: tf.reverse has had its argument semantics changed\n"
"significantly the converter cannot detect this reliably, so you"
"need to inspect this usage manually.\n")
file_edit_recorder.add(comment,
node.lineno,
node.col_offset,
"tf.reverse",
"tf.reverse",
error="tf.reverse requires manual check.")
class FileEditTuple(collections.namedtuple(
"FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
start: The column offset within the line where the edit begins (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
# all edits are lists of chars
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
def process(self, text):
"""Process a list of strings, each corresponding to the recorded changes.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
A tuple of the modified text and a textual description of what is done.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
# Iterate of each line
for line, edits in self._line_to_edit.items():
offset = 0
# sort by column so that edits are processed in order, which makes the
# indexing adjustments cumulative for edits that change the string
# length
edits.sort(key=lambda x: x.start)
# Extract each line to a list of characters, because mutable lists
# are editable, unlike immutable strings.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
# Make underscore buffers for underlining where in the line the edit was
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
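# Illustrative call (hypothetical values): record replacing "tf.mul" with
# "tf.multiply" at line 3, column 8 of the file being processed:
#   recorder.add("Renamed function 'tf.mul' to 'tf.multiply'",
#                line=3, start=8, old="tf.mul", new="tf.multiply")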
class TensorFlowCallVisitor(ast.NodeVisitor):
"""AST Visitor that finds TensorFlow Function calls.
Updates function calls from old API version to new API version.
"""
def __init__(self, filename, lines):
self._filename = filename
self._file_edit = FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = APIChangeSpec()
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
This is necessary mainly because ListComp's location reporting points at
the next token after the list comprehension's opening bracket.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
find_open = re.compile(r"^\s*(\[).*$")
find_string_chars = re.compile("['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
# Reverse the preceding text so the regex can scan backwards for the '[' through any whitespace
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text == "" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) - 1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
# Most other nodes return proper locations (`with` notably does not), but
# it is not possible to use that in an argument.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name and full_name.startswith("tf."):
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if (argval_lineno is not None and argval_col_offset is not None):
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if self._lines[argval_lineno - 1][key_start:key_end] == argkey + "=":
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name and full_name.startswith("tf."):
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
class TensorFlowCodeUpgrader(object):
"""Class that handles upgrading a set of Python files to TensorFlow 1.0."""
def __init__(self):
pass
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
# Write to a temporary file, just in case we are doing an in-place modify.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = TensorFlowCallVisitor(in_filename, lines)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory, copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
Note that only Python files are processed. If you have custom code in other
languages, you will need to upgrade those manually.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as the base for the output tree.
copy_other_files: Whether non-Python files should be copied over unchanged.
Returns:
A tuple of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle the case
# where the user puts the output directory in some subdirectory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=False)
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = TensorFlowCodeUpgrader()
report_text = None
report_filename = args.report_filename
files_processed = 0
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
open(report_filename, "w").write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_dics_source_power.py | 4 | 2479 | """
==========================================
Compute source power using DICS beamformer
==========================================
Compute a Dynamic Imaging of Coherent Sources (DICS) [1]_ filter from
single-trial activity to estimate source power across a frequency band.
References
----------
.. [1] Gross et al. Dynamic imaging of coherent sources: Studying neural
interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Roman Goj <roman.goj@gmail.com>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
###############################################################################
# Reading the raw data:
raw = mne.io.read_raw_fif(raw_fname)
raw.info['bads'] = ['MEG 2443'] # 1 bad MEG channel
# Set picks
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
# Read epochs
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()
# Read forward operator
forward = mne.read_forward_solution(fname_fwd)
###############################################################################
# Computing the cross-spectral density matrix at 4 evenly spaced frequencies
# from 6 to 10 Hz. We use a decim value of 20 to speed up the computation in
# this example at the loss of accuracy.
csd = csd_morlet(epochs, tmin=0, tmax=0.5, decim=20,
frequencies=np.linspace(6, 10, 4))
# Compute DICS spatial filter and estimate source power.
filters = make_dics(epochs.info, forward, csd, reg=0.5)
stc, freqs = apply_dics_csd(csd, filters)
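# `stc` now holds the source power estimate and `freqs` the frequencies it
# was computed for (here the 4 CSD frequencies between 6 and 10 Hz).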
message = 'DICS source power in the 6-10 Hz frequency band'
brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
time_label=message)
| bsd-3-clause |
HousekeepLtd/django | tests/urlpatterns_reverse/included_namespace_urls.py | 199 | 1357 | import warnings
from django.conf.urls import include, patterns, url
from django.utils.deprecation import RemovedInDjango110Warning
from .namespace_urls import URLObject
from .views import view_class_instance
testobj3 = URLObject('testapp', 'test-ns3')
testobj4 = URLObject('testapp', 'test-ns4')
# test deprecated patterns() function. convert to list of urls() in Django 1.10
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RemovedInDjango110Warning)
urlpatterns = patterns('urlpatterns_reverse.views',
url(r'^normal/$', 'empty_view', name='inc-normal-view'),
url(r'^normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', 'empty_view', name='inc-normal-view'),
url(r'^\+\\\$\*/$', 'empty_view', name='inc-special-view'),
url(r'^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', 'empty_view', name='inc-mixed-args'),
url(r'^no_kwargs/([0-9]+)/([0-9]+)/$', 'empty_view', name='inc-no-kwargs'),
url(r'^view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', view_class_instance, name='inc-view-class'),
(r'^test3/', include(testobj3.urls)),
(r'^test4/', include(testobj4.urls)),
(r'^ns-included3/', include('urlpatterns_reverse.included_urls', namespace='inc-ns3')),
(r'^ns-included4/', include('urlpatterns_reverse.namespace_urls', namespace='inc-ns4')),
)
| bsd-3-clause |
40223141/0505 | static/Brython3.1.1-20150328-091302/Lib/xml/dom/minicompat.py | 781 | 3228 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
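# Behaviour sketch (illustrative values):
#   nl = NodeList(["a", "b"])
#   nl.item(0)   # -> 'a'
#   nl.item(9)   # -> None (index out of range)
#   nl.length    # -> 2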
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name))
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
| agpl-3.0 |
jesramirez/odoo | addons/stock/res_config.py | 115 | 8115 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'propagation_minimum_delta': fields.integer('Minimum Delta for Propagation of a Date Change on moves linked together'),
'internal_transit_location_id': fields.many2one('stock.location', 'Internal Transit Location', help="Technical field used for resupply routes between warehouses that belong to this company", on_delete="restrict"),
}
def create_transit_location(self, cr, uid, company_id, company_name, context=None):
'''Create a transit location with company_id being the given company_id. This is needed
in case of resupply routes between warehouses belonging to the same company, because
we don't want to create accounting entries at that time.
'''
data_obj = self.pool.get('ir.model.data')
try:
parent_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1]
except:
parent_loc = False
location_vals = {
'name': _('%s: Transit Location') % company_name,
'usage': 'transit',
'company_id': company_id,
'location_id': parent_loc,
}
location_id = self.pool.get('stock.location').create(cr, uid, location_vals, context=context)
self.write(cr, uid, [company_id], {'internal_transit_location_id': location_id}, context=context)
def create(self, cr, uid, vals, context=None):
company_id = super(res_company, self).create(cr, uid, vals, context=context)
self.create_transit_location(cr, uid, company_id, vals['name'], context=context)
return company_id
_defaults = {
'propagation_minimum_delta': 1,
}
class stock_config_settings(osv.osv_memory):
_name = 'stock.config.settings'
_inherit = 'res.config.settings'
_columns = {
'company_id': fields.many2one('res.company', 'Company', required=True),
'module_procurement_jit': fields.boolean("Generate procurement in real time",
help="""This allows Just In Time computation of procurement orders.
All procurement orders will be processed immediately, which could in some
cases entail a small performance impact.
This installs the module procurement_jit."""),
'module_claim_from_delivery': fields.boolean("Allow claim on deliveries",
help='Adds a Claim link to the delivery order.\n'
'-This installs the module claim_from_delivery.'),
'module_product_expiry': fields.boolean("Expiry date on serial numbers",
help="""Track different dates on products and serial numbers.
The following dates can be tracked:
- end of life
- best before date
- removal date
- alert date.
This installs the module product_expiry."""),
'group_uom': fields.boolean("Manage different units of measure for products",
implied_group='product.group_uom',
help="""Allows you to select and maintain different units of measure for products."""),
'group_uos': fields.boolean("Invoice products in a different unit of measure than the sales order",
implied_group='product.group_uos',
help='Allows you to sell units of a product, but invoice based on a different unit of measure.\n'
'For instance, you can sell pieces of meat that you invoice based on their weight.'),
'group_stock_packaging': fields.boolean("Allow to define several packaging methods on products",
implied_group='product.group_stock_packaging',
help="""Allows you to create and manage your packaging dimensions and types you want to be maintained in your system."""),
'group_stock_production_lot': fields.boolean("Track lots or serial numbers",
implied_group='stock.group_production_lot',
help="""This allows you to assign a lot (or serial number) to the pickings and moves. This can make it possible to know which production lot was sent to a certain client, ..."""),
'group_stock_tracking_lot': fields.boolean("Use packages: pallets, boxes, ...",
implied_group='stock.group_tracking_lot',
help="""This allows to manipulate packages. You can put something in, take something from a package, but also move entire packages and put them even in another package. """),
'group_stock_tracking_owner': fields.boolean("Manage owner on stock",
implied_group='stock.group_tracking_owner',
help="""This way you can receive products attributed to a certain owner. """),
'group_stock_multiple_locations': fields.boolean("Manage multiple locations and warehouses",
implied_group='stock.group_locations',
help="""This will show you the locations and allows you to define multiple picking types and warehouses."""),
'group_stock_adv_location': fields.boolean("Manage advanced routes for your warehouse",
implied_group='stock.group_adv_location',
help="""This option supplements the warehouse application by effectively implementing Push and Pull inventory flows through Routes."""),
'decimal_precision': fields.integer('Decimal precision on weight', help="As an example, a decimal precision of 2 will allow weights like: 9.99 kg, whereas a decimal precision of 4 will allow weights like: 0.0231 kg."),
'propagation_minimum_delta': fields.related('company_id', 'propagation_minimum_delta', type='integer', string="Minimum days to trigger a propagation of date change in pushed/pull flows."),
'module_stock_dropshipping': fields.boolean("Manage dropshipping",
help='\nCreates the dropship route and adds more complex tests'
'-This installs the module stock_dropshipping.'),
'module_stock_picking_wave': fields.boolean('Manage picking wave', help='Install the picking wave module which will help you grouping your pickings and processing them in batch'),
}
def onchange_adv_location(self, cr, uid, ids, group_stock_adv_location, context=None):
if group_stock_adv_location:
return {'value': {'group_stock_multiple_locations': True}}
return {}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.id
def get_default_dp(self, cr, uid, fields, context=None):
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'decimal_stock_weight')
return {'decimal_precision': dp.digits}
def set_default_dp(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'decimal_stock_weight')
dp.write({'digits': config.decimal_precision})
_defaults = {
'company_id': _default_company,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rholy/dnf | tests/test_history_undo.py | 13 | 12049 | # test_history_undo.py
# Tests of the history undo command.
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""Tests of the history undo command."""
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf import Base
from dnf.exceptions import PackagesNotAvailableError, PackagesNotInstalledError
from dnf.history import NEVRAOperations
from dnf.package import Package
from dnf.transaction import (ERASE, DOWNGRADE, INSTALL, REINSTALL,
TransactionItem, UPGRADE)
from hawkey import split_nevra
from tests.support import mock_sack, ObjectMatcher
from unittest import TestCase
class BaseTest(TestCase):
"""Unit tests of dnf.Base."""
def _create_item_matcher(self, op_type, installed=None, erased=None,
obsoleted=[], reason='unknown'):
"""Create a new instance of dnf.transaction.TransactionItem matcher."""
attrs = {'op_type': op_type,
'installed': self._create_package_matcher(installed)
if installed else installed,
'erased': self._create_package_matcher(erased)
if erased else erased,
'obsoleted': [self._create_package_matcher(nevra)
for nevra in obsoleted],
'reason': reason}
return ObjectMatcher(TransactionItem, attrs)
def _create_package_matcher(self, nevra_str):
"""Create a new instance of dnf.package.Package matcher."""
nevra = split_nevra(nevra_str)
attrs = {'name': nevra.name,
'epoch': nevra.epoch,
'version': nevra.version,
'release': nevra.release,
'arch': nevra.arch}
return ObjectMatcher(Package, attrs)
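# e.g. (illustrative) _create_package_matcher('pepper-20-0.x86_64') yields a
# matcher for name='pepper', version='20', release='0', arch='x86_64'.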
def setUp(self):
"""Prepare the test fixture."""
self._base = Base()
self._base._sack = mock_sack('main', 'updates')
def test_history_undo_operations_downgrade(self):
"""Test history_undo_operations with a downgrade."""
operations = NEVRAOperations()
operations.add('Downgrade', 'pepper-20-0.x86_64', 'pepper-20-1.x86_64', ('lotus-3-16.x86_64',))
with self._base:
self._base.history_undo_operations(operations)
transaction_it = iter(self._base.transaction)
self.assertEqual(next(transaction_it),
self._create_item_matcher(
UPGRADE, installed='pepper-20-1.x86_64',
erased='pepper-20-0.x86_64'))
self.assertEqual(next(transaction_it),
self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64',
reason='history'))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_downgrade_notavailable(self):
"""Test history_undo_operations with an unavailable downgrade."""
operations = NEVRAOperations()
operations.add('Downgrade', 'pepper-20-0.x86_64', 'pepper-20-2.x86_64')
with self._base, self.assertRaises(PackagesNotAvailableError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'pepper-20-2.x86_64')
def test_history_undo_operations_downgrade_notinstalled(self):
"""Test history_undo_operations with a not installed downgrade."""
operations = NEVRAOperations()
operations.add('Downgrade', 'lotus-3-0.x86_64', 'lotus-3-16.x86_64')
with self._base, self.assertRaises(PackagesNotInstalledError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'lotus-3-0.x86_64')
def test_history_undo_operations_erase(self):
"""Test history_undo_operations with an erase."""
operations = NEVRAOperations()
operations.add('Erase', 'lotus-3-16.x86_64')
with self._base:
self._base.history_undo_operations(operations)
transaction_it = iter(self._base.transaction)
self.assertEqual(next(transaction_it),
self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64',
reason='history'))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_erase_twoavailable(self):
"""Test history_undo_operations with an erase available in two repos."""
base = Base()
base._sack = mock_sack('main', 'search')
operations = NEVRAOperations()
operations.add('Erase', 'lotus-3-16.x86_64')
with base:
base.history_undo_operations(operations)
transaction_it = iter(base.transaction)
self.assertEqual(next(transaction_it),
self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64',
reason='history'))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_erase_notavailable(self):
"""Test history_undo_operations with an unavailable erase."""
operations = NEVRAOperations()
operations.add('Erase', 'hole-1-1.x86_64')
with self._base, self.assertRaises(PackagesNotAvailableError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'hole-1-1.x86_64')
def test_history_undo_operations_install(self):
"""Test history_undo_operations with an install."""
operations = NEVRAOperations()
operations.add('Install', 'pepper-20-0.x86_64', obsoleted_nevras=('lotus-3-16.x86_64',))
with self._base:
self._base.history_undo_operations(operations)
transaction_it = iter(self._base.transaction)
self.assertEqual(next(transaction_it),
self._create_item_matcher(
ERASE, erased='pepper-20-0.x86_64'))
self.assertEqual(next(transaction_it),
self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64',
reason='history'))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_install_notinstalled(self):
"""Test history_undo_operations with a not installed install."""
operations = NEVRAOperations()
operations.add('Install', 'mrkite-2-0.x86_64')
with self._base, self.assertRaises(PackagesNotInstalledError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'mrkite-2-0.x86_64')
def test_history_undo_operations_reinstall(self):
"""Test history_undo_operations with a reinstall."""
operations = NEVRAOperations()
operations.add('Reinstall', 'pepper-20-0.x86_64', 'pepper-20-0.x86_64', ('hole-1-1.x86_64',))
with self._base:
self._base.history_undo_operations(operations)
transaction_it = iter(self._base.transaction)
self.assertEqual(next(transaction_it),
self._create_item_matcher(
REINSTALL, installed='pepper-20-0.x86_64',
erased='pepper-20-0.x86_64',
obsoleted=('hole-1-1.x86_64',)))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_reinstall_notavailable(self):
"""Test history_undo_operations with an unvailable reinstall."""
operations = NEVRAOperations()
operations.add('Reinstall', 'mrkite-2-0.x86_64', 'mrkite-2-0.x86_64')
with self._base, self.assertRaises(PackagesNotInstalledError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'mrkite-2-0.x86_64')
def test_history_undo_operations_reinstall_notinstalled(self):
"""Test history_undo_operations with a not installed reinstall."""
operations = NEVRAOperations()
operations.add('Reinstall', 'hole-1-1.x86_64', 'hole-1-1.x86_64')
with self._base, self.assertRaises(PackagesNotAvailableError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'hole-1-1.x86_64')
def test_history_undo_operations_reinstall_notinstalled_obsoleted(self):
"""Test history_undo_operations with a not installed obsoleted of a reinstall."""
operations = NEVRAOperations()
operations.add('Reinstall', 'pepper-20-0.x86_64', 'pepper-20-0.x86_64', ('lotus-3-16.x86_64',))
with self._base:
self._base.history_undo_operations(operations)
transaction_it = iter(self._base.transaction)
self.assertEqual(next(transaction_it),
self._create_item_matcher(
REINSTALL, installed='pepper-20-0.x86_64',
erased='pepper-20-0.x86_64', obsoleted=()))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_update(self):
"""Test history_undo_operations with an update."""
operations = NEVRAOperations()
operations.add('Update', 'tour-5-0.noarch', 'tour-4.6-1.noarch', ('lotus-3-16.x86_64',))
with self._base:
self._base.history_undo_operations(operations)
transaction_it = iter(self._base.transaction)
self.assertEqual(next(transaction_it),
self._create_item_matcher(
DOWNGRADE, installed='tour-4.6-1.noarch',
erased='tour-5-0.noarch'))
self.assertEqual(next(transaction_it),
self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64',
reason='history'))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_update_notavailable(self):
"""Test history_undo_operations with an unavailable update."""
operations = NEVRAOperations()
operations.add('Update', 'tour-5-0.noarch', 'tour-4.6-2.noarch')
with self._base, self.assertRaises(PackagesNotAvailableError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'tour-4.6-2.noarch')
def test_history_undo_operations_update_notinstalled(self):
"""Test history_undo_operations with a not installed update."""
operations = NEVRAOperations()
operations.add('Update', 'lotus-4-0.x86_64', 'lotus-3-16.x86_64')
with self._base, self.assertRaises(PackagesNotInstalledError) as context:
self._base.history_undo_operations(operations)
self.assertEqual(context.exception.pkg_spec, 'lotus-4-0.x86_64')
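# Illustrative note (not part of the original tests): history_undo_operations
# inverts each NEVRA operation -- an 'Install' is undone with an ERASE, an
# 'Update' with a DOWNGRADE, and packages the operation obsoleted are put
# back with INSTALL and reason='history', as the matchers above assert.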
| gpl-2.0 |
heathseals/CouchPotatoServer | libs/pyutil/test/current/json_tests/test_unicode.py | 106 | 1973 | from unittest import TestCase
from pyutil import jsonutil as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"%s"' % (u,))
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["%s"]' % (u,))
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
js = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(js), u)
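# Illustrative note (not part of the original tests): with ensure_ascii in
# effect, non-BMP characters such as u'\U0001d120' are emitted as UTF-16
# surrogate pairs ("\ud834\udd20"), which json.loads decodes back to the
# original character, as the big_unicode tests above verify.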
| gpl-3.0 |
jianajavier/pnc-cli | pnc_cli/swagger_client/models/product_version_singleton.py | 2 | 2920 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class ProductVersionSingleton(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ProductVersionSingleton - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'content': 'ProductVersionRest'
}
self.attribute_map = {
'content': 'content'
}
self._content = None
@property
def content(self):
"""
Gets the content of this ProductVersionSingleton.
:return: The content of this ProductVersionSingleton.
:rtype: ProductVersionRest
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this ProductVersionSingleton.
:param content: The content of this ProductVersionSingleton.
:type: ProductVersionRest
"""
self._content = content
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
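# Minimal usage sketch (illustrative; 'some_rest_object' is hypothetical):
#
#   singleton = ProductVersionSingleton()
#   singleton.content = some_rest_object   # a ProductVersionRest instance
#   print(singleton.to_str())              # pretty-printed dict of properties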
| apache-2.0 |
bguillot/OpenUpgrade | openerp/report/printscreen/__init__.py | 381 | 1203 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ps_list
import ps_form
""" A special report, that is automatically formatted to look like the
screen contents of Form/List Views.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chris2727/BeastBot | src/inc/modules/hack.py | 4 | 3996 | # coding=utf8
"""
hack.py - Pretend you're doing hacker-ish things (based on http://shinytoylabs.com/jargon/)
Copyright 2014 Max Gurela
Licensed under the Eiffel Forum License 2.
"""
import random
from inc import *
modFunc.addCommand('hack', 'hack', 'hack')
abbreviations = [
'TCP',
'HTTP',
'SDD',
'RAM',
'ASP',
'CSS',
'SSL',
'AGP',
'SQL',
'FTP',
'PCI',
'VIM',
'ADP',
'RSS',
'XML',
'EXE',
'COM',
'HDD',
'THX',
'SMTP',
'SMS',
'USB',
'PNG',
'SSH',
'WWW',
'OSS',
'XSS',
'JAR'
]
adjectives = [
'auxiliary',
'primary',
'back-end',
'digital',
'open-source',
'virtual',
'cross-platform',
'redundant',
'online',
'haptic',
'multi-byte',
'bluetooth',
'wireless',
'1080p',
'neural',
'optical',
'solid state',
'mobile'
]
nouns = [
'driver',
'protocol',
'bandwidth',
'panel',
'microchip',
'program',
'port',
'card',
'array',
'interface',
'system',
'sensor',
'firewall',
'hard drive',
'pixel',
'alarm',
'feed',
'monitor',
'application',
'transmitter',
'bus',
'circuit',
'capacitor',
'matrix'
]
verbs = [
'back up',
'bypass',
'hack',
'override',
'compress',
'copy',
'navigate',
'index',
'connect',
'generate',
'quantify',
'calculate',
'synthesize',
'input',
'transmit',
'program',
'reboot',
'parse',
'clear',
'refresh',
'recalibrate',
'calibrate'
]
ingverbs = [
'backing up',
'bypassing',
'hacking',
'overriding',
'compressing',
'copying',
'navigating',
'indexing',
'connecting',
'generating',
'quantifying',
'calculating',
'synthesizing',
'transmitting',
'programming',
'parsing',
'clearing',
'scripting',
'refreshing',
'calibrating'
]
phrases = [
'If we {verb} the {noun}, we can get to the {abbreviation} {noun} through the {adjective} {abbreviation} {noun}!',
'We need to {verb} the {adjective} {abbreviation} {noun}!',
'Try to {verb} the {abbreviation} {noun}, maybe it will {verb} the {adjective} {noun}!',
'You can\'t {verb} the {noun} without {ingverb} the {adjective} {abbreviation} {noun}!',
'Use the {adjective} {abbreviation} {noun}, then you can {verb} the {adjective} {noun}!',
'The {abbreviation} {noun} is down, {verb} the {adjective} {noun} so we can {verb} the {abbreviation} {noun}!',
'{ingverb} the {noun} won\'t do anything, we need to {verb} the {adjective} {abbreviation} {noun}!',
'I\'ll {verb} the {adjective} {abbreviation} {noun}, that should {verb} the {abbreviation} {noun}!'
]
def sentence_cap(s):
return s[0].upper() + s[1:]
def build_phrase(phrase):
if '{' in phrase:
phrase = phrase.replace('{abbreviation}', random.choice(abbreviations), 1)
phrase = phrase.replace('{adjective}', random.choice(adjectives), 1)
phrase = phrase.replace('{noun}', random.choice(nouns), 1)
phrase = phrase.replace('{verb}', random.choice(verbs), 1)
phrase = phrase.replace('{ingverb}', random.choice(ingverbs), 1)
if '{' in phrase:
return build_phrase(phrase)
else:
return sentence_cap(phrase)
else:
return sentence_cap(phrase)
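# Illustrative sketch (not part of the original module): build_phrase keeps
# substituting one placeholder of each kind per pass until none remain, e.g.
#
#   build_phrase('We need to {verb} the {adjective} {abbreviation} {noun}!')
#   # -> e.g. "We need to bypass the digital SSL firewall!"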
def hack(line, irc):
"""
!hack [target] - Pretend you're doing hacker-ish things (based on http://shinytoylabs.com/jargon/)
"""
message, username, msgto = ircFunc.ircMessage(line)
phrase = build_phrase(random.choice(phrases))
try:
if message[1]:
nick = message[1].strip() + " "
ircFunc.ircSay(msgto, '{0}: {1}'.format(nick, phrase), irc)
except IndexError as e:
ircFunc.ircSay(msgto, '{0}: {1}'.format(username, phrase), irc)
except Exception as e:
errorhandling.intputError('critical', e, line)
| gpl-3.0 |
Arable/evepod | lib/python2.7/site-packages/pymongo/replica_set_connection.py | 13 | 11843 | # Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Tools for connecting to a MongoDB replica set.
.. warning::
**DEPRECATED:** Please use :mod:`~pymongo.mongo_replica_set_client` instead.
.. seealso:: :doc:`/examples/high_availability` for more examples of
how to connect to a replica set.
To get a :class:`~pymongo.database.Database` instance from a
:class:`ReplicaSetConnection` use either dictionary-style or
attribute-style access:
.. doctest::
>>> from pymongo import ReplicaSetConnection
>>> c = ReplicaSetConnection('localhost:27017', replicaSet='repl0')
>>> c.test_database
Database(ReplicaSetConnection([u'...', u'...']), u'test_database')
>>> c['test_database']
Database(ReplicaSetConnection([u'...', u'...']), u'test_database')
"""
from pymongo.mongo_replica_set_client import MongoReplicaSetClient
from pymongo.errors import ConfigurationError
class ReplicaSetConnection(MongoReplicaSetClient):
"""Connection to a MongoDB replica set.
"""
def __init__(self, hosts_or_uri=None, max_pool_size=None,
document_class=dict, tz_aware=False, **kwargs):
"""Create a new connection to a MongoDB replica set.
.. warning::
**DEPRECATED:** :class:`ReplicaSetConnection` is deprecated. Please
use :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
instead
The resultant connection object has connection-pooling built
in. It also performs auto-reconnection when necessary. If an
operation fails because of a connection error,
:class:`~pymongo.errors.ConnectionFailure` is raised. If
auto-reconnection will be performed,
:class:`~pymongo.errors.AutoReconnect` will be
raised. Application code should handle this exception
(recognizing that the operation failed) and then continue to
execute.
Raises :class:`~pymongo.errors.ConnectionFailure` if
the connection cannot be made.
The `hosts_or_uri` parameter can be a full `mongodb URI
<http://dochub.mongodb.org/core/connections>`_, in addition to
a string of `host:port` pairs (e.g. 'host1:port1,host2:port2').
If `hosts_or_uri` is None 'localhost:27017' will be used.
.. note:: Instances of :class:`~ReplicaSetConnection` start a
background task to monitor the state of the replica set. This allows
it to quickly respond to changes in replica set configuration.
Before discarding an instance of :class:`~ReplicaSetConnection` make
sure you call :meth:`~close` to ensure that the monitor task is
cleanly shut down.
:Parameters:
- `hosts_or_uri` (optional): A MongoDB URI or string of `host:port`
pairs. If a host is an IPv6 literal it must be enclosed in '[' and
']' characters following the RFC2732 URL syntax (e.g. '[::1]' for
localhost)
- `max_pool_size` (optional): The maximum number of connections
each pool will open simultaneously. If this is set, operations
will block if there are `max_pool_size` outstanding connections
from the pool. By default the pool size is unlimited.
- `document_class` (optional): default class to use for
documents returned from queries on this connection
- `tz_aware` (optional): if ``True``,
:class:`~datetime.datetime` instances returned as values
in a document by this :class:`ReplicaSetConnection` will be timezone
aware (otherwise they will be naive)
- `replicaSet`: (required) The name of the replica set to connect to.
The driver will verify that each host it connects to is a member of
this replica set. Can be passed as a keyword argument or as a
MongoDB URI option.
| **Other optional parameters can be passed as keyword arguments:**
- `host`: For compatibility with connection.Connection. If both
`host` and `hosts_or_uri` are specified `host` takes precedence.
- `port`: For compatibility with connection.Connection. The default
port number to use for hosts.
- `network_timeout`: For compatibility with connection.Connection.
The timeout (in seconds) to use for socket operations - default
is no timeout. If both `network_timeout` and `socketTimeoutMS` are
specified `network_timeout` takes precedence, matching
connection.Connection.
- `socketTimeoutMS`: (integer) How long (in milliseconds) a send or
receive on a socket can take before timing out.
- `connectTimeoutMS`: (integer) How long (in milliseconds) a
connection can take to be opened before timing out.
- `waitQueueTimeoutMS`: (integer) How long (in milliseconds) a
thread will wait for a socket from the pool if the pool has no
free sockets. Defaults to ``None`` (no timeout).
- `waitQueueMultiple`: (integer) Multiplied by max_pool_size to give
the number of threads allowed to wait for a socket at one time.
Defaults to ``None`` (no waiters).
- `auto_start_request`: If ``True`` (the default), each thread that
accesses this :class:`ReplicaSetConnection` has a socket allocated
to it for the thread's lifetime, for each member of the set. For
:class:`~pymongo.read_preferences.ReadPreference` PRIMARY,
auto_start_request=True ensures consistent reads, even if you read
after an unsafe write. For read preferences other than PRIMARY,
there are no consistency guarantees.
- `use_greenlets`: if ``True``, use a background Greenlet instead of
a background thread to monitor state of replica set. Additionally,
:meth:`start_request()` will ensure that the current greenlet uses
the same socket for all operations until :meth:`end_request()`.
`use_greenlets` with ReplicaSetConnection requires `Gevent
<http://gevent.org/>`_ to be installed.
| **Write Concern options:**
- `safe`: :class:`ReplicaSetConnection` **disables** acknowledgement
of write operations. Use ``safe=True`` to enable write
acknowledgement.
- `w`: (integer or string) Write operations will block until they have
been replicated to the specified number or tagged set of servers.
`w=<int>` always includes the replica set primary (e.g. w=3 means
write to the primary and wait until replicated to **two**
secondaries). Implies safe=True.
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised. Implies safe=True.
- `j`: If ``True`` block until write operations have been committed
to the journal. Ignored if the server is running without journaling.
Implies safe=True.
- `fsync`: If ``True`` force the database to fsync all files before
returning. When used with `j` the server awaits the next group
commit before returning. Implies safe=True.
| **Read preference options:**
- `slave_okay` or `slaveOk` (deprecated): Use `read_preference`
instead.
- `read_preference`: The read preference for this connection.
See :class:`~pymongo.read_preferences.ReadPreference` for available options.
- `tag_sets`: Read from replica-set members with these tags.
To specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." :class:`MongoReplicaSetClient` tries each set of
tags in turn until it finds a set of tags with at least one matching
member.
- `secondary_acceptable_latency_ms`: (integer) Any replica-set member
whose ping time is within secondary_acceptable_latency_ms of the
nearest member may accept reads. Default 15 milliseconds.
**Ignored by mongos** and must be configured on the command line.
See the localThreshold_ option for more information.
| **SSL configuration:**
- `ssl`: If ``True``, create the connection to the servers using SSL.
- `ssl_keyfile`: The private keyfile used to identify the local
connection against mongod. If included with the ``certfile`` then
only the ``ssl_certfile`` is needed. Implies ``ssl=True``.
- `ssl_certfile`: The certificate file used to identify the local
connection against mongod. Implies ``ssl=True``.
- `ssl_cert_reqs`: Specifies whether a certificate is required from
the other side of the connection, and whether it will be validated
if provided. It must be one of the three values ``ssl.CERT_NONE``
(certificates ignored), ``ssl.CERT_OPTIONAL``
(not required, but validated if provided), or ``ssl.CERT_REQUIRED``
(required and validated). If the value of this parameter is not
``ssl.CERT_NONE``, then the ``ssl_ca_certs`` parameter must point
to a file of CA certificates. Implies ``ssl=True``.
- `ssl_ca_certs`: The ca_certs file contains a set of concatenated
"certification authority" certificates, which are used to validate
certificates passed from the other end of the connection.
Implies ``ssl=True``.
.. versionchanged:: 2.5
Added additional ssl options
.. versionchanged:: 2.3
Added `tag_sets` and `secondary_acceptable_latency_ms` options.
.. versionchanged:: 2.2
Added `auto_start_request` and `use_greenlets` options.
Added support for `host`, `port`, and `network_timeout` keyword
arguments for compatibility with connection.Connection.
.. versionadded:: 2.1
.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold
"""
network_timeout = kwargs.pop('network_timeout', None)
if network_timeout is not None:
if (not isinstance(network_timeout, (int, float)) or
network_timeout <= 0):
raise ConfigurationError("network_timeout must "
"be a positive integer")
kwargs['socketTimeoutMS'] = network_timeout * 1000
kwargs['auto_start_request'] = kwargs.get('auto_start_request', True)
kwargs['safe'] = kwargs.get('safe', False)
super(ReplicaSetConnection, self).__init__(
hosts_or_uri, max_pool_size, document_class, tz_aware, **kwargs)
def __repr__(self):
return "ReplicaSetConnection(%r)" % (["%s:%d" % n
for n in self.hosts],)
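# Minimal usage sketch (illustrative; the host and replica-set name are
# hypothetical, assuming a reachable set named 'repl0'):
#
#   conn = ReplicaSetConnection('localhost:27017', replicaSet='repl0',
#                               w=2, wtimeout=1000)
#   conn.test_database.my_collection.insert({'x': 1})
#   conn.close()   # shut down the replica-set monitor cleanly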
| apache-2.0 |
ximion/dak | dak/dakdb/update95.py | 7 | 1794 | #!/usr/bin/env python
# coding=utf8
"""
Require SHA-1 and SHA-256 checksums in "files" table.
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2013, Ansgar Burchardt <ansgar@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
from daklib.config import Config
statements = [
"""
ALTER TABLE files
ALTER COLUMN sha1sum SET NOT NULL,
ALTER COLUMN sha256sum SET NOT NULL
""",
]
################################################################################
def do_update(self):
print __doc__
try:
cnf = Config()
c = self.db.cursor()
for stmt in statements:
c.execute(stmt)
c.execute("UPDATE config SET value = '95' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.ProgrammingError as msg:
self.db.rollback()
raise DBUpdateError('Unable to apply update 95, rollback issued. Error message: {0}'.format(msg))
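# Illustrative note (not part of the original script): once this migration
# has run, INSERTs into 'files' that omit sha1sum or sha256sum fail with a
# NOT NULL constraint violation, enforcing the checksums at the schema level.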
| gpl-2.0 |
mrquim/mrquimrepo | script.module.youtube.dl/lib/youtube_dl/extractor/eroprofile.py | 61 | 3218 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
unescapeHTML
)
class EroProfileIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?eroprofile\.com/m/videos/view/(?P<id>[^/]+)'
_LOGIN_URL = 'http://www.eroprofile.com/auth/auth.php?'
_NETRC_MACHINE = 'eroprofile'
_TESTS = [{
'url': 'http://www.eroprofile.com/m/videos/view/sexy-babe-softcore',
'md5': 'c26f351332edf23e1ea28ce9ec9de32f',
'info_dict': {
'id': '3733775',
'display_id': 'sexy-babe-softcore',
'ext': 'm4v',
'title': 'sexy babe softcore',
'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
}
}, {
'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file',
'md5': '1baa9602ede46ce904c431f5418d8916',
'info_dict': {
'id': '1133519',
'ext': 'm4v',
'title': 'Try It On Pee_cut_2.wmv - 4shared.com - file sharing - download movie file',
'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
},
'skip': 'Requires login',
}]
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
query = compat_urllib_parse_urlencode({
'username': username,
'password': password,
'url': 'http://www.eroprofile.com/',
})
login_url = self._LOGIN_URL + query
login_page = self._download_webpage(login_url, None, False)
m = re.search(r'Your username or password was incorrect\.', login_page)
if m:
raise ExtractorError(
'Wrong username and/or password.', expected=True)
self.report_login()
redirect_url = self._search_regex(
r'<script[^>]+?src="([^"]+)"', login_page, 'login redirect url')
self._download_webpage(redirect_url, None, False)
def _real_initialize(self):
self._login()
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
m = re.search(r'You must be logged in to view this video\.', webpage)
if m:
self.raise_login_required('This video requires login')
video_id = self._search_regex(
[r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
webpage, 'video id', default=None)
video_url = unescapeHTML(self._search_regex(
r'<source src="([^"]+)', webpage, 'video url'))
title = self._html_search_regex(
r'Title:</th><td>([^<]+)</td>', webpage, 'title')
thumbnail = self._search_regex(
r'onclick="showVideoPlayer\(\)"><img src="([^"]+)',
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'age_limit': 18,
}
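# Illustrative note (not part of the original extractor): credentials come
# from _get_login_info(), i.e. the downloader's username/password options or
# a .netrc entry for the 'eroprofile' machine named by _NETRC_MACHINE.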
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/operations/_route_filters_operations.py | 1 | 27170 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations(object):
"""RouteFiltersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_filter_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteFilter"]
"""Creates or updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the create or update route filter
operation.
:type route_filter_parameters: ~azure.mgmt.network.v2019_11_01.models.RouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
route_filter_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Updates tags of a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param parameters: Parameters supplied to update route filter tags.
:type parameters: ~azure.mgmt.network.v2019_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'} # type: ignore
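# Minimal usage sketch (illustrative; 'network_client' and the resource names
# are hypothetical, assuming an authenticated NetworkManagementClient):
#
#   poller = network_client.route_filters.begin_create_or_update(
#       'my-rg', 'my-route-filter', {'location': 'westus'})
#   route_filter = poller.result()   # block until the LRO finishes
#   for rf in network_client.route_filters.list_by_resource_group('my-rg'):
#       print(rf.name)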
| mit |
maoxuxiang/termite_mallet_project | web2py/gluon/contrib/rss2.py | 44 | 16164 | """
PyRSS2Gen - A Python library for generating RSS 2.0 feeds.
(This is the BSD license, based on the template at
http://www.opensource.org/licenses/bsd-license.php )
Copyright (c) 2003, Dalke Scientific Software, LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of the Dalke Scientific Software, LLC, Andrew
Dalke, nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__name__ = "PyRSS2Gen"
__version__ = (1, 1, 0)
__author__ = "Andrew Dalke <dalke@dalkescientific.com>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
import datetime
import sys
if sys.version_info[0] == 3:
# Python 3
basestring = str
from io import StringIO
else:
# Python 2
try:
from cStringIO import StringIO
except ImportError:
# Very old (or memory constrained) systems might
# have left out the compiled C version. Fall back
# to the pure Python one. Haven't seen this sort
# of system since the early 2000s.
from StringIO import StringIO
# Could make this the base class; will need to add 'publish'
class WriteXmlMixin:
def write_xml(self, outfile, encoding="iso-8859-1"):
from xml.sax import saxutils
handler = saxutils.XMLGenerator(outfile, encoding)
handler.startDocument()
self.publish(handler)
handler.endDocument()
def to_xml(self, encoding="iso-8859-1"):
f = StringIO()
self.write_xml(f, encoding)
return f.getvalue()
def _element(handler, name, obj, d={}):
if isinstance(obj, basestring) or obj is None:
# special-case handling to make the API easier
# to use for the common case.
handler.startElement(name, d)
if obj is not None:
handler.characters(obj)
handler.endElement(name)
else:
# It better know how to emit the correct XML.
obj.publish(handler)
def _opt_element(handler, name, obj):
if obj is None:
return
_element(handler, name, obj)
def _format_date(dt):
"""convert a datetime into an RFC 822 formatted date
Input date must be in GMT.
"""
# Looks like:
# Sat, 07 Sep 2002 00:00:01 GMT
# Can't use strftime because that's locale dependent
#
# Isn't there a standard way to do this for Python? The
# rfc822 and email.Utils modules assume a timestamp. The
# following is based on the rfc822 module.
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month - 1],
dt.year, dt.hour, dt.minute, dt.second)
##
# A couple simple wrapper objects for the fields which
# take a simple value other than a string.
class IntElement:
"""implements the 'publish' API for integers
Takes the tag name and the integer value to publish.
(Could be used for anything which uses str() to be published
to text for XML.)
"""
element_attrs = {}
def __init__(self, name, val):
self.name = name
self.val = val
def publish(self, handler):
handler.startElement(self.name, self.element_attrs)
handler.characters(str(self.val))
handler.endElement(self.name)
class DateElement:
"""implements the 'publish' API for a datetime.datetime
Takes the tag name and the datetime to publish.
Converts the datetime to RFC 2822 timestamp (4-digit year).
"""
def __init__(self, name, dt):
self.name = name
self.dt = dt
def publish(self, handler):
_element(handler, self.name, _format_date(self.dt))
####
class Category:
"""Publish a category element"""
def __init__(self, category, domain=None):
self.category = category
self.domain = domain
def publish(self, handler):
d = {}
if self.domain is not None:
d["domain"] = self.domain
_element(handler, "category", self.category, d)
class Cloud:
"""Publish a cloud"""
def __init__(self, domain, port, path,
registerProcedure, protocol):
self.domain = domain
self.port = port
self.path = path
self.registerProcedure = registerProcedure
self.protocol = protocol
def publish(self, handler):
_element(handler, "cloud", None, {
"domain": self.domain,
"port": str(self.port),
"path": self.path,
"registerProcedure": self.registerProcedure,
"protocol": self.protocol})
class Image:
"""Publish a channel Image"""
element_attrs = {}
def __init__(self, url, title, link,
width=None, height=None, description=None):
self.url = url
self.title = title
self.link = link
self.width = width
self.height = height
self.description = description
def publish(self, handler):
handler.startElement("image", self.element_attrs)
_element(handler, "url", self.url)
_element(handler, "title", self.title)
_element(handler, "link", self.link)
width = self.width
if isinstance(width, int):
width = IntElement("width", width)
_opt_element(handler, "width", width)
height = self.height
if isinstance(height, int):
height = IntElement("height", height)
_opt_element(handler, "height", height)
_opt_element(handler, "description", self.description)
handler.endElement("image")
class Guid:
"""Publish a guid
Defaults to being a permalink, which is the assumption if it's
omitted. Hence strings are always permalinks.
"""
def __init__(self, guid, isPermaLink=1):
self.guid = guid
self.isPermaLink = isPermaLink
def publish(self, handler):
d = {}
if self.isPermaLink:
d["isPermaLink"] = "true"
else:
d["isPermaLink"] = "false"
_element(handler, "guid", self.guid, d)
class TextInput:
"""Publish a textInput
Apparently this is rarely used.
"""
element_attrs = {}
def __init__(self, title, description, name, link):
self.title = title
self.description = description
self.name = name
self.link = link
def publish(self, handler):
handler.startElement("textInput", self.element_attrs)
_element(handler, "title", self.title)
_element(handler, "description", self.description)
_element(handler, "name", self.name)
_element(handler, "link", self.link)
handler.endElement("textInput")
class Enclosure:
"""Publish an enclosure"""
def __init__(self, url, length, type):
self.url = url
self.length = length
self.type = type
def publish(self, handler):
_element(handler, "enclosure", None,
{"url": self.url,
"length": str(self.length),
"type": self.type,
})
class Source:
"""Publish the item's original source, used by aggregators"""
def __init__(self, name, url):
self.name = name
self.url = url
def publish(self, handler):
_element(handler, "source", self.name, {"url": self.url})
class SkipHours:
"""Publish the skipHours
This takes a list of hours, as integers.
"""
element_attrs = {}
def __init__(self, hours):
self.hours = hours
def publish(self, handler):
if self.hours:
handler.startElement("skipHours", self.element_attrs)
for hour in self.hours:
_element(handler, "hour", str(hour))
handler.endElement("skipHours")
class SkipDays:
"""Publish the skipDays
This takes a list of days as strings.
"""
element_attrs = {}
def __init__(self, days):
self.days = days
def publish(self, handler):
if self.days:
handler.startElement("skipDays", self.element_attrs)
for day in self.days:
_element(handler, "day", day)
handler.endElement("skipDays")

class RSS2(WriteXmlMixin):
    """The main RSS class.

    Stores the channel attributes, with the "category" elements under
    ".categories" and the RSS items under ".items".
    """
    rss_attrs = {"version": "2.0"}
    element_attrs = {}

    def __init__(self,
                 title,
                 link,
                 description,

                 language=None,
                 copyright=None,
                 managingEditor=None,
                 webMaster=None,
                 pubDate=None,          # a datetime, *in* *GMT*
                 lastBuildDate=None,    # a datetime
                 categories=None,       # list of strings or Category
                 generator=_generator_name,
                 docs="http://blogs.law.harvard.edu/tech/rss",
                 cloud=None,            # a Cloud
                 ttl=None,              # integer number of minutes
                 image=None,            # an Image
                 rating=None,           # a string; I don't know how it's used
                 textInput=None,        # a TextInput
                 skipHours=None,        # a SkipHours with a list of integers
                 skipDays=None,         # a SkipDays with a list of strings
                 items=None,            # list of RSSItems
                 ):
        self.title = title
        self.link = link
        self.description = description
        self.language = language
        self.copyright = copyright
        self.managingEditor = managingEditor
        self.webMaster = webMaster
        self.pubDate = pubDate
        self.lastBuildDate = lastBuildDate

        if categories is None:
            categories = []
        self.categories = categories

        self.generator = generator
        self.docs = docs
        self.cloud = cloud
        self.ttl = ttl
        self.image = image
        self.rating = rating
        self.textInput = textInput
        self.skipHours = skipHours
        self.skipDays = skipDays

        if items is None:
            items = []
        self.items = items

    def publish(self, handler):
        handler.startElement("rss", self.rss_attrs)
        handler.startElement("channel", self.element_attrs)
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)
        _element(handler, "description", self.description)

        self.publish_extensions(handler)

        _opt_element(handler, "language", self.language)
        _opt_element(handler, "copyright", self.copyright)
        _opt_element(handler, "managingEditor", self.managingEditor)
        _opt_element(handler, "webMaster", self.webMaster)

        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement("pubDate", pubDate)
        _opt_element(handler, "pubDate", pubDate)

        lastBuildDate = self.lastBuildDate
        if isinstance(lastBuildDate, datetime.datetime):
            lastBuildDate = DateElement("lastBuildDate", lastBuildDate)
        _opt_element(handler, "lastBuildDate", lastBuildDate)

        for category in self.categories:
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, "generator", self.generator)
        _opt_element(handler, "docs", self.docs)

        if self.cloud is not None:
            self.cloud.publish(handler)

        ttl = self.ttl
        if isinstance(self.ttl, int):
            ttl = IntElement("ttl", ttl)
        _opt_element(handler, "ttl", ttl)

        if self.image is not None:
            self.image.publish(handler)

        _opt_element(handler, "rating", self.rating)

        if self.textInput is not None:
            self.textInput.publish(handler)
        if self.skipHours is not None:
            self.skipHours.publish(handler)
        if self.skipDays is not None:
            self.skipDays.publish(handler)

        for item in self.items:
            item.publish(handler)

        handler.endElement("channel")
        handler.endElement("rss")

    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the three required fields.
        pass
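
# Illustrative subclass sketch (not part of the original module): the
# publish_extensions hook lets a derived class emit extra channel-level
# elements right after the three required fields.  The Atom namespace
# declaration and atom:link element below are assumptions chosen for
# the example.
class _ExampleAtomRSS2(RSS2):
    rss_attrs = {"version": "2.0",
                 "xmlns:atom": "http://www.w3.org/2005/Atom"}

    def publish_extensions(self, handler):
        # Emit an atom:link self-reference inside <channel>.
        _element(handler, "atom:link", None,
                 {"href": self.link,
                  "rel": "self",
                  "type": "application/rss+xml"})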

class RSSItem(WriteXmlMixin):
    """Publish an RSS Item"""
    element_attrs = {}

    def __init__(self,
                 title=None,        # string
                 link=None,         # url as string
                 description=None,  # string
                 author=None,       # email address as string
                 categories=None,   # list of string or Category
                 comments=None,     # url as string
                 enclosure=None,    # an Enclosure
                 guid=None,         # a unique string
                 pubDate=None,      # a datetime
                 source=None,       # a Source
                 ):

        if title is None and description is None:
            raise TypeError(
                "must define at least one of 'title' or 'description'")
        self.title = title
        self.link = link
        self.description = description
        self.author = author
        if categories is None:
            categories = []
        self.categories = categories
        self.comments = comments
        self.enclosure = enclosure
        self.guid = guid
        self.pubDate = pubDate
        self.source = source
        # It sure does get tedious typing these names three times...

    def publish(self, handler):
        handler.startElement("item", self.element_attrs)
        _opt_element(handler, "title", self.title)
        _opt_element(handler, "link", self.link)
        self.publish_extensions(handler)
        _opt_element(handler, "description", self.description)
        _opt_element(handler, "author", self.author)

        for category in self.categories:
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, "comments", self.comments)
        if self.enclosure is not None:
            self.enclosure.publish(handler)
        _opt_element(handler, "guid", self.guid)

        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement("pubDate", pubDate)
        _opt_element(handler, "pubDate", pubDate)

        if self.source is not None:
            self.source.publish(handler)

        handler.endElement("item")

    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the title and link elements
        pass
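
# Hedged end-to-end sketch (illustration only, not part of the original
# module).  All titles, URLs, and dates below are made-up example
# values, and it assumes WriteXmlMixin provides
# write_xml(outfile, encoding) as in PyRSS2Gen.
if __name__ == "__main__":
    import sys

    feed = RSS2(
        title="Example Feed",
        link="http://example.com/",
        description="A two-item demonstration feed.",
        lastBuildDate=datetime.datetime(2024, 1, 1, 12, 0),  # in GMT
        items=[
            RSSItem(
                title="Hello, world",
                link="http://example.com/posts/1",
                description="First post.",
                guid=Guid("http://example.com/posts/1"),
                pubDate=datetime.datetime(2024, 1, 1, 9, 30)),
            RSSItem(
                title="Second post",
                link="http://example.com/posts/2",
                description="A post with an enclosure.",
                enclosure=Enclosure("http://example.com/audio/2.mp3",
                                    24986239, "audio/mpeg")),
        ])
    # Serialize the channel and its items to stdout as RSS 2.0 XML.
    feed.write_xml(sys.stdout)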
| bsd-3-clause |