| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
import json
import six
_transform_registry = {}
def register_transform(from_type, func):
_transform_registry[from_type] = func
class BaseModel(object):
def __str__(self):
str_dict = {}
for key, value in six.iteritems(self.__dict__):
if key.startswith('_'):
continue
str_dict[key] = value
return json.dumps(str_dict)
def __repr__(self):
return self.__str__()
@classmethod
def transform(cls, from_model):
if isinstance(from_model, list):
if from_model:
key = type(from_model[0])
else:
return []
elif not from_model:
return
else:
key = type(from_model)
func = _transform_registry[key]
if isinstance(from_model, list):
return [func(item) for item in from_model]
return func(from_model)
class Server(BaseModel):
def __init__(self, uuid=None, name=None, flavor=None, image=None,
networks=None):
self.uuid = uuid
self.name = name
self.flavor = flavor
self.image = image
self.networks = networks or []
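# Usage sketch of the transform registry above (added for illustration; the
# NovaServer class and the converter lambda are hypothetical, not part of the
# original module): register a converter keyed on a source type, then let
# BaseModel.transform dispatch on the runtime type of its argument.
class NovaServer(object):
    def __init__(self, id, name):
        self.id = id
        self.name = name

register_transform(NovaServer, lambda s: Server(uuid=s.id, name=s.name))
assert Server.transform(NovaServer('abc-123', 'web01')).name == 'web01'
assert len(Server.transform([NovaServer('abc-123', 'web01')])) == 1  # lists map item-wise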
| omninubes/nubes | nubes/common/models.py | Python | apache-2.0 | 1,198 |
from __future__ import absolute_import, print_function
import re
from collections import defaultdict
from constants import (
CONTACT_TYPE_EMAIL,
CONTACT_TYPE_SMS,
CONTACT_TYPE_YO,
STATUS_CANCELLED,
STATUS_CLOSED,
STATUS_FULL,
STATUS_OPEN,
STATUS_STOPPED,
STATUS_TENTATIVE,
)
from dbhelper import db_session
from models import Klass
from sqlalchemy.sql import exists
web_status_to_db_status_dict = {'open': STATUS_OPEN,
'full': STATUS_FULL,
'clos': STATUS_CLOSED,
'tent': STATUS_TENTATIVE,
'canc': STATUS_CANCELLED,
'stop': STATUS_STOPPED}
def web_status_to_db_status(status):
return web_status_to_db_status_dict[status[:4].lower()]
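# Worked example (illustrative): web_status_to_db_status('Closed') returns
# STATUS_CLOSED, because only the first four characters, lowercased ('clos'),
# are used for the lookup; an unrecognized prefix raises KeyError.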
db_status_to_text_status_dict = {STATUS_OPEN: 'Open',
STATUS_FULL: 'Full',
STATUS_CLOSED: 'Closed',
STATUS_TENTATIVE: 'Tent',
STATUS_CANCELLED: 'Canc',
STATUS_STOPPED: 'Stop'}
def db_status_to_text_status(status):
return db_status_to_text_status_dict[status]
web_day_to_int_day_dict = {'mon': 0,
'tue': 1,
'wed': 2,
'thu': 3,
'fri': 4,
'sat': 5,
'sun': 6}
def web_day_to_int_day(day):
return web_day_to_int_day_dict[day.lower()]
int_day_to_text_day_dict = {0: 'Mon',
1: 'Tue',
2: 'Wed',
3: 'Thu',
4: 'Fri',
5: 'Sat',
6: 'Sun'}
def int_day_to_text_day(day):
if day is None:
return None
return int_day_to_text_day_dict[day]
def hour_of_day_to_seconds_since_midnight(hour):
hour = hour.split(':')
seconds = int(hour[0]) * 60 * 60
if len(hour) == 2:
seconds += int(hour[1]) * 60
return seconds
def seconds_since_midnight_to_hour_of_day(seconds):
if seconds is None:
return None
hour = seconds / 60.0 / 60.0
if hour % 1 == 0.5:
hour = str(int(hour)) + ':30'
else:
hour = str(int(hour))
return hour
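# Worked round-trip example for the two converters above (illustrative):
# hour_of_day_to_seconds_since_midnight('9:30') == 9*3600 + 30*60 == 34200
# seconds_since_midnight_to_hour_of_day(34200) == '9:30' (34200/3600 == 9.5)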
def contact_type_description(contact_type):
pretty_contact_type = {CONTACT_TYPE_EMAIL: 'an email',
CONTACT_TYPE_SMS: 'an SMS', CONTACT_TYPE_YO: 'a YO'}
return pretty_contact_type[contact_type]
def validate_klass_id(klass_id):
klass_id = int(klass_id)
if not db_session.query(exists().where(Klass.klass_id ==
klass_id)).scalar():
raise KeyError
return klass_id
def validate_course_id(course_id):
if re.match(r'^[A-Z]{4}[0-9]{4}$', course_id):
return course_id[:4], course_id[4:]
else:
raise Exception
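# Worked example (illustrative): validate_course_id('COMP1511') returns
# ('COMP', '1511'); any string not matching four uppercase letters followed
# by four digits raises.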
def klasses_to_template_courses(klasses):
courses_dict = defaultdict(list)
for klass in klasses:
courses_dict[klass.course.compound_id].append(klass)
def sort_key(c):
return (c.klass_type,
c.timeslots[0].day if c.timeslots else None,
c.timeslots[0].start_time if c.timeslots else None)
courses = [{'course_id': course_id,
'classes': [c.to_dict() for c in sorted(classes, key=sort_key)]}
for course_id, classes in sorted(courses_dict.iteritems())]
return courses
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
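# Usage sketch (illustrative): list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]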
| Chybby/Tutorifull | util.py | Python | mit | 3,758 |
import datetime as dt
import errno
import logging
import os
import time
from nose.tools import *
import psycopg2
import socorro.lib.ConfigurationManager as configurationManager
import socorro.database.schema as schema
import socorro.database.postgresql as socorro_psg
from socorro.unittest.testlib.testDB import TestDB
import dbTestconfig as testConfig
class Me:
pass
me = None
def setup_module():
global me
if me:
return
me = Me()
me.config = configurationManager.newConfiguration(configurationModule = testConfig, applicationName='Testing Postgresql Utils')
myDir = os.path.split(__file__)[0]
if not myDir: myDir = '.'
replDict = {'testDir':'%s'%myDir}
for i in me.config:
try:
me.config[i] = me.config.get(i)%(replDict)
except:
pass
me.logFilePathname = me.config.logFilePathname
if not me.logFilePathname:
me.logFilePathname = 'logs/db_test.log'
logFileDir = os.path.split(me.logFilePathname)[0]
try:
os.makedirs(logFileDir)
except OSError,x:
if errno.EEXIST == x.errno: pass
else: raise
f = open(me.logFilePathname,'w')
f.close()
fileLog = logging.FileHandler(me.logFilePathname, 'a')
fileLog.setLevel(logging.DEBUG)
fileLogFormatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
fileLog.setFormatter(fileLogFormatter)
me.logger = logging.getLogger("db_test")
me.logger.addHandler(fileLog)
me.logger.setLevel(logging.DEBUG)
me.dsn = "host=%s dbname=%s user=%s password=%s" % (me.config.databaseHost,me.config.databaseName,
me.config.databaseUserName,me.config.databasePassword)
me.testDB = TestDB()
me.expectedTableNames = set(['tcbyurlconfig', 'topcrashurlfacts', 'priorityjobs', 'branches', 'processors', 'productdims', 'topcrashurlfactsreports', 'urldims', 'mtbfconfig', 'signaturedims', 'reports', 'server_status', 'dumps', 'extensions', 'mtbffacts', 'frames', 'topcrashers', 'jobs',])
me.expectedTableDependencies = {
'topcrashurlfactsreports': set(['productdims', 'signaturedims', 'urldims', 'topcrashurlfacts', 'topcrashurlfactsreports']),
'signaturedims': set(['signaturedims']),
'jobs': set(['processors', 'jobs']),
'processors': set(['processors']),
'mtbfconfig': set(['productdims', 'mtbfconfig']),
'urldims': set(['urldims']),
'productdims': set(['productdims']),
'topcrashurlfacts': set(['productdims', 'signaturedims', 'urldims', 'topcrashurlfacts']),
'reports': set(['reports']),
'server_status': set(['server_status']),
'dumps': set(['dumps']),
'tcbyurlconfig': set(['productdims', 'tcbyurlconfig']),
'priorityjobs': set(['priorityjobs']),
'mtbffacts': set(['productdims', 'mtbffacts']),
'topcrashers': set(['topcrashers']),
'frames': set(['frames']),
'branches': set(['branches']),
'extensions': set(['extensions']),
}
def teardown_module():
me.testDB.removeDB(me.config,me.logger)
def testMondayPairsIteratorFactory():
"""testMondayPairsIteratorFactory():
- check that we get the Monday-to-Monday full week from one non-Monday
- check that we get the appropriate weeks for a pair of Mondays
- check that we get appropriate weeks for a leading Monday, trailing non-Monday
- check that we raise ValueError for reverse-order arguments
- check that we raise TypeError for non-date arguments
"""
data = [
(dt.date(2008,1,1),dt.date(2008,1,1),[(dt.date(2007, 12, 31), dt.date(2008, 1, 7))]),
(dt.date(2008,1,7),dt.date(2008,1,19),[(dt.date(2008, 1, 7), dt.date(2008, 1, 14)), (dt.date(2008, 1, 14), dt.date(2008, 1, 21))]),
(dt.date(2008,1,9),dt.date(2008,2,3),[(dt.date(2008, 1, 7), dt.date(2008, 1, 14)), (dt.date(2008, 1, 14), dt.date(2008, 1, 21)), (dt.date(2008, 1, 21), dt.date(2008, 1, 28)), (dt.date(2008, 1, 28), dt.date(2008, 2, 4))]),
(dt.date(2008,1,7),dt.date(2008,1,1),ValueError),
("bad",dt.date(2008,1,1),TypeError),
(dt.date(2008,1,1),"bad",TypeError),
(39,"worse",TypeError),
]
for datePair in data:
expected = datePair[-1]
if isinstance(expected,list):
di = schema.mondayPairsIteratorFactory(*(datePair[:2]))
got = [ x for x in di ]
assert datePair[-1] == got , 'Expected %s, got %s'%(datePair[-1],got)
else:
assert_raises(expected,schema.mondayPairsIteratorFactory,*(datePair[:2]))
def testGetOrderedSetupList():
""" testGetOrderedSetupList()
- check that the full list is what we expect
- check that each table has expected dependencies
- check that (maybe buggy) non-existent table has only self as dependency
"""
global me
lookup = {}
allTables = schema.getOrderedSetupList()
for t in allTables:
try:
n = t(logger=me.logger).name
lookup[t] = n
except:
lookup[t] = t
gotTableNames = set(lookup.values())
assert me.expectedTableNames == gotTableNames, "Expected:\n %s\nGot:\n %s"%(me.expectedTableNames,gotTableNames)
gotDependencies = {}
for t in allTables:
gotDependencies[lookup[t]] = set([lookup[x] for x in schema.getOrderedSetupList((t,))])
assert me.expectedTableDependencies == gotDependencies, "Expected:\n %s\nGot:\n %s"%(me.expectedTableDependencies,gotDependencies)
assert ['woohoo'] == schema.getOrderedSetupList(['woohoo'])
def testGetOrderedPartitionList():
"""
testGetOrderedPartitionList():
- check that the full list is what we expect
- check that each table has expected dependencies
"""
global me
lookup = {}
expected = {
'frames': set(['frames', 'reports']),
'extensions': set(['extensions', 'reports']),
'dumps': set(['dumps', 'reports']),
}
allTables = schema.getOrderedSetupList()
for t in allTables:
n = t(logger=me.logger).name
lookup[t] = n
allTables = schema.getOrderedSetupList()
for t in allTables:
tableName = lookup[t]
gotValue = set([lookup[x] for x in schema.getOrderedPartitionList([t])])
if tableName in expected:
assert expected[tableName] == gotValue, 'Expected %s, got %s'%(expected[tableName],gotValue)
else:
assert set([tableName]) == gotValue, 'Expected %s, got %s'%(set([tableName]),gotValue)
def testTwoPartitionCreatedFunctions():
"""
testTwoPartitionCreatedFunctions():
- check that we start empty, that an added one is seen and that a non-added one is not seen
"""
assert set() == schema.partitionCreationHistory
assert not schema.partitionWasCreated('woo')
schema.markPartitionCreated('woo')
assert set(['woo']) == schema.partitionCreationHistory
assert schema.partitionWasCreated('woo')
assert not schema.partitionWasCreated('foo')
def testConnectToDatabase():
"""
testConnectToDatabase():
- check that we can connect to the database and do something with the connection and cursor provided
"""
global me
tcon,tcur = schema.connectToDatabase(me.config,me.logger)
connection = psycopg2.connect(me.dsn)
cursor = connection.cursor()
cursor.execute("DROP TABLE IF EXISTS foo")
connection.commit()
try:
cursor.execute("CREATE TABLE foo (id integer)")
connection.commit()
cursor.execute("SELECT * from foo")
connection.commit()
assert [] == cursor.fetchall()
tcur.execute("INSERT INTO foo (id) values(%s)",(666,))
tcon.commit()
cursor.execute("SELECT * from foo")
assert 666 == cursor.fetchone()[0]
finally:
cursor.execute("DROP TABLE IF EXISTS foo")
connection.commit()
connection.close()
def testSetupAndTeardownDatabase():
"""
testSetupAndTeardownDatabase():
- test that when we setupDatabase, we get all the expected tables
- test that when we teardownDatabase, we remove all the expected tables
"""
global me
tcon,tcur = schema.connectToDatabase(me.config,me.logger)
tcur.execute("DROP TABLE IF EXISTS %s CASCADE"%','.join(me.expectedTableNames))
tcon.commit()
try:
schema.setupDatabase(me.config,me.logger)
try:
for t in me.expectedTableNames:
# next line raises if the table does not exist
tcur.execute("SELECT count(*) from %s"%t)
tcon.commit()
count = tcur.fetchone()[0]
assert 0 == count
finally:
schema.teardownDatabase(me.config,me.logger)
for t in me.expectedTableNames:
try:
tcur.execute("SELECT count(*) from %s"%t)
assert False, 'Table %s still exists after teardownDatabase'%t
except psycopg2.ProgrammingError:
tcon.rollback()
except Exception,x:
assert False, 'Expected psycopg2.ProgrammingError, not %s: %s'%(type(x),x)
finally:
tcon.close()
updated = []
def testUpdateDatabase():
"""
testUpdateDatabase():
- check that we fire (only) the appropriate updateDefinition methods. Not much else is possible
"""
global me, updated
updated = []
expected = set(['reports','dumps','extensions','frames','processors','jobs'])
found = set([ x(logger=me.logger).name for x in schema.databaseObjectClassListForUpdate])
assert expected == found
class ReportsStub(schema.ReportsTable):
def __init__(self, logger, **kwargs):
super(ReportsStub,self).__init__(logger,**kwargs)
def updateDefinition(self,cursor):
updated.append(self.name)
class DumpsStub(schema.DumpsTable):
def __init__(self, logger, **kwargs):
super(DumpsStub,self).__init__(logger,**kwargs)
def updateDefinition(self,cursor):
updated.append(self.name)
schema.databaseObjectClassListForUpdate = []
schema.updateDatabase(me.config,me.logger)
assert [] == updated
schema.databaseObjectClassListForUpdate = [ReportsStub]
schema.updateDatabase(me.config,me.logger)
assert ['reports'] == updated
def testModuleCreatePartitions():
"""
testModuleCreatePartitions():
"""
global me
connection = psycopg2.connect(me.dsn)
try:
cursor = connection.cursor()
me.testDB.removeDB(me.config,me.logger)
me.testDB.createDB(me.config,me.logger)
me.config.startDate = dt.date(2008,1,1)
me.config.endDate = dt.date(2008,1,1)
reportSet = set(socorro_psg.tablesMatchingPattern('reports%',cursor))
extensionSet = set(socorro_psg.tablesMatchingPattern('extensions%',cursor))
frameSet0 = set(socorro_psg.tablesMatchingPattern('frames%',cursor))
schema.databaseObjectClassListForWeeklyPartitions = [schema.ExtensionsTable]
schema.createPartitions(me.config,me.logger)
moreReportSet = set(socorro_psg.tablesMatchingPattern('report%',cursor))-reportSet
moreExtensionSet = set(socorro_psg.tablesMatchingPattern('extensions%',cursor))-extensionSet
assert set(['reports_20071231']) == moreReportSet
assert set(['extensions_20071231']) == moreExtensionSet
frameSet = set(socorro_psg.tablesMatchingPattern('frames%',cursor))
assert frameSet0 == frameSet
schema.databaseObjectClassListForWeeklyPartitions = [schema.FramesTable]
schema.createPartitions(me.config,me.logger)
moreFrameSet = set(socorro_psg.tablesMatchingPattern('frames%',cursor))-frameSet
assert set(['frames_20071231']) == moreFrameSet
finally:
connection.close()
class TestDatabaseObject:
def setUp(self):
self.connection = psycopg2.connect(me.dsn)
def tearDown(self):
self.connection.close()
def testConstructor(self):
"""
TestDatabaseObject.testConstructor(self):
- check that default constructor works as expected
- check that constructor arguments are handled
"""
dbo = schema.DatabaseObject()
assert None == dbo.name
assert None == dbo.creationSql
assert None == dbo.logger
dbo = schema.DatabaseObject("name","aLogger","--nothing here",blather='skyte')
# and note that arbitrary kwarg is accepted. Don't check for ignored
assert 'name' == dbo.name
assert 'aLogger' == dbo.logger
assert '--nothing here' == dbo.creationSql
def testCreate(self):
"""
TestDatabaseObject.testCreate():
- check that we can create a table that inherits schema.DatabaseObject
- check that if we try to create it again, the call succeeds without disturbing the database
"""
class Foo(schema.DatabaseObject):
def __init__(self):
super(Foo,self).__init__(name='foo',logger=me.logger,creationSql='CREATE TABLE foo (name varchar)')
def additionalCreationProcedures(self,cursor):
cursor.execute("insert into foo values(%s)",(self.name,))
cursor = self.connection.cursor()
cursor.execute("DROP TABLE IF EXISTS foo CASCADE")
self.connection.commit()
try:
assert_raises(psycopg2.ProgrammingError, cursor.execute, 'SELECT name from foo')
self.connection.rollback()
testFoo = Foo()
testFoo.create(self.connection.cursor())
self.connection.commit()
cursor.execute('SELECT name from foo')
assert 'foo' == cursor.fetchall()[0][0]
self.connection.commit()
testFoo.create(self.connection.cursor())
self.connection.commit()
cursor.execute('SELECT name from foo')
assert 'foo' == cursor.fetchall()[0][0]
self.connection.commit()
finally:
cursor.execute("DROP TABLE IF EXISTS foo CASCADE")
self.connection.commit()
class TestTable:
def setUp(self):
self.connection = psycopg2.connect(me.dsn)
def tearDown(self):
self.connection.close()
def testCreateAndDrop(self):
"""
TestTable.testCreateAndDrop():
- check that we can in fact (create then) drop a table that inherits schema.Table
"""
class Foo(schema.Table):
def __init__(self):
super(Foo,self).__init__(name='foo',logger=me.logger,creationSql='CREATE TABLE foo (name varchar)')
cursor = self.connection.cursor()
cursor.execute("DROP TABLE IF EXISTS foo CASCADE")
self.connection.commit()
testFoo = Foo()
try:
assert_raises(psycopg2.ProgrammingError, cursor.execute, 'SELECT count(*)from foo')
self.connection.rollback()
testFoo = Foo()
testFoo.create(self.connection.cursor())
self.connection.commit()
cursor.execute('SELECT count(*) from foo')
assert 0 == cursor.fetchall()[0][0]
testFoo.drop(cursor)
self.connection.commit()
assert_raises(psycopg2.ProgrammingError, cursor.execute, 'SELECT count(*)from foo')
self.connection.rollback()
finally:
cursor.execute("DROP TABLE IF EXISTS foo CASCADE")
self.connection.commit()
# During maintenance on schema.py: If you add, remove or rename any of the tables in schema, make a parallel change here
# value[0] is True iff the table is a PartitionedTable; value[1] is the expectedSet of table names (including precursors) for each Table
hardCodedSchemaClasses = {
schema.BranchesTable:[False,set(['branches'])],
schema.DumpsTable:[True,set(['dumps'])],
schema.ExtensionsTable:[True,set(['extensions'])],
schema.FramesTable:[True,set(['frames'])],
schema.JobsTable:[False,set(['jobs', 'processors'])],
schema.MTBFConfigTable:[False,set(['mtbfconfig', 'productdims'])],
schema.MTBFFactsTable:[False,set(['mtbffacts', 'productdims'])],
schema.PriorityJobsTable:[False,set(['priorityjobs'])],
schema.ProcessorsTable:[False,set(['processors'])],
schema.ProductDimsTable:[False,set(['productdims'])],
schema.ReportsTable:[True,set(['reports'])],
schema.ServerStatusTable:[False,set(['server_status'])],
schema.SignatureDimsTable:[False,set(['signaturedims'])],
schema.TCByUrlConfigTable:[False,set(['tcbyurlconfig', 'productdims'])],
schema.TopCrashUrlFactsReportsTable:[False,set(['topcrashurlfactsreports', 'urldims', 'signaturedims', 'topcrashurlfacts', 'productdims'])],
schema.TopCrashUrlFactsTable:[False,set(['urldims', 'signaturedims', 'topcrashurlfacts', 'productdims'])],
schema.TopCrashersTable:[False,set(['topcrashers'])],
schema.UrlDimsTable:[False,set(['urldims'])],
}
schemaClasses = {}
def makeClassList():
global schemaClasses
for thing in dir(schema):
item = getattr(schema,thing)
try:
if issubclass(item, schema.Table):
if item in [schema.Table, schema.PartitionedTable]:
continue
if issubclass(item, schema.PartitionedTable):
schemaClasses[item] = schema.PartitionedTable
else:
schemaClasses[item] = schema.Table
except TypeError:
pass
assert set(hardCodedSchemaClasses.keys()) == set(schemaClasses.keys()), "You probably didn't update 'hardCodedSchemaClasses' in this test when you made a change in schema.py. Please do so now."
expectedPartitionedTables = set([x for x in hardCodedSchemaClasses.keys() if hardCodedSchemaClasses[x][0]])
seenPartitionedTables = set([x for x in schemaClasses.keys() if schema.PartitionedTable == schemaClasses[x]])
assert expectedPartitionedTables == seenPartitionedTables, "Expected: %s, got: %s"%(expectedPartitionedTables,seenPartitionedTables)
def printDbTablenames(tag,aCursor):
"""Debugging utility"""
all = socorro_psg.tablesMatchingPattern('%',aCursor)
some = [x for x in all if (x == 'server_status' or not '_' in x)]
some = [x for x in some if (not x in ['triggers','views','sequences','tables','domains','parameters','routines','schemata','attributes','columns'])]
some.sort()
print tag,', '.join(some)
def checkOneClass(aClass,aType):
global me
connection = psycopg2.connect(me.dsn)
cursor = connection.cursor()
table = aClass(logger = me.logger)
expectedList = []
expectedTableClasses = schema.getOrderedSetupList([aClass])
for t in expectedTableClasses:
expectedList.append(t(logger = me.logger))
try:
schema.teardownDatabase(me.config,me.logger)
matchingTables = [x for x in socorro_psg.tablesMatchingPattern(table.name+'%',cursor) if not x.endswith('_id_seq')]
assert [] == matchingTables ,'For class %s saw %s'%(table.name,matchingTables)
# call create
before = set(socorro_psg.tablesMatchingPattern('%',cursor))
ignore = [x for x in before if (x.startswith('pg_toast') or x.endswith('id_seq'))]
before -= set(ignore)
table.create(cursor)
connection.commit()
after = set(socorro_psg.tablesMatchingPattern('%',cursor))
ignore = [x for x in after if (x.startswith('pg_toast') or x.endswith('id_seq'))]
after -= set(ignore)
expectedDiff = hardCodedSchemaClasses[aClass][1]
assert expectedDiff == after - before, 'for %s: after-before=\n got:%s\nwanted:%s'%(table.name,after-before,expectedDiff)
# call drop
table.drop(cursor)
connection.commit()
afterDrop = set(socorro_psg.tablesMatchingPattern('%',cursor))
assert not table.name in afterDrop
finally:
cursor.execute("DROP TABLE IF EXISTS %s CASCADE"%(','.join([x.name for x in expectedList])))
connection.commit()
connection.close()
def testCreateAndDropEachTable():
"""
testCreateAndDropEachTable(): (slow=1)
Loop through every table in the classes we discovered in schema.py and
- check that the test is in sync with schema.py (in function makeClassList)
- check that create works for each Table or PartitionedTable in schema.py
- check that drop works for each Table or PartitionedTable in schema.py
"""
global schemaClasses
makeClassList()
for c in schemaClasses:
checkOneClass(c,schemaClasses[c])
| boudewijnrempt/HyvesDesktop | 3rdparty/socorro/socorro/unittest/database/testSchema.py | Python | gpl-2.0 | 19,160 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OrdersDashboardConfig(AppConfig):
label = 'orders_dashboard'
name = 'oscar.apps.dashboard.orders'
verbose_name = _('Orders dashboard')
| canhhs91/greenpointtrees | src/oscar/apps/dashboard/orders/config.py | Python | mit | 245 |
#John Bozzella, SoftDes 2015
#Function to import lyrics from genius.com with inputs of the URLs and titles
#Extra print statements in the actual function are for debugging
from pattern.web import *
sites = ['http://genius.com/Madvillain-accordion-lyrics' , 'http://genius.com/Madvillain-meat-grinder-lyrics' , 'http://genius.com/Madvillain-americas-most-blunted-lyrics' , 'http://genius.com/Madvillain-rainbows-lyrics' , 'http://genius.com/Madvillain-curls-lyrics' , 'http://genius.com/Madvillain-money-folder-lyrics' , 'http://genius.com/Madvillain-shadows-of-tomorrow-lyrics' , 'http://genius.com/Madvillain-figaro-lyrics' , 'http://genius.com/Madvillain-fancy-clown-lyrics' , 'http://genius.com/Madvillain-eye-lyrics' , 'http://genius.com/Madvillain-all-caps-lyrics' , 'http://genius.com/Madvillain-great-day-lyrics' , 'http://genius.com/Madvillain-rhinestone-cowboy-lyrics']
titles = ['Accordion' , 'Meat_Grinder' , 'Americas_Most_Blunted' , 'Rainbows' , 'Curls' , 'Money_Folder' , 'Shadows_of_Tomorrow' , 'Figaro' , 'Fancy_Clown' , 'Eye' , 'All_Caps' , 'Great_Day', 'Rhinestone_Cowboy']
def lyricsimport(sites,titles):
# originally this was just a script, hence the commented-out code
#from pattern.web import *
#define necessary lists
#sites = ['http://genius.com/Madvillain-accordion-lyrics' , 'http://genius.com/Madvillain-meat-grinder-lyrics' , 'http://genius.com/Madvillain-americas-most-blunted-lyrics' , 'http://genius.com/Madvillain-rainbows-lyrics' , 'http://genius.com/Madvillain-curls-lyrics' , 'http://genius.com/Madvillain-money-folder-lyrics' , 'http://genius.com/Madvillain-shadows-of-tomorrow-lyrics' , 'http://genius.com/Madvillain-figaro-lyrics' , 'http://genius.com/Madvillain-fancy-clown-lyrics' , 'http://genius.com/Madvillain-eye-lyrics' , 'http://genius.com/Madvillain-all-caps-lyrics' , 'http://genius.com/Madvillain-great-day-lyrics' , 'http://genius.com/Madvillain-rhinestone-cowboy-lyrics']
#titles = ['Accordion' , 'Meat_Grinder' , 'Americas_Most_Blunted' , 'Rainbows' , 'Curls' , 'Money_Folder' , 'Shadows_of_Tomorrow' , 'Figaro' , 'Fancy_Clown' , 'Eye' , 'All_Caps' , 'Great_Day', 'Rhinestone_Cowboy']
text = []
lines = []
lyrics = []
boundlines = [0,0]
print len(titles)
print len(sites)
for site in sites: #Create a list containing the HTML from each page as plaintext, split up by line
page_text = plaintext(URL(site).download()) #use a local variable so the titles list is not overwritten
text.append(page_text.splitlines())
for count , elem in enumerate(text): #Create a list where each element is the number of lines in each page
lines.append(len(text[count]))
print len(text)
print len(lines)
for count , elem in enumerate(text): #cut the plain text HTML into just the song lyrics
subtext = text[count]
for n in range(lines[count]): #determine which lines are actually the lyrics
if subtext[n] == 'Embed' and n<100: #this is done by checking for key words that appear in the HTML on each page
#print n
boundlines[0] = n+2
if subtext[n] == 'Edit the description to add:':
#print n
boundlines[1] = n
lyrics.append(subtext[boundlines[0] : boundlines[1]]) #create a list : each element is a list of the lyrics of each song from [sites] by line
print len(lyrics)
return lyrics
# call the function and print the lyrics to Great Day
lyrics = lyricsimport(sites,titles)
print lyrics[0]
print set(w.lower() for w in lyrics[0])
| bozzellaj/SoftwareDesignFall15 | MP1/lyricsimportworkfile.py | Python | mit | 3,457 |
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from driven.data_sets.expression_profile import ExpressionProfile
from driven.data_sets.fluxes import FluxConstraints
class ExpressionProfileTestCase(unittest.TestCase):
def test_difference(self):
genes = ["G1"]
conditions = ["T1", "T2", "T3", "T4"]
expression = np.zeros((1, 4))
expression[0] = [10, 11, 65, 109]
pvalues = np.zeros((1, 3))
pvalues[0] = [0.02, 0.048, 0.0012]
profile = ExpressionProfile(genes, conditions, expression, pvalues)
self.assertEqual(profile.differences(), {"G1": [0, 0, 1]})
def test_export_import(self):
genes = ["G1"]
conditions = ["T1", "T2", "T3", "T4"]
expression = np.zeros((1, 4))
expression[0] = [10, 11, 65, 109]
profile = ExpressionProfile(genes, conditions, expression)
data_frame = profile.data_frame
new_profile = ExpressionProfile.from_data_frame(data_frame)
self.assertEqual(profile, new_profile)
class FluxConstraintsTestCase(unittest.TestCase):
def test_export_import(self):
reaction_ids = ["R1", "R2", "R3"]
limits = np.zeros((3, 2))
limits[0] = [0, 10]
limits[1] = [0.5, 0.7]
limits[2] = [5.1, 5.2]
flux_constraints = FluxConstraints(reaction_ids, limits)
new_flux_constraints = FluxConstraints.from_data_frame(flux_constraints.data_frame, type="constraints")
print(new_flux_constraints.data_frame)
self.assertEqual(flux_constraints, new_flux_constraints)
| biosustain/driven | tests/test_data_sets.py | Python | apache-2.0 | 2,184 |
from opsy.exceptions import OpsyError
class OpsyMonitoringError(OpsyError):
"""Base class for exceptions in the monitoring plugin."""
class PollFailure(OpsyMonitoringError):
"""The poll failed."""
class BackendNotFound(OpsyMonitoringError):
"""Unable to load specified backend."""
| testeddoughnut/opsy | opsy/monitoring/exceptions.py | Python | mit | 299 |
#!/usr/bin/env python3
if __name__ == '__main__':
python_custom_file = open('../python-sort-locale-custom.py', 'w')
with open('../python-sort-locale.py') as python_file:
for python_line in python_file:
if 'import sys' in python_line:
python_custom_file.write('import re\n')
elif 'words = []' in python_line:
python_custom_file.write(' conversion = {\n')
sed_substitute_file = open('greek-substitute.sed', 'w')
sed_restore_file = open('greek-restore.sed', 'w')
with open('greek-substitute.tsv') as input_file:
for line in input_file:
if line != '\n' and line[0] != '#':
(char_src, char_dst) = line[:-1].split(' ')
sed_substitute_file.write('s/{}/{}/g\n'.format(char_src, char_dst))
sed_restore_file.write('s/{}/{}/g\n'.format(char_dst, char_src))  # invert the mapping so restore undoes substitute
python_custom_file.write(" '{}': '{}',\n".format(char_src, char_dst))
python_custom_file.write(' }\n')
python_custom_file.write(' substitute = {}\n')
python_custom_file.write(' restore = {}\n')
python_custom_file.write(' for char in conversion.keys():\n')
python_custom_file.write(" substitute[conversion[char]] = re.compile('{}'.format(char))\n")
python_custom_file.write(" restore[char] = re.compile('{}'.format(conversion[char]))\n")
elif 'words.append(word)' in python_line:
python_custom_file.write(' for repl in substitute.keys():\n')
python_custom_file.write(' word = re.sub(substitute[repl], repl, word)\n')
elif "print('{}'.format(word))" in python_line:
python_custom_file.write(' for repl in restore.keys():\n')
python_custom_file.write(' word = re.sub(restore[repl], repl, word)\n')
python_custom_file.write(python_line)
| OpenTaal/alphabetical-sort | filters/preprocess.py | Python | mit | 2,133 |
import unittest
import responses
import digitalocean
import json
from .BaseTest import BaseTest
class TestTags(BaseTest):
def setUp(self):
super(TestTags, self).setUp()
@responses.activate
def test_load(self):
data = self.load_from_file('tags/single.json')
url = self.base_url + "tags/awesome"
responses.add(responses.GET,
url,
body=data,
status=200,
content_type='application/json')
droplet_tag = digitalocean.Tag(name='awesome', token=self.token)
droplet_tag.load()
self.assert_get_url_equal(responses.calls[0].request.url, url)
self.assertEqual(droplet_tag.name,
"awesome")
@responses.activate
def test_create(self):
data = self.load_from_file('tags/single.json')
url = self.base_url + "tags"
responses.add(responses.POST,
url,
body=data,
status=201,
content_type='application/json')
droplet_tag = digitalocean.Tag(name='awesome', token=self.token)
droplet_tag.create()
self.assertEqual(responses.calls[0].request.url,
self.base_url + "tags")
self.assertEqual(droplet_tag.name, "awesome")
@responses.activate
def test_delete(self):
url = self.base_url + "tags/awesome"
responses.add(responses.DELETE,
url,
status=204,
content_type='application/json')
droplet_tag = digitalocean.Tag(name='awesome', token=self.token)
droplet_tag.delete()
self.assertEqual(responses.calls[0].request.url,
self.base_url + "tags/awesome")
self.assertEqual(droplet_tag.name, "awesome")
@responses.activate
def test_add_droplets(self):
url = self.base_url + "tags/awesome/resources"
responses.add(responses.POST,
url,
status=204,
content_type='application/json')
droplet_tag = digitalocean.Tag(name='awesome', token=self.token)
droplet_tag.add_droplets(["9569411"])
self.assertEqual(responses.calls[0].request.url,
self.base_url + "tags/awesome/resources")
@responses.activate
def test_remove_droplets(self):
url = self.base_url + "tags/awesome/resources"
responses.add(responses.DELETE,
url,
status=204,
content_type='application/json')
droplet_tag = digitalocean.Tag(name='awesome', token=self.token)
droplet_tag.remove_droplets(["9569411"])
self.assertEqual(responses.calls[0].request.url,
self.base_url + "tags/awesome/resources")
@responses.activate
def test_add_volume_snapshots(self):
url = self.base_url + "tags/awesome/resources"
responses.add(responses.POST,
url,
status=204,
content_type='application/json')
tag = digitalocean.Tag(name='awesome', token=self.token)
tag.add_snapshots(["9569411"])
self.assertEqual(responses.calls[0].request.url,
self.base_url + "tags/awesome/resources")
@responses.activate
def test_remove_volume_snapshots(self):
url = self.base_url + "tags/awesome/resources"
responses.add(responses.DELETE,
url,
status=204,
content_type='application/json')
tag = digitalocean.Tag(name='awesome', token=self.token)
tag.remove_snapshots(["9569411"])
self.assertEqual(responses.calls[0].request.url,
self.base_url + "tags/awesome/resources")
if __name__ == '__main__':
unittest.main()
| koalalorenzo/python-digitalocean | digitalocean/tests/test_tag.py | Python | lgpl-3.0 | 4,000 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from remotecv.detectors import CascadeLoaderDetector
HAIR_OFFSET = 0.12
class FaceDetector(CascadeLoaderDetector):
def __init__(self):
self.load_cascade_file(__file__, "haarcascade_frontalface_alt.xml")
def __add_hair_offset(self, top, height):
top = max(0, top - height * HAIR_OFFSET)
return top
def detect(self, image):
features = self.get_features(image)
points = []
if features:
for (left, top, width, height), _neighbors in features:
top = self.__add_hair_offset(top, height)
points.append([left, top, width, height])
return points
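# Hypothetical usage sketch (the image loading is an assumption, not shown in
# this module): each returned point is [left, top, width, height], with top
# shifted up by 12% of the face height so the crop keeps the hair.
# detector = FaceDetector()
# faces = detector.detect(image)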
| thumbor/remotecv | remotecv/detectors/face_detector/__init__.py | Python | mit | 907 |
from django.apps import AppConfig
class CoreConfig(AppConfig):
name = 'core'
| kkmsc17/smes | backend/core/apps.py | Python | agpl-3.0 | 84 |
#!/usr/bin/env python
# pylint: disable=W0212
from agate import utils
@utils.allow_tableset_proxy
def find(self, test):
"""
Find the first row that passes test.
:param test:
A function that takes a :class:`.Row` and returns :code:`True` if
it matches.
:type test:
:class:`function`
:returns:
A single :class:`.Row` if found, or `None`.
"""
for row in self._rows:
if test(row):
return row
return None
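# Hypothetical usage sketch, assuming this function is bound as a Table method
# elsewhere in agate and the table has a 'name' column:
# row = table.find(lambda r: r['name'] == 'Alice')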
| flother/agate | agate/table/find.py | Python | mit | 489 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and Summary Operations."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pprint
import random
import sys
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_logging_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
# The python wrapper for Assert is in control_flow_ops, as the Assert
# call relies on certain conditionals for its dependencies. Use
# control_flow_ops.Assert.
# Assert and Print are special symbols in python, so we must
# have an upper-case version of them.
#
# For users with Python 3 or Python 2.7
# with `from __future__ import print_function`, we could also allow lowercase.
# See https://github.com/tensorflow/tensorflow/issues/18053
# pylint: disable=invalid-name
@deprecated("2018-08-20", "Use tf.print instead of tf.Print. Note that "
"tf.print returns a no-output operator that directly "
"prints the output. Outside of defuns or eager mode, "
"this operator will not be executed unless it is "
"directly specified in session.run or used as a "
"control dependency for other operators. This is "
"only a concern in graph mode. Below is an example "
"of how to ensure tf.print executes in graph mode:\n"
"""```python
sess = tf.Session()
with sess.as_default():
tensor = tf.range(10)
print_op = tf.print(tensor)
with tf.control_dependencies([print_op]):
out = tf.add(tensor, tensor)
sess.run(out)
```
Additionally, to use tf.print in python 2.7, users must make sure to import
the following:
`from __future__ import print_function`
""")
@tf_export(v1=["Print"])
def Print(input_, data, message=None, first_n=None, summarize=None,
name=None):
"""Prints a list of tensors.
This is an identity op (behaves like `tf.identity`) with the side effect
of printing `data` when evaluating.
Note: This op prints to the standard error. It is not currently compatible
with jupyter notebook (printing to the notebook *server's* output, not into
the notebook).
Args:
input_: A tensor passed through this op.
data: A list of tensors to print out when op is evaluated.
message: A string, prefix of the error message.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and contents as `input_`.
"""
return gen_logging_ops._print(input_, data, message, first_n, summarize, name)
# pylint: enable=invalid-name
def _generate_placeholder_string(x, default_placeholder="{}"):
"""Generate and return a string that does not appear in `x`."""
placeholder = default_placeholder
rng = random.Random(5)
while placeholder in x:
placeholder = placeholder + str(rng.randint(0, 9))
return placeholder
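# Worked example (illustrative): for x == "no braces", the default "{}" does
# not appear in x, so "{}" is returned unchanged; if x already contains "{}",
# digits are appended (deterministically, via the seeded RNG) until the result
# no longer appears in x.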
def _is_filepath(output_stream):
"""Returns True if output_stream is a file path."""
return isinstance(output_stream, str) and output_stream.startswith("file://")
# Temporarily disable pylint g-doc-args error to allow giving more context
# about what the kwargs are.
# Because we are using arbitrary-length positional arguments, python 2
# does not support explicitly specifying the keyword arguments in the
# function definition.
# pylint: disable=g-doc-args
@tf_export("print")
def print_v2(*inputs, **kwargs):
"""Print the specified inputs.
Returns an operator that prints the specified inputs to a desired
output stream or logging level. The inputs may be dense or sparse Tensors,
primitive python objects, data structures that contain Tensors, and printable
python objects. Printed tensors will recursively show the first and last
`summarize` elements of each dimension.
With eager execution enabled and/or inside a `tf.contrib.eager.defun` this
operator will automatically execute, and users only need to call `tf.print`
without using the return value. When constructing graphs outside of a
`tf.contrib.eager.defun`, one must either include the returned op
in the input to `session.run`, or use the operator as a control dependency for
executed ops by specifying `with tf.control_dependencies([print_op])`.
@compatibility(python2)
In python 2.7, make sure to import the following:
`from __future__ import print_function`
@end_compatibility
Example:
Single-input usage:
```python
tf.enable_eager_execution()
tensor = tf.range(10)
tf.print(tensor, output_stream=sys.stderr)
```
(This prints "[0 1 2 ... 7 8 9]" to sys.stderr)
Multi-input usage:
```python
tf.enable_eager_execution()
tensor = tf.range(10)
tf.print("tensors:", tensor, {2: tensor * 2}, output_stream=sys.stdout)
```
(This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to
sys.stdout)
Usage in a defun:
```python
tf.enable_eager_execution()
@tf.contrib.eager.defun
def f():
tensor = tf.range(10)
tf.print(tensor, output_stream=sys.stderr)
return tensor
range_tensor = f()
```
(This prints "[0 1 2 ... 7 8 9]" to sys.stderr)
Usage when constructing graphs:
```python
sess = tf.Session()
with sess.as_default():
tensor = tf.range(10)
print_op = tf.print("tensors:", tensor, {2: tensor * 2},
output_stream=sys.stdout)
with tf.control_dependencies([print_op]):
tripled_tensor = tensor * 3
sess.run(tripled_tensor)
```
(This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to
sys.stdout)
Note: This op is only partially compatible with Jupyter notebooks and colabs.
Because it prints to the C++ standard out / standard error, this will go
in the notebook kernel's console output, not in the notebook cell output.
Args:
*inputs: Positional arguments that are the inputs to print. Inputs in the
printed output will be separated by spaces. Inputs may be python
primitives, tensors, data structures such as dicts and lists that
may contain tensors (with the data structures possibly nested in
arbitrary ways), and printable python objects.
output_stream: The output stream, logging level, or file to print to.
Defaults to sys.stderr, but sys.stdout, tf.logging.info,
tf.logging.warning, and tf.logging.error are also supported. To print to
a file, pass a string started with "file://" followed by the file path,
e.g., "file:///tmp/foo.out".
summarize: The first and last `summarize` elements within each dimension are
recursively printed per Tensor. If None, then the first 3 and last 3
elements of each dimension are printed for each tensor. If set to -1, it
will print all elements of every tensor.
name: A name for the operation (optional).
Returns:
A print operator that prints the specified inputs in the specified output
stream or logging level.
Raises:
ValueError: If an unsupported output stream is specified.
"""
# Because we are using arbitrary-length positional arguments, python 2
# does not support explicitly specifying the keyword arguments in the
# function definition. So, we manually get the keyword arguments w/ default
# values here.
output_stream = kwargs.pop("output_stream", sys.stderr)
name = kwargs.pop("name", None)
summarize = kwargs.pop("summarize", 3)
if kwargs:
raise ValueError("Unrecognized keyword arguments for tf.print: %s" % kwargs)
format_name = None
if name:
format_name = name + "_format"
# Match the C++ string constants representing the different output streams.
# Keep this updated!
output_stream_to_constant = {
sys.stdout: "stdout",
sys.stderr: "stderr",
tf_logging.INFO: "log(info)",
tf_logging.info: "log(info)",
tf_logging.WARN: "log(warning)",
tf_logging.warning: "log(warning)",
tf_logging.warn: "log(warning)",
tf_logging.ERROR: "log(error)",
tf_logging.error: "log(error)",
}
if _is_filepath(output_stream):
output_stream_string = output_stream
else:
output_stream_string = output_stream_to_constant.get(output_stream)
if not output_stream_string:
raise ValueError(
"Unsupported output stream, logging level, or file: " +
str(output_stream) + ". Supported streams are sys.stdout, "
"sys.stderr, tf.logging.info, "
"tf.logging.warning, tf.logging.error. " +
"File needs to be in the form of 'file://<filepath>'.")
# If we are only printing a single string scalar, there is no need to format
if (len(inputs) == 1 and tensor_util.is_tensor(inputs[0])
and (not isinstance(inputs[0], sparse_tensor.SparseTensor))
and inputs[0].shape and (inputs[0].dtype == dtypes.string)):
formatted_string = inputs[0]
# Otherwise, we construct an appropriate template for the tensors we are
# printing, and format the template using those tensors.
else:
# For each input to this print function, we extract any nested tensors,
# and construct an appropriate template to format representing the
# printed input.
templates = []
tensors = []
tensor_free_structure = nest.map_structure(
lambda x: "" if tensor_util.is_tensor(x) else x,
inputs)
tensor_free_template = " ".join(pprint.pformat(x)
for x in tensor_free_structure)
placeholder = _generate_placeholder_string(tensor_free_template)
for input_ in inputs:
placeholders = []
# Use the nest utilities to flatten & process any nested elements in this
# input. The placeholder for a tensor in the template should be the
# placeholder string, and the placeholder for a non-tensor can just be
# the printed value of the non-tensor itself.
for x in nest.flatten(input_):
# support sparse tensors
if isinstance(x, sparse_tensor.SparseTensor):
tensors.extend([x.indices, x.values, x.dense_shape])
placeholders.append(
"SparseTensor(indices={}, values={}, shape={})".format(
placeholder, placeholder, placeholder)
)
elif tensor_util.is_tensor(x):
tensors.append(x)
placeholders.append(placeholder)
else:
placeholders.append(x)
if isinstance(input_, six.string_types):
# If the current input to format/print is a normal string, that string
# can act as the template.
cur_template = input_
else:
# We pack the placeholders into a data structure that matches the
# input data structure format, then format that data structure
# into a string template.
#
# NOTE: We must use pprint.pformat here for building the template for
# unordered data structures such as `dict`, because `str` doesn't
# guarantee orderings, while pprint prints in sorted order. pprint
# will match the ordering of `nest.flatten`.
# This even works when nest.flatten reorders OrderedDicts, because
# pprint is printing *after* the OrderedDicts have been reordered.
cur_template = pprint.pformat(
nest.pack_sequence_as(input_, placeholders))
templates.append(cur_template)
# We join the templates for the various inputs into a single larger
# template. We also remove all quotes surrounding the placeholders, so that
# the formatted/printed output will not contain quotes around tensors.
# (example of where these quotes might appear: if we have added a
# placeholder string into a list, then pretty-formatted that list)
template = " ".join(templates)
template = template.replace("'" + placeholder + "'", placeholder)
formatted_string = string_ops.string_format(
inputs=tensors, template=template, placeholder=placeholder,
summarize=summarize,
name=format_name)
return gen_logging_ops.print_v2(formatted_string,
output_stream=output_stream_string,
name=name)
# pylint: enable=g-doc-args
@ops.RegisterGradient("Print")
def _PrintGrad(op, *grad):
return list(grad) + [None] * (len(op.inputs) - 1)
def _Collect(val, collections, default_collections):
if collections is None:
collections = default_collections
for key in collections:
ops.add_to_collection(key, val)
@deprecated(
"2016-11-30", "Please switch to tf.summary.histogram. Note that "
"tf.summary.histogram uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def histogram_summary(tag, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
This op is deprecated. Please switch to tf.summary.histogram.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
val = gen_logging_ops.histogram_summary(
tag=tag, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.image. Note that "
"tf.summary.image uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, the max_images "
"argument was renamed to max_outputs.")
def image_summary(tag, tensor, max_images=3, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with images.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_images` is 1, the summary value tag is '*tag*/image'.
* If `max_images` is greater than 1, the summary value tags are
generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
val = gen_logging_ops.image_summary(
tag=tag, tensor=tensor, max_images=max_images, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.audio. Note that "
"tf.summary.audio uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def audio_summary(tag,
tensor,
sample_rate,
max_outputs=3,
collections=None,
name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
This op is deprecated. Please switch to tf.summary.audio.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
sample_rate = ops.convert_to_tensor(sample_rate, dtype=dtypes.float32,
name="sample_rate")
val = gen_logging_ops.audio_summary_v2(
tag=tag,
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge.")
def merge_summary(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op is deprecated. Please switch to tf.summary.merge, which has identical
behavior.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
with ops.name_scope(name, "MergeSummary", inputs):
val = gen_logging_ops.merge_summary(inputs=inputs, name=name)
_Collect(val, collections, [])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.")
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
This op is deprecated. Please switch to tf.summary.merge_all, which has
identical behavior.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = ops.get_collection(key)
if not summary_ops:
return None
else:
return merge_summary(summary_ops)
def get_summary_op():
"""Returns a single Summary op that would run all summaries.
Either existing one from `SUMMARY_OP` collection or merges all existing
summaries.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is not None:
if summary_op:
summary_op = summary_op[0]
else:
summary_op = None
if summary_op is None:
summary_op = merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
return summary_op
@deprecated(
"2016-11-30", "Please switch to tf.summary.scalar. Note that "
"tf.summary.scalar uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, passing a "
"tensor or list of tags to a scalar summary op is no longer "
"supported.")
def scalar_summary(tags, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with scalar values.
This op is deprecated. Please switch to tf.summary.scalar.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The input `tags` and `values` must have the same shape. The generated
summary has a summary value for each tag-value pair in `tags` and `values`.
Args:
tags: A `string` `Tensor`. Tags for the summaries.
values: A real numeric Tensor. Values for the summaries.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
val = gen_logging_ops.scalar_summary(tags=tags, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
ops.NotDifferentiable("TensorSummary")
ops.NotDifferentiable("TensorSummaryV2")
ops.NotDifferentiable("Timestamp")
|
asimshankar/tensorflow
|
tensorflow/python/ops/logging_ops.py
|
Python
|
apache-2.0
| 25,649
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cross-platform utilities for creating subprocesses.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
import platform
import subprocess
import traceback
from typing import TYPE_CHECKING
# On Windows, we need to use shell=True when creating subprocesses for binary
# paths to be resolved correctly.
force_shell = platform.system() == 'Windows'
# We mimic the interface of the standard Python subprocess module.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
CalledProcessError = subprocess.CalledProcessError
if TYPE_CHECKING:
call = subprocess.call
check_call = subprocess.check_call
check_output = subprocess.check_output
Popen = subprocess.Popen
else:
def call(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
try:
out = subprocess.call(*args, **kwargs)
except OSError:
raise RuntimeError("Executable {} not found".format(args[0]))
except subprocess.CalledProcessError as error:
if isinstance(args, tuple) and (args[0][2] == "pip"):
raise RuntimeError( \
"Full traceback: {}\n Pip install failed for package: {} \
\n Output from execution of subprocess: {}" \
          .format(traceback.format_exc(), args[0][6], error.output))
else:
raise RuntimeError("Full trace: {}\
\n Output of the failed child process: {} " \
.format(traceback.format_exc(), error.output))
return out
def check_call(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
try:
out = subprocess.check_call(*args, **kwargs)
except OSError:
raise RuntimeError("Executable {} not found".format(args[0]))
except subprocess.CalledProcessError as error:
if isinstance(args, tuple) and (args[0][2] == "pip"):
raise RuntimeError( \
"Full traceback: {} \n Pip install failed for package: {} \
\n Output from execution of subprocess: {}" \
.format(traceback.format_exc(), args[0][6], error.output))
else:
raise RuntimeError("Full trace: {} \
\n Output of the failed child process: {}" \
.format(traceback.format_exc(), error.output))
return out
def check_output(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
try:
out = subprocess.check_output(*args, **kwargs)
except OSError:
raise RuntimeError("Executable {} not found".format(args[0]))
except subprocess.CalledProcessError as error:
if isinstance(args, tuple) and (args[0][2] == "pip"):
raise RuntimeError( \
"Full traceback: {} \n Pip install failed for package: {} \
\n Output from execution of subprocess: {}" \
.format(traceback.format_exc(), args[0][6], error.output))
else:
raise RuntimeError("Full trace: {}, \
output of the failed child process {} "\
.format(traceback.format_exc(), error.output))
return out
def Popen(*args, **kwargs):
if force_shell:
kwargs['shell'] = True
return subprocess.Popen(*args, **kwargs)
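# Hedged usage note (added comment, not part of the original module): these
# wrappers are drop-in replacements for their stdlib counterparts, e.g.
#
#   from apache_beam.utils import processes
#   processes.check_output(['python', '--version'])
#
# On Windows, shell=True is forced so that .bat/.cmd launchers resolve.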
|
lukecwik/incubator-beam
|
sdks/python/apache_beam/utils/processes.py
|
Python
|
apache-2.0
| 3,871
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# friday documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of the current working
# directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import friday
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Friday'
copyright = u"2017, Isaac Luke Smith"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = friday.__version__
# The full version, including alpha/beta/rc tags.
release = friday.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'fridaydoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'friday.tex',
u'Friday Documentation',
u'Isaac Luke Smith', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'friday',
u'Friday Documentation',
[u'Isaac Luke Smith'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'friday',
u'Friday Documentation',
u'Isaac Luke Smith',
'friday',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
Zenohm/Friday
|
docs/conf.py
|
Python
|
mit
| 8,390
|
import re
import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
with open(os.path.join(here, 'pyvac', '__init__.py')) as v_file:
version = re.compile(r".*__version__ = '(.*?)'",
re.S).match(v_file.read()).group(1)
requires = [
'pyramid',
'SQLAlchemy',
'pyramid_jinja2',
'pyramid_tm',
'zope.sqlalchemy',
'celery',
'kombu',
'simplejson >=2.1',
'jsonschema >=0.7',
'pyyaml',
'cryptacular',
'passlib',
'caldav',
'icalendar',
'python-ldap',
'workalendar',
'psycopg2',
'translationstring==1.1',
# dev only
# 'waitress',
]
tests_require = ['nose', 'mock', 'tox', 'freezegun']
if sys.version_info[:2] < (2, 7):
requires.extend(['logutils'])
tests_require += ['unittest2']
extras_require = {
'test': tests_require,
}
data_files = []
setup(name='pyvac',
version=version,
description='pyvac',
long_description=README + '\n\n' + CHANGES,
classifiers=["Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='pyvac',
install_requires=requires,
tests_require=tests_require,
extras_require=extras_require,
entry_points="""\
[paste.app_factory]
main = pyvac:main
[console_scripts]
pyvac_install = pyvac.bin.install:main
pyvac_shell = pyvac.bin.shell:main
pyvac_celeryd = pyvac.bin.celerycmd:celeryd
pyvac_import = pyvac.bin.importldap:main
pyvac_replay = pyvac.bin.replay:main
""",
data_files=data_files,
)
|
doyousoft/pyvac
|
setup.py
|
Python
|
bsd-3-clause
| 2,082
|
# -*- coding: utf-8 -*-
# models.py ---
# created: 2012-03-13 23:08:13
#
from django.db import models
class User(models.Model):
name = models.CharField(max_length=60)
age = models.IntegerField()
class Meta:
db_table = 'user'
#
# models.py ends here
|
wuher/devil
|
example/userdb/api/models.py
|
Python
|
mit
| 280
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import pytest
from kubernetes_tests.test_base import EXECUTOR, TestBase
@pytest.mark.skipif(EXECUTOR != 'KubernetesExecutor', reason="Only runs on KubernetesExecutor")
class TestKubernetesExecutor(TestBase):
def test_integration_run_dag(self):
dag_id = 'example_kubernetes_executor'
dag_run_id, execution_date = self.start_job_in_kubernetes(dag_id, self.host)
print(f"Found the job with execution_date {execution_date}")
# Wait some time for the operator to complete
self.monitor_task(
host=self.host,
dag_run_id=dag_run_id,
dag_id=dag_id,
task_id='start_task',
expected_final_state='success',
timeout=300,
)
self.ensure_dag_expected_state(
host=self.host,
execution_date=execution_date,
dag_id=dag_id,
expected_final_state='success',
timeout=300,
)
def test_integration_run_dag_with_scheduler_failure(self):
dag_id = 'example_kubernetes_executor'
dag_run_id, execution_date = self.start_job_in_kubernetes(dag_id, self.host)
self._delete_airflow_pod("scheduler")
time.sleep(10) # give time for pod to restart
# Wait some time for the operator to complete
self.monitor_task(
host=self.host,
dag_run_id=dag_run_id,
dag_id=dag_id,
task_id='start_task',
expected_final_state='success',
timeout=300,
)
self.monitor_task(
host=self.host,
dag_run_id=dag_run_id,
dag_id=dag_id,
task_id='other_namespace_task',
expected_final_state='success',
timeout=300,
)
self.ensure_dag_expected_state(
host=self.host,
execution_date=execution_date,
dag_id=dag_id,
expected_final_state='success',
timeout=300,
)
assert self._num_pods_in_namespace('test-namespace') == 0, "failed to delete pods in other namespace"
|
Acehaidrey/incubator-airflow
|
kubernetes_tests/test_kubernetes_executor.py
|
Python
|
apache-2.0
| 2,909
|
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Float,
Integer,
Bool,
MinMax,
Set,
NoneSet,
String,
Alias,
)
from openpyxl.descriptors.excel import Coordinate, Percentage
from openpyxl.descriptors.nested import (
EmptyTag
)
from openpyxl.descriptors.excel import ExtensionList as OfficeArtExtensionList
from .colors import ColorChoiceDescriptor
from .fill import (
GradientFillProperties,
BlipFillProperties,
PatternFillProperties,
)
from .line import LineProperties
from openpyxl.styles.colors import Color
from openpyxl.xml.constants import DRAWING_NS
class Point2D(Serialisable):
x = Coordinate()
y = Coordinate()
def __init__(self,
x=None,
y=None,
):
self.x = x
self.y = y
class PositiveSize2D(Serialisable):
"""
Dimensions in EMUs
"""
cx = Integer()
width = Alias('cx')
cy = Integer()
height = Alias('cy')
def __init__(self,
cx=None,
cy=None,
):
self.cx = cx
self.cy = cy
class Transform2D(Serialisable):
tagname = "xfrm"
rot = Integer(allow_none=True)
flipH = Bool(allow_none=True)
flipV = Bool(allow_none=True)
off = Typed(expected_type=Point2D, allow_none=True)
ext = Typed(expected_type=PositiveSize2D, allow_none=True)
__elements__ = ('off', 'ext')
def __init__(self,
rot=None,
flipH=None,
flipV=None,
off=None,
ext=None,
):
self.rot = rot
self.flipH = flipH
self.flipV = flipV
self.off = off
self.ext = ext
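# Hedged usage sketch (added comment, not part of the original module):
# building a basic transform with EMU integer dimensions, e.g. a one-inch
# square (914400 EMUs) placed at the origin:
#
#   xfrm = Transform2D(off=Point2D(x=0, y=0),
#                      ext=PositiveSize2D(cx=914400, cy=914400))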
class SphereCoords(Serialisable):
lat = Typed(expected_type=Integer)
lon = Typed(expected_type=Integer)
rev = Typed(expected_type=Integer)
def __init__(self,
lat=None,
lon=None,
rev=None,
):
self.lat = lat
self.lon = lon
self.rev = rev
class Camera(Serialisable):
prst = Typed(expected_type=Set(values=(['legacyObliqueTopLeft',
'legacyObliqueTop', 'legacyObliqueTopRight', 'legacyObliqueLeft',
'legacyObliqueFront', 'legacyObliqueRight', 'legacyObliqueBottomLeft',
'legacyObliqueBottom', 'legacyObliqueBottomRight',
'legacyPerspectiveTopLeft', 'legacyPerspectiveTop',
'legacyPerspectiveTopRight', 'legacyPerspectiveLeft',
'legacyPerspectiveFront', 'legacyPerspectiveRight',
'legacyPerspectiveBottomLeft', 'legacyPerspectiveBottom',
'legacyPerspectiveBottomRight', 'orthographicFront', 'isometricTopUp',
'isometricTopDown', 'isometricBottomUp', 'isometricBottomDown',
'isometricLeftUp', 'isometricLeftDown', 'isometricRightUp',
'isometricRightDown', 'isometricOffAxis1Left', 'isometricOffAxis1Right',
'isometricOffAxis1Top', 'isometricOffAxis2Left',
'isometricOffAxis2Right', 'isometricOffAxis2Top',
'isometricOffAxis3Left', 'isometricOffAxis3Right',
'isometricOffAxis3Bottom', 'isometricOffAxis4Left',
'isometricOffAxis4Right', 'isometricOffAxis4Bottom', 'obliqueTopLeft',
'obliqueTop', 'obliqueTopRight', 'obliqueLeft', 'obliqueRight',
'obliqueBottomLeft', 'obliqueBottom', 'obliqueBottomRight',
'perspectiveFront', 'perspectiveLeft', 'perspectiveRight',
'perspectiveAbove', 'perspectiveBelow', 'perspectiveAboveLeftFacing',
'perspectiveAboveRightFacing', 'perspectiveContrastingLeftFacing',
'perspectiveContrastingRightFacing', 'perspectiveHeroicLeftFacing',
'perspectiveHeroicRightFacing', 'perspectiveHeroicExtremeLeftFacing',
'perspectiveHeroicExtremeRightFacing', 'perspectiveRelaxed',
'perspectiveRelaxedModerately'])))
fov = Typed(expected_type=Integer)
zoom = Typed(expected_type=Percentage, allow_none=True)
rot = Typed(expected_type=SphereCoords, allow_none=True)
def __init__(self,
prst=None,
fov=None,
zoom=None,
rot=None,
):
self.prst = prst
self.fov = fov
self.zoom = zoom
self.rot = rot
class LightRig(Serialisable):
rig = Typed(expected_type=Set(values=(['legacyFlat1', 'legacyFlat2',
'legacyFlat3', 'legacyFlat4', 'legacyNormal1', 'legacyNormal2',
'legacyNormal3', 'legacyNormal4', 'legacyHarsh1', 'legacyHarsh2',
'legacyHarsh3', 'legacyHarsh4', 'threePt', 'balanced', 'soft', 'harsh',
'flood', 'contrasting', 'morning', 'sunrise', 'sunset', 'chilly',
'freezing', 'flat', 'twoPt', 'glow', 'brightRoom'])))
dir = Typed(expected_type=Set(values=(['tl', 't', 'tr', 'l', 'r', 'bl', 'b', 'br'])))
rot = Typed(expected_type=SphereCoords, allow_none=True)
def __init__(self,
rig=None,
dir=None,
rot=None,
):
self.rig = rig
self.dir = dir
self.rot = rot
class Vector3D(Serialisable):
dx = Typed(expected_type=Coordinate, )
dy = Typed(expected_type=Coordinate, )
dz = Typed(expected_type=Coordinate, )
def __init__(self,
dx=None,
dy=None,
dz=None,
):
self.dx = dx
self.dy = dy
self.dz = dz
class Point3D(Serialisable):
x = Typed(expected_type=Coordinate, )
y = Typed(expected_type=Coordinate, )
z = Typed(expected_type=Coordinate, )
def __init__(self,
x=None,
y=None,
z=None,
):
self.x = x
self.y = y
self.z = z
class Backdrop(Serialisable):
anchor = Typed(expected_type=Point3D, )
norm = Typed(expected_type=Vector3D, )
up = Typed(expected_type=Vector3D, )
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
def __init__(self,
anchor=None,
norm=None,
up=None,
extLst=None,
):
self.anchor = anchor
self.norm = norm
self.up = up
self.extLst = extLst
class Scene3D(Serialisable):
camera = Typed(expected_type=Camera, )
lightRig = Typed(expected_type=LightRig, )
backdrop = Typed(expected_type=Backdrop, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
def __init__(self,
camera=None,
lightRig=None,
backdrop=None,
extLst=None,
):
self.camera = camera
self.lightRig = lightRig
self.backdrop = backdrop
self.extLst = extLst
class Bevel(Serialisable):
w = Typed(expected_type=Integer())
h = Typed(expected_type=Integer())
prst = Typed(expected_type=Set(values=(['relaxedInset', 'circle',
'slope', 'cross', 'angle', 'softRound', 'convex', 'coolSlant', 'divot',
'riblet', 'hardEdge', 'artDeco'])))
def __init__(self,
w=None,
h=None,
prst=None,
):
self.w = w
self.h = h
self.prst = prst
class Shape3D(Serialisable):
z = Typed(expected_type=Coordinate, allow_none=True)
extrusionH = Typed(expected_type=Integer())
contourW = Typed(expected_type=Integer())
prstMaterial = Typed(expected_type=Set(values=(['legacyMatte',
'legacyPlastic', 'legacyMetal', 'legacyWireframe', 'matte', 'plastic',
'metal', 'warmMatte', 'translucentPowder', 'powder', 'dkEdge',
'softEdge', 'clear', 'flat', 'softmetal'])))
bevelT = Typed(expected_type=Bevel, allow_none=True)
bevelB = Typed(expected_type=Bevel, allow_none=True)
extrusionClr = Typed(expected_type=Color, allow_none=True)
contourClr = Typed(expected_type=Color, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
def __init__(self,
z=None,
extrusionH=None,
contourW=None,
prstMaterial=None,
bevelT=None,
bevelB=None,
extrusionClr=None,
contourClr=None,
extLst=None,
):
self.z = z
self.extrusionH = extrusionH
self.contourW = contourW
self.prstMaterial = prstMaterial
self.bevelT = bevelT
self.bevelB = bevelB
self.extrusionClr = extrusionClr
self.contourClr = contourClr
self.extLst = extLst
class Path2D(Serialisable):
w = Float()
h = Float()
fill = NoneSet(values=(['norm', 'lighten', 'lightenLess', 'darken', 'darkenLess']))
stroke = Bool(allow_none=True)
extrusionOk = Bool(allow_none=True)
def __init__(self,
w=None,
h=None,
fill=None,
stroke=None,
extrusionOk=None,
):
self.w = w
self.h = h
self.fill = fill
self.stroke = stroke
self.extrusionOk = extrusionOk
class Path2DList(Serialisable):
path = Typed(expected_type=Path2D, allow_none=True)
def __init__(self,
path=None,
):
self.path = path
class GeomRect(Serialisable):
l = Coordinate()
t = Coordinate()
r = Coordinate()
b = Coordinate()
def __init__(self,
l=None,
t=None,
r=None,
b=None,
):
self.l = l
self.t = t
self.r = r
self.b = b
class AdjPoint2D(Serialisable):
x = Coordinate()
y = Coordinate()
def __init__(self,
x=None,
y=None,
):
self.x = x
self.y = y
class ConnectionSite(Serialisable):
ang = MinMax(min=0, max=360) # guess work, can also be a name
pos = Typed(expected_type=AdjPoint2D, )
def __init__(self,
ang=None,
pos=None,
):
self.ang = ang
self.pos = pos
class ConnectionSiteList(Serialisable):
cxn = Typed(expected_type=ConnectionSite, allow_none=True)
def __init__(self,
cxn=None,
):
self.cxn = cxn
class AdjustHandleList(Serialisable):
pass
class GeomGuide(Serialisable):
name = String()
fmla = String()
def __init__(self,
name=None,
fmla=None,
):
self.name = name
self.fmla = fmla
class GeomGuideList(Serialisable):
gd = Typed(expected_type=GeomGuide, allow_none=True)
def __init__(self,
gd=None,
):
self.gd = gd
class CustomGeometry2D(Serialisable):
avLst = Typed(expected_type=GeomGuideList, allow_none=True)
gdLst = Typed(expected_type=GeomGuideList, allow_none=True)
ahLst = Typed(expected_type=AdjustHandleList, allow_none=True)
cxnLst = Typed(expected_type=ConnectionSiteList, allow_none=True)
rect = Typed(expected_type=GeomRect, allow_none=True)
pathLst = Typed(expected_type=Path2DList, )
def __init__(self,
avLst=None,
gdLst=None,
ahLst=None,
cxnLst=None,
rect=None,
pathLst=None,
):
self.avLst = avLst
self.gdLst = gdLst
self.ahLst = ahLst
self.cxnLst = cxnLst
self.rect = rect
self.pathLst = pathLst
class PresetGeometry2D(Serialisable):
namespace = DRAWING_NS
prst = Set(values=(
['line', 'lineInv', 'triangle', 'rtTriangle', 'rect',
'diamond', 'parallelogram', 'trapezoid', 'nonIsoscelesTrapezoid',
'pentagon', 'hexagon', 'heptagon', 'octagon', 'decagon', 'dodecagon',
'star4', 'star5', 'star6', 'star7', 'star8', 'star10', 'star12',
'star16', 'star24', 'star32', 'roundRect', 'round1Rect',
'round2SameRect', 'round2DiagRect', 'snipRoundRect', 'snip1Rect',
'snip2SameRect', 'snip2DiagRect', 'plaque', 'ellipse', 'teardrop',
'homePlate', 'chevron', 'pieWedge', 'pie', 'blockArc', 'donut',
'noSmoking', 'rightArrow', 'leftArrow', 'upArrow', 'downArrow',
'stripedRightArrow', 'notchedRightArrow', 'bentUpArrow',
'leftRightArrow', 'upDownArrow', 'leftUpArrow', 'leftRightUpArrow',
'quadArrow', 'leftArrowCallout', 'rightArrowCallout', 'upArrowCallout',
'downArrowCallout', 'leftRightArrowCallout', 'upDownArrowCallout',
'quadArrowCallout', 'bentArrow', 'uturnArrow', 'circularArrow',
'leftCircularArrow', 'leftRightCircularArrow', 'curvedRightArrow',
'curvedLeftArrow', 'curvedUpArrow', 'curvedDownArrow', 'swooshArrow',
'cube', 'can', 'lightningBolt', 'heart', 'sun', 'moon', 'smileyFace',
'irregularSeal1', 'irregularSeal2', 'foldedCorner', 'bevel', 'frame',
'halfFrame', 'corner', 'diagStripe', 'chord', 'arc', 'leftBracket',
'rightBracket', 'leftBrace', 'rightBrace', 'bracketPair', 'bracePair',
'straightConnector1', 'bentConnector2', 'bentConnector3',
'bentConnector4', 'bentConnector5', 'curvedConnector2',
'curvedConnector3', 'curvedConnector4', 'curvedConnector5', 'callout1',
'callout2', 'callout3', 'accentCallout1', 'accentCallout2',
'accentCallout3', 'borderCallout1', 'borderCallout2', 'borderCallout3',
'accentBorderCallout1', 'accentBorderCallout2', 'accentBorderCallout3',
'wedgeRectCallout', 'wedgeRoundRectCallout', 'wedgeEllipseCallout',
'cloudCallout', 'cloud', 'ribbon', 'ribbon2', 'ellipseRibbon',
'ellipseRibbon2', 'leftRightRibbon', 'verticalScroll',
'horizontalScroll', 'wave', 'doubleWave', 'plus', 'flowChartProcess',
'flowChartDecision', 'flowChartInputOutput',
'flowChartPredefinedProcess', 'flowChartInternalStorage',
'flowChartDocument', 'flowChartMultidocument', 'flowChartTerminator',
'flowChartPreparation', 'flowChartManualInput',
'flowChartManualOperation', 'flowChartConnector', 'flowChartPunchedCard',
'flowChartPunchedTape', 'flowChartSummingJunction', 'flowChartOr',
'flowChartCollate', 'flowChartSort', 'flowChartExtract',
'flowChartMerge', 'flowChartOfflineStorage', 'flowChartOnlineStorage',
'flowChartMagneticTape', 'flowChartMagneticDisk',
'flowChartMagneticDrum', 'flowChartDisplay', 'flowChartDelay',
'flowChartAlternateProcess', 'flowChartOffpageConnector',
'actionButtonBlank', 'actionButtonHome', 'actionButtonHelp',
'actionButtonInformation', 'actionButtonForwardNext',
'actionButtonBackPrevious', 'actionButtonEnd', 'actionButtonBeginning',
'actionButtonReturn', 'actionButtonDocument', 'actionButtonSound',
'actionButtonMovie', 'gear6', 'gear9', 'funnel', 'mathPlus', 'mathMinus',
'mathMultiply', 'mathDivide', 'mathEqual', 'mathNotEqual', 'cornerTabs',
'squareTabs', 'plaqueTabs', 'chartX', 'chartStar', 'chartPlus']))
avLst = Typed(expected_type=GeomGuideList, allow_none=True)
def __init__(self,
prst=None,
avLst=None,
):
self.prst = prst
self.avLst = avLst
class FontReference(Serialisable):
idx = NoneSet(values=(['major', 'minor']))
def __init__(self,
idx=None,
):
self.idx = idx
class StyleMatrixReference(Serialisable):
idx = Integer()
def __init__(self,
idx=None,
):
self.idx = idx
class ShapeStyle(Serialisable):
lnRef = Typed(expected_type=StyleMatrixReference, )
fillRef = Typed(expected_type=StyleMatrixReference, )
effectRef = Typed(expected_type=StyleMatrixReference, )
fontRef = Typed(expected_type=FontReference, )
def __init__(self,
lnRef=None,
fillRef=None,
effectRef=None,
fontRef=None,
):
self.lnRef = lnRef
self.fillRef = fillRef
self.effectRef = effectRef
self.fontRef = fontRef
|
aragos/tichu-tournament
|
python/openpyxl/drawing/shapes.py
|
Python
|
mit
| 17,855
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG demonstrating the usage of BranchPythonOperator with depends_on_past=True, where tasks may be run
or skipped on alternating runs.
"""
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.utils.dates import days_ago
args = {
'owner': 'airflow',
'start_date': days_ago(2),
'depends_on_past': True,
}
dag = DAG(
dag_id='example_branch_dop_operator_v3',
schedule_interval='*/1 * * * *',
default_args=args,
tags=['example']
)
def should_run(**kwargs):
"""
    Determine which dummy_task should be run based on whether the execution date's minute is even or odd.
:param dict kwargs: Context
:return: Id of the task to run
:rtype: str
"""
print('------------- exec dttm = {} and minute = {}'.
format(kwargs['execution_date'], kwargs['execution_date'].minute))
if kwargs['execution_date'].minute % 2 == 0:
return "dummy_task_1"
else:
return "dummy_task_2"
cond = BranchPythonOperator(
task_id='condition',
python_callable=should_run,
dag=dag,
)
dummy_task_1 = DummyOperator(task_id='dummy_task_1', dag=dag)
dummy_task_2 = DummyOperator(task_id='dummy_task_2', dag=dag)
cond >> [dummy_task_1, dummy_task_2]
|
spektom/incubator-airflow
|
airflow/example_dags/example_branch_python_dop_operator_3.py
|
Python
|
apache-2.0
| 2,110
|
from sqlalchemy import create_engine
import json
# TODO: Fix usage of global
engine = None
def loadDB(user, password, hostname, dbname):
global engine
engine = create_engine('postgresql+psycopg2://{}:{}@{}/{}'.format(user, password, hostname, dbname))
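# Hedged sketch for the TODO above (not part of the original module): one way
# to drop the global is to return the engine and let callers keep it:
#
#   def load_db(user, password, hostname, dbname):
#       return create_engine(
#           'postgresql+psycopg2://{}:{}@{}/{}'.format(
#               user, password, hostname, dbname))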
|
j-nguyen/FractalBot
|
cogs/utils/db.py
|
Python
|
apache-2.0
| 262
|
from util import hook, timesince
import time
import re
db_ready = False
def db_init(db):
    # Without this declaration, the assignment below bound a *local*
    # db_ready, so the module-level flag never flipped and the tables were
    # re-created on every hook call.
    global db_ready
    db.execute("""CREATE TABLE if not exists karma(
               nick_vote TEXT PRIMARY KEY,
               up_karma INTEGER,
               down_karma INTEGER,
               total_karma INTEGER)""")
    db.execute("""CREATE TABLE if not exists karma_voters(
               voter TEXT,
               votee TEXT,
               epoch FLOAT,
               PRIMARY KEY(voter, votee))""")
    db_ready = True
def up(db, nick_vote):
db.execute("""UPDATE karma SET
up_karma = up_karma + 1,
total_karma = total_karma + 1 WHERE nick_vote=?""", (nick_vote.lower(),))
db.commit()
def down(db, nick_vote):
db.execute("""UPDATE karma SET
down_karma = down_karma + 1,
total_karma = total_karma + 1 WHERE nick_vote=?""", (nick_vote.lower(),))
db.commit()
def allowed(db, nick, nick_vote):
time_restriction = 3600
db.execute("""DELETE FROM karma_voters WHERE ? - epoch >= 3600""",
(time.time(),))
db.commit()
check = db.execute("""SELECT epoch FROM karma_voters WHERE voter=? AND votee=?""",
(nick.lower(), nick_vote.lower())).fetchone()
if check:
check = check[0]
if time.time() - check >= time_restriction:
db.execute("""INSERT OR REPLACE INTO karma_voters(
voter,
votee,
epoch) values(?,?,?)""", (nick.lower(), nick_vote.lower(), time.time()))
db.commit()
return True, 0
else:
return False, timesince.timeuntil(check, now=time.time()-time_restriction)
else:
db.execute("""INSERT OR REPLACE INTO karma_voters(
voter,
votee,
epoch) values(?,?,?)""", (nick.lower(), nick_vote.lower(), time.time()))
db.commit()
return True, 0
# TODO Make this work on multiple matches in a string, right now it'll only
# work on one match.
# karma_re = ('((\S+)(\+\+|\-\-))+', re.I)
karma_re = ('(.+)(\+\+|\-\-)$', re.I)
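# Hedged sketch for the TODO above (not part of the original plugin): a
# finditer-based pattern could collect every "<nick>++"/"<nick>--" in a single
# message, e.g.
#
#   for m in re.finditer(r'(\S+)(\+\+|--)', msg):
#       handle_vote(m.group(1), m.group(2))
#
# where handle_vote is a hypothetical helper wrapping the logic below.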
@hook.regex(*karma_re)
def karma_add(match, nick='', chan='', db=None, notice=None):
if not db_ready:
db_init(db)
nick_vote = match.group(1).strip().replace("+", "")
if nick.lower() == nick_vote.lower():
notice("You can't vote on yourself!")
return
if len(nick_vote) < 3 or " " in nick_vote:
return # ignore anything below 3 chars in length or with spaces
vote_allowed, when = allowed(db, nick, nick_vote)
if vote_allowed:
if match.group(2) == '++':
db.execute("""INSERT or IGNORE INTO karma(
nick_vote,
up_karma,
down_karma,
total_karma) values(?,?,?,?)""", (nick_vote.lower(),0,0,0))
up(db, nick_vote)
notice("Gave {} 1 karma!".format(nick_vote))
        elif match.group(2) == '--':
db.execute("""INSERT or IGNORE INTO karma(
nick_vote,
up_karma,
down_karma,
total_karma) values(?,?,?,?)""", (nick_vote.lower(),0,0,0))
down(db, nick_vote)
notice("Took away 1 karma from {}.".format(nick_vote))
else:
return
else:
notice("You are trying to vote too often. You can vote again in {}!".format(when))
return
@hook.command('k')
@hook.command
def karma(inp, nick='', chan='', db=None):
"""k/karma <nick> -- returns karma stats for <nick>"""
if not db_ready:
db_init(db)
if not chan.startswith('#'):
return
nick_vote = inp
out = db.execute("""SELECT * FROM karma WHERE nick_vote=?""",
(nick_vote.lower(),)).fetchall()
if not out:
return "That user has no karma."
else:
out = out[0]
return "{} has {} karma points.".format(nick_vote, out[1]-out[2])
|
thejordan95/Groovebot2
|
plugins/karma.py
|
Python
|
gpl-3.0
| 4,044
|
import numpy as np
#
# Tensor rotation
# from Peter Mortensen's stackoverflow question
# @ http://stackoverflow.com/questions/4962606/fast-tensor-rotation-with-numpy/18301915
#
n = 9
def rotT_loops(T, g):
Tprime = np.zeros((n,n,n,n))
for i in range(n):
for j in range(n):
for k in range(n):
for l in range(n):
for ii in range(n):
for jj in range(n):
for kk in range(n):
for ll in range(n):
gg = g[ii,i]*g[jj,j]*g[kk,k]*g[ll,l]
Tprime[i,j,k,l] = Tprime[i,j,k,l] + gg*T[ii,jj,kk,ll]
return Tprime
def rotT_numpy(T, g):
"""
Accepted response on stack overflow by phillip
"""
gg = np.outer(g, g)
gggg = np.outer(gg, gg).reshape(4 * g.shape)
axes = ((0, 2, 4, 6), (0, 1, 2, 3))
return np.tensordot(gggg, T, axes)
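# Hedged correctness check (added, not part of the original benchmark):
# rotT_loops reads the module-level `n`, so temporarily shrink it to keep the
# eight nested loops cheap, then restore it before the benchmarks below run.
def _check_equivalence():
    global n
    n_saved, n = n, 3
    T_small = np.random.randn(n, n, n, n)
    g_small = np.random.randn(n, n)
    assert np.allclose(rotT_loops(T_small, g_small),
                       rotT_numpy(T_small, g_small))
    n = n_saved
_check_equivalence()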
T = np.random.randn(n,n,n,n)
g = np.random.randn(n,n)
from compare_perf import compare_perf
compare_perf(rotT_loops, [T, g], extra = {'numpy_tensordot': rotT_numpy}, numba= False, backends=('c', 'openmp'), cpython=True)
def rotT_par(T, g):
def compute_elt(i,j,k,l):
total = 0.0
for ii in range(n):
for jj in range(n):
for kk in range(n):
for ll in range(n):
gg = g[ii,i]*g[jj,j]*g[kk,k]*g[ll,l]
total += gg*T[ii,jj,kk,ll]
return total
    # The innermost comprehension must iterate over the last axis (l) so the
    # result is indexed [i][j][k][l]; the original nesting swapped k and l,
    # transposing the last two axes relative to rotT_loops.
    return np.array([[[[compute_elt(i, j, k, l)
                        for l in xrange(n)]
                       for k in xrange(n)]
                      for j in xrange(n)]
                     for i in xrange(n)])
compare_perf(rotT_par, [T, g], extra = {'numpy_tensordot': rotT_numpy}, numba= False, backends=('c', 'openmp'), cpython = True)
|
pombredanne/parakeet
|
benchmarks/tensor_rotation.py
|
Python
|
bsd-3-clause
| 1,756
|
#!/usr/bin/env python
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True,
entry_points={
'sphinx.builders': [
'odt = sphinxcontrib.odfbuilder',
],
}
)
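# Hedged note (added comment, not part of the original file): with pbr=True,
# project metadata is read from setup.cfg; the entry point registers the 'odt'
# builder with Sphinx so documents can be built via `sphinx-build -b odt ...`.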
|
mans0954/odfbuilder
|
setup.py
|
Python
|
bsd-2-clause
| 216
|
import unittest
from subprocess import call, DEVNULL
import time
from tests.docker import docker_util
class VMHelper(object):
def __init__(self, vm_name: str, shell: str = "", ssh_username: str = None, ssh_port: str = None):
self.vm_name = vm_name
self.shell = shell # like cmd.exe /c
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.use_ssh = self.ssh_username is not None and self.ssh_port is not None
self.__vm_is_up = False
def start_vm(self):
call('VBoxManage startvm "{0}"'.format(self.vm_name), shell=True)
def stop_vm(self, save=True):
if save:
call('VBoxManage controlvm "{0}" savestate'.format(self.vm_name), shell=True)
return
if self.use_ssh:
self.send_command("sudo shutdown -h now")
else:
call('VBoxManage controlvm "{0}" acpipowerbutton'.format(self.vm_name), shell=True)
def wait_for_vm_up(self):
if not self.__vm_is_up:
print("Waiting for {} to come up.".format(self.vm_name))
command = "ping -c 1" if self.use_ssh else "ping -n 1"
command += " github.com"
while self.__send_command(command, hide_output=True, print_command=False) != 0:
time.sleep(1)
self.__vm_is_up = True
def send_command(self, command: str) -> int:
self.wait_for_vm_up()
return self.__send_command(command)
def __send_command(self, command: str, hide_output=False, print_command=True) -> int:
if self.use_ssh:
fullcmd = ["ssh", "-p", str(self.ssh_port), "{0}@127.0.0.1".format(self.ssh_username), '"{0}"'.format(command)]
else:
fullcmd = ["VBoxManage", "guestcontrol", '"{0}"'.format(self.vm_name), "run"] \
+ self.shell.split(" ") \
+ ['"{0}"'.format(command)]
kwargs = {"stdout": DEVNULL, "stderr": DEVNULL} if hide_output else {}
fullcmd = " ".join(fullcmd)
if print_command:
print("\033[1m" + fullcmd + "\033[0m")
return call(fullcmd, shell=True, **kwargs)
class TestInstallation(unittest.TestCase):
def test_linux(self):
distributions = [
#"archlinux",
"debian8",
#"ubuntu1404",
"ubuntu1604",
#"kali",
# "gentoo" # cant test gentoo till this bug is fixed: https://github.com/docker/docker/issues/1916#issuecomment-184356102
]
for distribution in distributions:
self.assertTrue(docker_util.run_image(distribution, rebuild=False), msg=distribution)
def test_windows(self):
"""
Run the unittests on Windows + Install via Pip
To Fix Windows Error in Guest OS:
type gpedit.msc and go to:
Windows Settings
-> Security Settings
-> Local Policies
-> Security Options
-> Accounts: Limit local account use of blank passwords to console logon only
and set it to DISABLED.
configure pip on guest:
        %APPDATA%\pip
[global]
no-cache-dir = false
[uninstall]
yes = true
:return:
"""
target_dir = r"C:\urh"
vm_helper = VMHelper("Windows 10", shell="cmd.exe /c")
vm_helper.start_vm()
vm_helper.send_command("pip uninstall urh")
vm_helper.send_command("rd /s /q {0}".format(target_dir))
vm_helper.send_command("git clone https://github.com/jopohl/urh " + target_dir)
rc = vm_helper.send_command(r"python C:\urh\src\urh\cythonext\build.py")
self.assertEqual(rc, 0)
rc = vm_helper.send_command(r"py.test C:\urh\tests".format(target_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("pip install urh")
time.sleep(0.5)
rc = vm_helper.send_command("urh autoclose")
self.assertEqual(rc, 0)
vm_helper.send_command("pip uninstall urh")
vm_helper.stop_vm()
def test_osx(self):
"""
Run Unittests + Pip Installation on OSX
:return:
"""
vm_helper = VMHelper("OSX", ssh_port="3022", ssh_username="boss")
vm_helper.start_vm()
python_bin_dir = "/Library/Frameworks/Python.framework/Versions/3.5/bin/"
target_dir = "/tmp/urh"
vm_helper.send_command("rm -rf {0}".format(target_dir))
vm_helper.send_command("git clone https://github.com/jopohl/urh " + target_dir)
# Build extensions
rc = vm_helper.send_command("{0}python3 {1}/src/urh/cythonext/build.py".format(python_bin_dir, target_dir))
self.assertEqual(rc, 0)
# Run Unit tests
rc = vm_helper.send_command("{1}py.test {0}/tests".format(target_dir, python_bin_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("{0}pip3 --no-cache-dir install urh".format(python_bin_dir))
rc = vm_helper.send_command("{0}urh autoclose".format(python_bin_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("{0}pip3 uninstall --yes urh".format(python_bin_dir))
vm_helper.stop_vm()
|
splotz90/urh
|
tests/TestInstallation.py
|
Python
|
gpl-3.0
| 5,216
|
from __future__ import print_function, division
import sys
import os
import glob
import doctest
from doctest import DocTestParser, Example, SKIP
import gmpy2
# *****************************************************************************
# Test strategy
# -------------
# Tests are divided into two different categories:
#
# 1) The 'txt' files contain doctest style tests. These tests should cover
# basic functionality for all functions/types.
# 2) The 'py' files contain Python code that perform extensive tests, but
# may not test every function.
#
# If run by a debug build of Python, the test suite can be repeated multiple
# times to search for memory leaks.
#
# NOTE: IF THE LAST TEST IN A BLOCK OF TESTS GENERATES AN EXCEPTION, THE
# REFERENCE COUNTING IN A DEBUG BUILD GETS CONFUSED. ALWAYS ENSURE THAT
# AT LEAST ONE VALID TEST IS PERFORMED AFTER AN EXCEPTION IS RAISED!
#
# *****************************************************************************
# Check if this is a debug build of Python.
try:
sys.gettotalrefcount()
debug = True
except AttributeError:
debug = False
# Change repeat to the number of times to repeat each test. Combined with a
# debug build, this can help identify memory leaks.
if debug:
try:
repeat = abs(int(sys.argv[1]))
except:
repeat = 1
else:
repeat = 1
# If mpc version < 1.1.0, gmpy2.root_of_unity is not defined and gmpy2.cmp_abs
# doesn't handle complex arguments.
# We create a doctest flag to skip a doctest when mpc version is < 1.1.0
SKIP_MPC_LESS_THAN_110 = doctest.register_optionflag("SKIP_MPC_LESS_THAN_110")
mpc_version_110 = 'root_of_unity' in dir(gmpy2) # True if mpc version >= 1.1.0
SKIP_IN_DEBUG_MODE = doctest.register_optionflag("SKIP_IN_DEBUG_MODE")
class Gmpy2DocTestParser(DocTestParser):
def parse(self, *args, **kwargs):
examples = DocTestParser.parse(self, *args, **kwargs)
for example in examples:
if not isinstance(example, Example):
continue
if not mpc_version_110 and SKIP_MPC_LESS_THAN_110 in example.options:
example.options[SKIP] = True
if debug and SKIP_IN_DEBUG_MODE in example.options:
example.options[SKIP] = True
return examples
parser = Gmpy2DocTestParser()
print()
print("Unit tests for gmpy2 {0} with Python {1}".format(gmpy2.version(), sys.version.split()[0]))
print(" Mutliple-precision library: {0}".format(gmpy2.mp_version()))
print(" Floating-point library: {0}".format(gmpy2.mpfr_version()))
print(" Complex library: {0}".format(gmpy2.mpc_version()))
print(" Caching Values: (Cache size) {0}".format(gmpy2.get_cache()[0]))
print(" Caching Values: (Size in limbs) {0}".format(gmpy2.get_cache()[1]))
print()
if sys.version.startswith('3.1.'):
    print("Due to differences in the formatting of exceptions under Python")
    print("3.1, there will be test failures for exception handling when the")
    print("tests are run with Python 3.1. The doctest module in Python 3.2")
    print("and later does not have this issue.")
    print()
input("Press ENTER to continue.. ")
print()
mpz_doctests = ["test_mpz_create.txt", "test_mpz.txt", "test_mpz_io.txt",
"test_mpz_pack_unpack.txt", "test_misc.txt"]
mpq_doctests = ["test_mpq.txt"]
mpfr_doctests = ["test_mpfr_create.txt", "test_mpfr.txt",
"test_mpfr_trig.txt", "test_mpfr_min_max.txt",
"test_context.txt", "test_mpfr_subnormalize.txt"]
# Some tests may differ between MPFR3 and MPFR4.
mpfr_major_version = gmpy2.mpfr_version().split()[1].split('.')[0]
mpfr_version_tests = [os.path.basename(i)
for i in glob.glob(os.path.join(os.path.dirname(__file__),
"test_mpfr" + mpfr_major_version + "*.txt"))]
mpc_doctests = ["test_mpc_create.txt", "test_mpc.txt", "test_mpc_trig.txt"]
gmpy2_tests = [os.path.basename(i)
for i in glob.glob(os.path.join(os.path.dirname(__file__),
"test_gmpy2*.txt"))]
# The following tests will only pass on Python 3.2+.
py32_doctests = ["test_py32_hash.txt"]
failed = 0
attempted = 0
all_doctests = gmpy2_tests + mpz_doctests + mpq_doctests
all_doctests += mpfr_doctests + mpfr_version_tests
all_doctests += mpc_doctests
if sys.version >= "3.2":
all_doctests += py32_doctests
for test in sorted(all_doctests):
if test.endswith("py2.txt") and sys.version >= "3":
continue
if test.endswith("py3.txt") and sys.version < "3":
continue
for r in range(repeat):
result = doctest.testfile(test, globs=globals(),
optionflags=doctest.IGNORE_EXCEPTION_DETAIL |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_NDIFF,
parser=parser)
print("Results for: {0:25}".format(test.split(".")[0]), end="")
print(" Attempted: {1:4d} Failed: {0:4d}".format(*result), end="")
if debug:
print(" RefCount: {0:6d}".format(sys.gettotalrefcount()))
else:
print()
failed += result[0]
attempted += result[1]
if repeat > 1:
print()
print()
print(" Summary - Attempted: {0:4d} Failed: {1:4d}".format(attempted, failed))
print()
print("Running external test programs.")
print("Running {0:30} ".format("test_pack.py"), end="")
import test_pack
if test_pack.test():
print("successful")
attempted += 1
else:
print("failed")
failed += 1
print("Running {0:30} ".format("test_mpz_args.py"), end="")
import test_mpz_args
if test_mpz_args.test():
print("successful")
attempted += 1
else:
print("failed")
failed += 1
if failed:
sys.exit(1)
else:
sys.exit(0)
|
aleaxit/gmpy
|
test/runtests.py
|
Python
|
lgpl-3.0
| 5,933
|
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
        return False, "[fileone] The file does not exist or has been removed"
return True, ""
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
    patron = r'<source src=([^\s]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
if not url.startswith("https"):
url = "https:%s" % url
video_urls.append(["[fileone]", url])
return video_urls
|
alfa-addon/addon
|
plugin.video.alfa/servers/fileone.py
|
Python
|
gpl-3.0
| 825
|
# http://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/patterns/pair.html
import time
import zmq
port = "5556"
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind("tcp://*:{}".format(port))
while True:
socket.send_string("Server message to client3")
msg = socket.recv()
print(msg)
time.sleep(1)
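# Hedged client counterpart (added comment, not part of the original example),
# mirroring the tutorial cited above: connect instead of bind, then alternate
# recv/send on the PAIR socket:
#
#   context = zmq.Context()
#   socket = context.socket(zmq.PAIR)
#   socket.connect("tcp://localhost:{}".format(port))
#   while True:
#       msg = socket.recv()
#       print(msg)
#       socket.send_string("client message to server")
#       time.sleep(1)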
|
introprogramming/exercises
|
exercises/chat/online_examples/pairserver.py
|
Python
|
mit
| 343
|
# -*- Mode: Python; test-case-name: flumotion.test.test_bouncers_ipbouncer -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
"""
A bouncer that authenticates based on the IP address of the remote side,
as seen by the bouncer.
"""
from flumotion.common import keycards, messages, errors, log, netutils
from flumotion.common.i18n import N_, gettexter
from flumotion.component.bouncers.algorithms import base
__all__ = ['IPBouncerAlgorithm']
__version__ = "$Rev$"
T_ = gettexter()
class IPBouncerAlgorithm(base.BouncerAlgorithm):
logCategory = 'ip-bouncer'
volatile = False
def get_namespace(self):
return 'ipbouncer'
def start(self, component):
self.props = self.args['properties']
self.deny_default = self.props.get('deny-default', True)
self.allows = netutils.RoutingTable()
self.denies = netutils.RoutingTable()
for p, t in (('allow', self.allows), ('deny', self.denies)):
for s in self.props.get(p, []):
try:
ip, mask = s.split('/')
t.addSubnet(True, ip, int(mask))
except Exception, e:
m = messages.Error(
T_(N_("Invalid value for property %r: %s"), p, s),
log.getExceptionMessage(e),
mid='match-type')
component.addMessage(m)
raise errors.ComponentSetupHandledError()
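    # Added note (not in the original): with deny-default=True an address must
    # match an allow subnet and no deny subnet; with deny-default=False it is
    # allowed unless it matches a deny subnet without also matching an allow.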
def authenticate(self, keycard):
ip = keycard.getData()['address']
self.debug('authenticating keycard from requester %s', ip)
if ip is None:
self.warning('could not get address of remote')
allowed = False
elif self.deny_default:
allowed = (self.allows.route(ip)
and not self.denies.route(ip))
else:
allowed = (self.allows.route(ip)
or not self.denies.route(ip))
if not allowed:
self.info('denied login from ip address %s',
keycard.address)
return None
else:
keycard.state = keycards.AUTHENTICATED
self.debug('allowed login from ip address %s',
keycard.address)
return keycard
|
flumotion-mirror/flumotion
|
flumotion/component/bouncers/algorithms/ipbouncer.py
|
Python
|
lgpl-2.1
| 2,879
|
from rest_framework import generics
from bukkake.models import Bukkake
from bukkake.api.serializers import BukkakeSerializer
class BukkakeListView(generics.ListCreateAPIView):
queryset = Bukkake.objects.all()
serializer_class = BukkakeSerializer
permission_classes = ()
authentication_classes = ()
class BukkakeDetailView(generics.RetrieveDestroyAPIView):
queryset = Bukkake.objects.all()
serializer_class = BukkakeSerializer
permission_classes = ()
authentication_classes = ()
|
delitamakanda/BukkakeGramNew
|
bukkake/api/views.py
|
Python
|
mit
| 519
|
from test.test_support import have_unicode, run_unittest
import unittest
class base_set:
def __init__(self, el):
self.el = el
class set(base_set):
def __contains__(self, el):
return self.el == el
class seq(base_set):
def __getitem__(self, n):
return [self.el][n]
class TestContains(unittest.TestCase):
def test_common_tests(self):
a = base_set(1)
b = set(1)
c = seq(1)
self.assertIn(1, b)
self.assertNotIn(0, b)
self.assertIn(1, c)
self.assertNotIn(0, c)
self.assertRaises(TypeError, lambda: 1 in a)
self.assertRaises(TypeError, lambda: 1 not in a)
# test char in string
self.assertIn('c', 'abc')
self.assertNotIn('d', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertRaises(TypeError, lambda: None in 'abc')
if have_unicode:
def test_char_in_unicode(self):
self.assertIn('c', unicode('abc'))
self.assertNotIn('d', unicode('abc'))
self.assertIn('', unicode(''))
self.assertIn(unicode(''), '')
self.assertIn(unicode(''), unicode(''))
self.assertIn('', unicode('abc'))
self.assertIn(unicode(''), 'abc')
self.assertIn(unicode(''), unicode('abc'))
self.assertRaises(TypeError, lambda: None in unicode('abc'))
# test Unicode char in Unicode
self.assertIn(unicode('c'), unicode('abc'))
self.assertNotIn(unicode('d'), unicode('abc'))
# test Unicode char in string
self.assertIn(unicode('c'), 'abc')
self.assertNotIn(unicode('d'), 'abc')
def test_builtin_sequence_types(self):
# a collection of tests on builtin sequence types
a = range(10)
for i in a:
self.assertIn(i, a)
self.assertNotIn(16, a)
self.assertNotIn(a, a)
a = tuple(a)
for i in a:
self.assertIn(i, a)
self.assertNotIn(16, a)
self.assertNotIn(a, a)
class Deviant1:
"""Behaves strangely when compared
This class is designed to make sure that the contains code
works when the list is modified during the check.
"""
aList = range(15)
def __cmp__(self, other):
if other == 12:
self.aList.remove(12)
self.aList.remove(13)
self.aList.remove(14)
return 1
self.assertNotIn(Deviant1(), Deviant1.aList)
class Deviant2:
"""Behaves strangely when compared
This class raises an exception during comparison. That in
turn causes the comparison to fail with a TypeError.
"""
def __cmp__(self, other):
if other == 4:
raise RuntimeError, "gotcha"
try:
self.assertNotIn(Deviant2(), a)
except TypeError:
pass
def test_main():
run_unittest(TestContains)
if __name__ == '__main__':
test_main()
|
ktan2020/legacy-automation
|
win/Lib/test/test_contains.py
|
Python
|
mit
| 3,264
|
#! python3
# -*- coding: utf-8 -*-
'''
Automatically watch the raw (unsubtitled) video folder for changes; when a new file arrives, start encoding it automatically.
'''
import sys
def show_exception_and_exit(exc_type, exc_value, tb):
import traceback
traceback.print_exception(exc_type, exc_value, tb)
    input('''Error encountered; please send a screenshot of this screen to Kilo19.
Continuing in 5 seconds. Press Enter to exit.
''')
import time
time.sleep(5)
sys.excepthook = show_exception_and_exit
import subprocess
import codecs
import time
import sys
import os
import nvksupport
from settings import cloudRoot, cloudDir, customDownDir
scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
def GrabArgs(lines, argLen):
returnVal = [''] * argLen
for x in range(len(returnVal)):
if len(lines) > x:
returnVal[x] = lines[x].strip()
return returnVal
def ListIngredients(dirPath, name, ext):
myIngredients = set()
if os.path.isdir(dirPath):
for root, dirs, files in os.walk(dirPath):
if root == dirPath:
for file in files:
                    if name not in file and file[-3:] == ext:
fileStr = os.path.join(root, file) + '\n'
myIngredients.add(fileStr)
return myIngredients
def ListFile(dirPath, name, ext):
candidates = set()
if os.path.isdir(dirPath):
for root, dirs, files in os.walk(dirPath):
for file in files:
if file[-3:] == ext and name in file:
fileStr = os.path.join(root, file) + '\n'
candidates.add(fileStr)
return candidates
def SafeReadandWriteHead(inName, fileHead = None):
lines = []
if not os.path.isfile(inName):
codecs.open(inName, 'w', 'utf-8').close()
else:
inf = codecs.open(inName, 'r', 'utf-8')
lines = inf.readlines()
inf.close()
lines = [line.strip() + '\n' for line in lines]
if fileHead:
fileHead = [l.strip() + '\n' for l in fileHead]
if len(lines) < len(fileHead) or lines[:len(fileHead)] != fileHead:
print("writehead: " + time.ctime())
inf = codecs.open(inName, 'w', 'utf-8')
inf.writelines(fileHead)
inf.close()
x = 0
while x < len(lines):
if lines[x][0] == '#':
lines.pop(x)
else:
x += 1
return lines
def DeamonHelper(inName, argLen, scriptName, fileHead):
inName = os.path.join(customDownDir, inName + '.txt')
lines = SafeReadandWriteHead(inName, fileHead)
if len(lines):
inf = codecs.open(inName, 'w+', 'utf-8')
if fileHead:
fileHead = [l.strip() + '\n' for l in fileHead]
inf.writelines(fileHead)
else:
inf.write('')
inf.close()
while lines:
args = GrabArgs(lines, argLen)
lines = lines[len(args):]
print(inName + ' command: ' + time.ctime())
for arg in args:
print(arg)
# For safety
time.sleep(0.5)
subprocess.Popen(
[
'py', '-3', os.path.join(scriptDir, scriptName + '.py')
] + args,
creationflags = subprocess.CREATE_NEW_CONSOLE
)
if __name__ == "__main__":
print("NVK Deamon online: " + time.ctime())
ingHead = ['#网盘高压下令文件',
'#不要删除开头带#的行,这些都是注释',
'#不要使用OC网页版编辑此文件',
'#此文件为UTF-8编码,用来下令压生肉',
'#下令前请先用everything搜索(任务栏那个放大镜)',
'#优先下载1080p',
'#搜索时按Alt+3然后Alt+4可以缩放everything窗口',
'#生肉速度大约视频1/2时长',
'#同步到网盘速度不等']
beHead = ['#压熟肉下令文件,带#的行都是注释',
'#请在BUG检测完成后再压制',
'#不要使用OC网页版编辑此文件',
'#教程https://docs.google.com/document/d/1CdyqEP7eanL6J6xUpbcOuq-uIBQkSEqhhHgeGFT94ik/edit',
'#此文件为UTF-8编码,用来下令压熟肉',
'#熟肉速度大约视频0.8-0.9倍',
'#请同时提供生肉路径与字幕所在网盘文件夹,然后保存',
'#同步到网盘速度不等']
cookedHead = ['#此文件专门记录熟肉路径,自动更新',
'#请优先从已有熟肉中选择上传',
'#上传完成后尽快删除']
while True:
DeamonHelper("ing", 1, "NixieCloud_Enc", ingHead)
DeamonHelper("BE", 3, "Bili_Enc", beHead)
ingredientsListPath = os.path.join(
scriptDir, 'ingredientsList' + '.txt'
)
ingredients = ListIngredients(customDownDir, '[BE_', 'mp4')
cookedPath = os.path.join(customDownDir, 'cookedList' + '.txt')
cooked = ListFile(os.path.join(cloudRoot, cloudDir), '[BE_', 'mp4')
oldIngList = set(SafeReadandWriteHead(ingredientsListPath, None))
oldcookedList = set(SafeReadandWriteHead(cookedPath, None))
if ingredients != oldIngList:
#set difference finds new ingredients to transcode
newIngs = list(ingredients - oldIngList)
print('Auto NCE: ' + time.ctime())
for newIng in newIngs:
print(newIng)
newIngStripped = newIng.strip()
newIngStripped = nvksupport.ReplaceNaughtyCharacters(
newIngStripped, nvksupport.tl
)
subprocess.Popen(
[
'py', '-3',
os.path.join(scriptDir, "NixieCloud_Enc" + '.py'),
newIngStripped
],
creationflags=subprocess.CREATE_NEW_CONSOLE
)
ingredients = ListIngredients(customDownDir, '[BE_', 'mp4')
print("writeIng: " + time.ctime())
outFile = codecs.open(ingredientsListPath, 'w', 'utf-8')
outFile.writelines(list(ingredients))
outFile.close()
if cooked != oldcookedList:
print("writeCooked: " + time.ctime())
codecs.open(cookedPath, 'w', 'utf-8').close()
SafeReadandWriteHead(cookedPath, cookedHead)
outFile = codecs.open(cookedPath, 'a+', 'utf-8')
outFile.writelines(list(cooked))
outFile.close()
time.sleep(5)
|
Kilo19/NixieVideoKit
|
nvkdeamon.py
|
Python
|
mit
| 5,537
|
import click
from cloudcompose.ecs.controller import Controller
from cloudcompose.config import CloudConfig
from cloudcompose.exceptions import CloudComposeException
@click.group()
def cli():
pass
@cli.command()
@click.option('--upgrade-image/--no-upgrade-image', default=False, help="Upgrade the image to the newest version instead of keeping the cluster consistent")
def up(upgrade_image):
"""
    create or update the ECS cluster
"""
try:
cloud_config = CloudConfig(upgrade_image=upgrade_image)
controller = Controller(cloud_config)
controller.cluster_up()
except CloudComposeException as ex:
        print(ex.message)
@cli.command()
@click.option('--force/--no-force', default=False, help="Force the cluster to go down even if terminate protection is enabled")
def down(force):
"""
destroy ECS cluster
"""
try:
cloud_config = CloudConfig()
controller = Controller(cloud_config)
controller.cluster_down(force)
except CloudComposeException as ex:
        print(ex.message)
@cli.command()
@click.option('--verbose/--no-verbose', default=False, help="Output detailed health check information")
def health(verbose):
"""
check ECS cluster health
"""
try:
cloud_config = CloudConfig()
controller = Controller(cloud_config)
name = cloud_config.config_data('cluster')['name']
healthy = controller.cluster_health(verbose)
if healthy:
print(("{} is healthy".format(name)))
else:
print(("{} is unhealthy".format(name)))
except CloudComposeException as ex:
        print(ex.message)
@cli.command()
@click.option('--single-step/--no-single-step', default=False, help="Perform only one upgrade step and then exit")
@click.option('--upgrade-image/--no-upgrade-image', default=True, help="Upgrade the image to the newest version instead of keeping the cluster consistent")
def upgrade(single_step, upgrade_image):
"""
upgrade the ECS cluster
"""
try:
cloud_config = CloudConfig()
controller = Controller(cloud_config, upgrade_image=upgrade_image)
controller.upgrade(single_step)
except CloudComposeException as ex:
        print(ex.message)
@cli.command()
def cleanup():
"""
deletes launch configs and auto scaling group
"""
try:
cloud_config = CloudConfig()
controller = Controller(cloud_config)
controller.cleanup()
except CloudComposeException as ex:
        print(ex.message)
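# Illustrative usage, assuming this group is wired up as a `cloud-compose ecs`
# entry point (the actual console-script name is not shown in this file):
#   cloud-compose ecs up --upgrade-image
#   cloud-compose ecs health --verbose
#   cloud-compose ecs upgrade --single-step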
|
cloud-compose/cloud-compose-ecs
|
cloudcompose/ecs/commands/cli.py
|
Python
|
mit
| 2,554
|
def add(a, b):
    """
    Add the two input parameters together.

    Parameters
    ----------
    a : int
        the first number to add
    b : int
        the second number to add

    Returns
    -------
    int
        the sum of a and b
    """
    return a + b
|
evanjbowling/playground
|
lng/py/simple_package/apackage/math.py
|
Python
|
mit
| 210
|
# -*- coding: utf-8 -*-
import os
import sys
import datetime
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import SIGNAL, SLOT
from models import *
initDB()
b = Repo.get_by_or_init(path=u"/Users/johannes")
f = Soundfile.get_by_or_init(repo=b, file_path=u"test2.wav")
m = RepoModel()
repo = m.add_repo(u"/Users/johannes/samples/misc")
print f.path
f.desc=u"Fappafdpokasdok"
f.get_data_from_file()
#for tags in [u"foo", u"treo"]:
# t = Tag.get_by_or_init(name=tags)
# f.tags.append(t)
sm = SoundfileModel()
#session.delete(repo)
m.scan_repo(repo)
print [s.path for s in Soundfile.query.all()]
#print "foo", f.search(u"desc=Faspp", u"foo", u"channels=2")
#print "foo", f.search( u"foo")
f.taglist = [u"rar"]
print f.tags
session.commit()
for t in Tag.query.all():
print t.name
|
jpburstrom/sampleman
|
tests.py
|
Python
|
gpl-3.0
| 798
|
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------
'''
Created on 24 Jan 2016
@author: Seko
@summary: Logger
'''
#---------------------------------------------------------------------
# ____________________ I M P O R T ____________________
import xbmc
# ____________________ C L A S S ____________________
class Logger:
__ADDON_NAME__ = None
__MODULE_NAME__ = None
def __init__(self, addonName=None, moduleName=None):
"""
Constructor
@param addonName: The addon name
@param moduleName: The module name
"""
if addonName:
self.__ADDON_NAME__ = addonName
if moduleName:
self.__MODULE_NAME__ = moduleName
def _getPrefixMsg(self):
"""
Method to get the prefix log message
"""
prefix = ""
if self.__ADDON_NAME__ or self.__MODULE_NAME__:
prefix +="["
if self.__ADDON_NAME__ :
prefix +=""+self.__ADDON_NAME__
if self.__ADDON_NAME__ and self.__MODULE_NAME__:
prefix +=" - "
if self.__MODULE_NAME__ :
prefix +=self.__MODULE_NAME__
if self.__ADDON_NAME__ or self.__MODULE_NAME__:
prefix +="] "
return prefix
def log(self,msg,level=None):
"""
Log function
"""
#newMsg = strUtil.toUTF8(msg)
if level is not None:
xbmc.log(self._getPrefixMsg()+str(msg), level)
else:
xbmc.log(self._getPrefixMsg()+str(msg))
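# Example usage (hypothetical addon/module names):
#   Logger("my.addon", "player").log("started", xbmc.LOGDEBUG)
# would emit "[my.addon - player] started" to the Kodi log.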
|
Seko34/Kodi-Development
|
script.module.core.ultrastream/resources/lib/logger.py
|
Python
|
gpl-3.0
| 1,810
|
'''
Problem 030
Surprisingly there are only three numbers that can be written as the sum of
fourth powers of their digits:
1634 = 1^4 + 6^4 + 3^4 + 4^4
8208 = 8^4 + 2^4 + 0^4 + 8^4
9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers
of their digits.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
def solve_problem(power):
    # Upper bound: once a number has more digits than the largest possible
    # digit-power sum can reach, no further matches exist, so power * 9**power
    # is a safe cutoff for the small powers used here.
    max_int = power * (9 ** power)
    answer = 0
    for number in range(2, max_int):
        digit_sum = 0
        for digit in str(number):
            digit_sum += int(digit) ** power
        if digit_sum == number:
            answer += number
    return answer
if __name__ == "__main__":
print(solve_problem(5))
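    # Added sanity check: the statement above gives 19316 as the fourth-power
    # total, so the same routine should reproduce it.
    assert solve_problem(4) == 19316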
|
daveinnyc/various
|
project_euler/030.digit_5th_powers.py
|
Python
|
mit
| 825
|
from django import forms
from tablemanager.models import Workspace
from livelayermanager.models import Datasource,Layer,SqlViewLayer
from borg_utils.form_fields import GeoserverSettingForm,MetaTilingFactorField,GridSetField
from borg_utils.form_fields import GroupedModelChoiceField,BorgSelect
from borg_utils.forms import BorgModelForm
class DatasourceForm(GeoserverSettingForm,BorgModelForm):
"""
A form for Datasource model
"""
max_connections = forms.IntegerField(label="Max concurrent connections",initial=10,min_value=1,max_value=128)
max_connections.setting_type = "geoserver_setting"
max_connections.key = "max connections"
connect_timeout = forms.IntegerField(label="Connect timeout in seconds",initial=30,min_value=1,max_value=3600)
connect_timeout.setting_type = "geoserver_setting"
connect_timeout.key = "Connection timeout"
min_connections = forms.IntegerField(label="Min concurrent connections",initial=1,min_value=1,max_value=128)
min_connections.setting_type = "geoserver_setting"
min_connections.key = "min connections"
max_connection_idle_time = forms.IntegerField(label="Max connection idle time",initial=300,min_value=1)
max_connection_idle_time.setting_type = "geoserver_setting"
max_connection_idle_time.key = "Max connection idle time"
fetch_size = forms.IntegerField(label="Fetch size",initial=1000,min_value=1)
fetch_size.setting_type = "geoserver_setting"
fetch_size.key = "fetch size"
workspace = GroupedModelChoiceField('publish_channel',queryset=Workspace.objects.all(),required=True,choice_family="workspace",choice_name="workspace_choices",widget=BorgSelect())
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
self.get_setting_from_model(*args,**kwargs)
super(DatasourceForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['workspace'].widget.attrs['readonly'] = True
def _post_clean(self):
if self.errors:
return
self.set_setting_to_model()
super(DatasourceForm,self)._post_clean()
class Meta:
model = Datasource
fields = "__all__"
class LayerForm(GeoserverSettingForm,BorgModelForm):
"""
A form for Layer model
"""
create_cache_layer = forms.BooleanField(required=False,label="create_cache_layer",initial={"enabled":True})
create_cache_layer.setting_type = "geoserver_setting"
server_cache_expire = forms.IntegerField(label="server_cache_expire",min_value=0,required=False,initial=0,help_text="Expire server cache after n seconds (set to 0 to use source setting)")
server_cache_expire.setting_type = "geoserver_setting"
client_cache_expire = forms.IntegerField(label="client_cache_expire",min_value=0,required=False,initial=0,help_text="Expire client cache after n seconds (set to 0 to use source setting)")
client_cache_expire.setting_type = "geoserver_setting"
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
self.get_setting_from_model(*args,**kwargs)
super(LayerForm, self).__init__(*args, **kwargs)
self.fields['table'].widget.attrs['readonly'] = True
instance = kwargs.get("instance")
if instance and instance.is_published:
self.fields['name'].widget.attrs['readonly'] = True
def _post_clean(self):
if self.errors:
return
self.set_setting_to_model()
super(LayerForm,self)._post_clean()
class Meta:
model = Layer
fields = "__all__"
class SqlViewLayerForm(GeoserverSettingForm,BorgModelForm):
"""
A form for SqlViewLayer model
"""
create_cache_layer = forms.BooleanField(required=False,label="create_cache_layer",initial={"enabled":True})
create_cache_layer.setting_type = "geoserver_setting"
server_cache_expire = forms.IntegerField(label="server_cache_expire",min_value=0,required=False,initial=0,help_text="Expire server cache after n seconds (set to 0 to use source setting)")
server_cache_expire.setting_type = "geoserver_setting"
client_cache_expire = forms.IntegerField(label="client_cache_expire",min_value=0,required=False,initial=0,help_text="Expire client cache after n seconds (set to 0 to use source setting)")
client_cache_expire.setting_type = "geoserver_setting"
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
self.get_setting_from_model(*args,**kwargs)
super(SqlViewLayerForm, self).__init__(*args, **kwargs)
instance = kwargs.get("instance")
if instance and instance.is_published:
self.fields['name'].widget.attrs['readonly'] = True
def _post_clean(self):
if self.errors:
return
self.set_setting_to_model()
super(SqlViewLayerForm,self)._post_clean()
class Meta:
model = SqlViewLayer
fields = "__all__"
|
rockychen-dpaw/borgcollector
|
livelayermanager/forms.py
|
Python
|
bsd-3-clause
| 5,053
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for OptimizerV2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adadelta
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import nadam
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import training_util
class OptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testAdaptiveLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(1.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, [var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
# var0 = [1., 2.] - 1.0 * [5, 5]
self.assertAllClose([-4., -3.], self.evaluate(var0))
# var1 = [3., 4.] - 1.0 * [3, 3]
self.assertAllClose([0., 1.], self.evaluate(var1))
sgd.learning_rate = 0.5
if context.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
# Validate updated params
# var0 = [-4., -3.] - 0.5 * [5, 5]
self.assertAllClose([-6.5, -5.5], self.evaluate(var0))
# var1 = [0., 1.] - 0.5 * [3, 3]
self.assertAllClose([-1.5, -0.5], self.evaluate(var1))
sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.5)
if context.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
@test_util.run_in_graph_and_eager_modes
def testPrecomputedGradient(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
grad_loss = constant_op.constant([42, -42], dtype=dtype)
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0))
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegexp(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
loss = lambda: constant_op.constant(5.0)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@test_util.run_in_graph_and_eager_modes
def testGradientsAsVariables(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
resource_variable_ops.ResourceVariable(
array_ops.zeros([2], dtype), name='c_%d_%d' % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
state_ops.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
        # Run convert_ops to perform the gradient conversion.
self.evaluate(variables.global_variables_initializer())
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
opt_op = sgd.apply_gradients(converted_grads_and_vars)
self.evaluate(variables.global_variables_initializer())
self.evaluate(convert_ops)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testComputeGradientsWithTensors(self):
with self.cached_session():
x = ops.convert_to_tensor(1.0)
def f():
return x * x
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(f, [x])
self.assertEqual(1, len(grads_and_vars))
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd.apply_gradients(grads_and_vars)
@test_util.run_in_graph_and_eager_modes
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with self.cached_session():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
loss = lambda: 5 * var0 + 3 * var1
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
self.assertAllClose([0., 0.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testIterationWithoutMinimize(self):
with self.cached_session():
sgd = gradient_descent.SGD(3.0)
self.evaluate(sgd.iterations.initializer)
self.assertEqual(0, self.evaluate(sgd.iterations))
@test_util.run_in_graph_and_eager_modes
def testConfig(self):
with self.cached_session():
opt = gradient_descent.SGD(learning_rate=1.0)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
lr = opt._get_hyper('learning_rate')
lr2 = opt2._get_hyper('learning_rate')
self.evaluate(variables.global_variables_initializer())
# assert both are equal float values.
self.assertEqual(self.evaluate(lr), self.evaluate(lr2))
var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
loss = lambda: 3 * var0
# learning rate variable created when calling minimize.
opt.minimize(loss, [var0])
opt3 = gradient_descent.SGD.from_config(config)
lr3 = opt3._get_hyper('learning_rate')
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(lr), self.evaluate(lr3))
@test_util.run_in_graph_and_eager_modes
def testConfigWithLearningRateDecay(self):
with self.cached_session():
decay_schedule = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.1)
step = 10
opt = gradient_descent.SGD(decay_schedule)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
# assert both are equal float values.
self.assertAllEqual(
decay_schedule(step),
opt._get_hyper('learning_rate')(step))
self.assertAllEqual(
decay_schedule(step),
opt2._get_hyper('learning_rate')(step))
var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
loss = lambda: 3 * var0
# learning rate variable created when calling minimize.
opt.minimize(loss, [var0])
self.evaluate(variables.global_variables_initializer())
config = opt.get_config()
opt3 = gradient_descent.SGD.from_config(config)
self.assertAllEqual(
self.evaluate(opt._get_hyper('learning_rate')(step)),
opt3._get_hyper('learning_rate')(step))
@test_util.run_in_graph_and_eager_modes
def testGradClipValue(self):
with self.cached_session():
var = resource_variable_ops.ResourceVariable([1.0, 2.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0., 1.], self.evaluate(var))
@test_util.run_in_graph_and_eager_modes
def testGradClipNorm(self):
with self.cached_session():
var = resource_variable_ops.ResourceVariable([1.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.], self.evaluate(var))
@test_util.run_in_graph_and_eager_modes
def testInvalidClipNorm(self):
with self.assertRaisesRegexp(ValueError, '>= 0'):
gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
@test_util.run_in_graph_and_eager_modes
def testInvalidKwargs(self):
with self.assertRaisesRegexp(TypeError, 'Unexpected keyword argument'):
gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
@test_util.run_in_graph_and_eager_modes
def testWeights(self):
with self.cached_session():
opt1 = adam.Adam(learning_rate=1.0)
var1 = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
loss1 = lambda: 3 * var1
opt_op_1 = opt1.minimize(loss1, [var1])
self.evaluate(variables.global_variables_initializer())
config = opt1.get_config()
opt2 = adam.Adam.from_config(config)
var2 = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
loss2 = lambda: 3 * var2
opt_op_2 = opt2.minimize(loss2, [var2])
weights = opt1.get_weights()
      # Assert that after set_weights both variables get updated to the same value.
self.evaluate(variables.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_1, opt_op_2])
self.assertAllClose(self.evaluate(var1), self.evaluate(var2))
self.assertEqual(1, self.evaluate(opt1.iterations))
self.assertEqual(1, self.evaluate(opt2.iterations))
var3 = resource_variable_ops.ResourceVariable([1.0, 2.0, 3.0],
dtype=dtypes.float32)
var4 = resource_variable_ops.ResourceVariable([4.0, 5.0, 6.0],
dtype=dtypes.float32)
loss3 = lambda: 3 * var3 + 5 * var4
opt_op_3 = opt1.minimize(loss3, [var3, var4])
      # Assert that set_weights raises ValueError since the weight list does not match.
self.evaluate(variables.global_variables_initializer())
weights = opt1.get_weights()
with self.assertRaisesRegexp(ValueError, 'but the optimizer was'):
opt2.set_weights(weights)
# Assert set_weights and variables get updated to same value.
var5 = resource_variable_ops.ResourceVariable([1.0, 2.0, 3.0],
dtype=dtypes.float32)
var6 = resource_variable_ops.ResourceVariable([4.0, 5.0, 6.0],
dtype=dtypes.float32)
loss4 = lambda: 3 * var5 + 5 * var6
opt_op_4 = opt2.minimize(loss4, [var5, var6])
self.evaluate(variables.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_3, opt_op_4])
self.assertAllClose(
self.evaluate([var3, var4]), self.evaluate([var5, var6]))
@test_util.run_in_graph_and_eager_modes
def testGettingHyperParameters(self):
opt = adam.Adam(learning_rate=1.0)
var = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
lr = self.evaluate(opt.lr)
self.assertEqual(1.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(3.0))
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
with self.assertRaises(AttributeError):
opt.not_an_attr += 3
@test_util.run_in_graph_and_eager_modes
def testGettingHyperParametersWithLrInConstructor(self):
opt = gradient_descent.SGD(lr=3.0)
var = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertTrue(isinstance(opt.lr, resource_variable_ops.ResourceVariable))
self.assertTrue(
isinstance(opt.learning_rate, resource_variable_ops.ResourceVariable))
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(4.0))
lr = self.evaluate(opt.lr)
self.assertEqual(4.0, lr)
@test_util.run_in_graph_and_eager_modes
def testOptimizerWithKerasModel(self):
a = input_layer.Input(shape=(3,), name='input_a')
b = input_layer.Input(shape=(3,), name='input_b')
dense = core.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = core.Dropout(0.5, name='dropout')(c)
model = training.Model([a, b], [d, e])
optimizer = gradient_descent.SGD(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
@test_util.run_in_graph_and_eager_modes
def testOptimizerWithCallbacks(self):
input_np = np.random.random((10, 3))
output_np = np.random.random((10, 4))
a = input_layer.Input(shape=(3,), name='input_a')
model = sequential.Sequential()
model.add(core.Dense(4, name='dense'))
model.add(core.Dropout(0.5, name='dropout'))
model(a)
optimizer = gradient_descent.SGD(learning_rate=0.1)
model.compile(optimizer, loss='mse', metrics=['mae'])
# This does not reduce the LR after the first epoch (due to low delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
# This should reduce the LR after the first epoch (due to high delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=5,
verbose=2)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def testOptimizerSetIterations(self):
global_step = training_util.get_or_create_global_step()
opt = adam.Adam(learning_rate=1.0)
opt.iterations = global_step
var = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
self.evaluate(variables.global_variables_initializer())
init_step_value = self.evaluate(global_step)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
new_step_value = self.evaluate(global_step)
self.assertEqual(new_step_value, init_step_value + 1)
def testVarKey(self):
with context.graph_mode():
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
self.assertTrue(a._in_graph_mode)
self.assertTrue(b._in_graph_mode)
var_key = optimizer_v2._var_key(a)
self.assertEqual('var', var_key)
var_key = optimizer_v2._var_key(b)
self.assertEqual('var_1', var_key)
@keras_parameterized.run_with_all_model_types
class OptimizersCompatibilityTest(keras_parameterized.TestCase):
def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
np.random.seed(1331)
with self.cached_session():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = keras.utils.to_categorical(y)
num_hidden = 5
model_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_v1.compile(opt_v1, loss='categorical_crossentropy', metrics=[])
model_v1.fit(x, y, batch_size=5, epochs=1)
model_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_v2.set_weights(model_v1.get_weights())
model_v2.compile(opt_v2, loss='categorical_crossentropy', metrics=[])
model_v2._make_train_function()
if test_weights:
opt_v2.set_weights(opt_v1.get_weights())
hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
rtol=1e-5, atol=1e-5)
self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
rtol=1e-5, atol=1e-5)
def testAdadeltaCompatibility(self):
opt_v1 = optimizers.Adadelta(lr=0.01)
opt_v2 = adadelta.Adadelta(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdagradCompatibility(self):
opt_v1 = optimizers.Adagrad(lr=0.01)
opt_v2 = adagrad.Adagrad(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamCompatibility(self):
opt_v1 = optimizers.Adam()
opt_v2 = adam.Adam()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamaxCompatibility(self):
opt_v1 = optimizers.Adamax(lr=0.01)
opt_v2 = adamax.Adamax(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testNadamCompatibility(self):
opt_v1 = optimizers.Nadam(lr=0.001)
opt_v2 = nadam.Nadam(learning_rate=0.001)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testMomentumCompatibility(self):
opt_v1 = optimizers.SGD(lr=0.01, momentum=0.9)
opt_v2 = gradient_descent.SGD(learning_rate=0.01, momentum=0.9)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testRMSpropCompatibility(self):
opt_v1 = optimizers.RMSprop()
opt_v2 = rmsprop.RMSprop()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testSGDCompatibility(self):
opt_v1 = optimizers.SGD(lr=0.01)
opt_v2 = gradient_descent.SGD(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2, False)
def testNumericEquivalenceForNesterovMomentum(self):
np.random.seed(1331)
with self.cached_session():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = keras.utils.to_categorical(y)
num_hidden = 5
model_k_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2.set_weights(model_k_v1.get_weights())
model_tf = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_tf.set_weights(model_k_v2.get_weights())
opt_k_v1 = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
opt_tf = momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.9, use_nesterov=True)
model_k_v1.compile(opt_k_v1, loss='categorical_crossentropy', metrics=[])
model_k_v2.compile(opt_k_v2, loss='categorical_crossentropy', metrics=[])
model_tf.compile(opt_tf, loss='categorical_crossentropy', metrics=[])
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)
self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
def testNumericEquivalenceForAmsgrad(self):
np.random.seed(1331)
with self.cached_session():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = keras.utils.to_categorical(y)
num_hidden = 5
model_k_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2.set_weights(model_k_v1.get_weights())
opt_k_v1 = optimizers.Adam(amsgrad=True)
opt_k_v2 = adam.Adam(amsgrad=True)
model_k_v1.compile(opt_k_v1, loss='categorical_crossentropy', metrics=[])
model_k_v2.compile(opt_k_v2, loss='categorical_crossentropy', metrics=[])
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
# Note: These tests are kept in a separate class to avoid bugs in some
# distributions of Python that break AutoGraph which is used by tf.function.
class OptimizerWithFunctionTest(test.TestCase):
def testBasic(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
loss = lambda: 3 * var
opt = adam.Adam(learning_rate=1.0)
@def_function.function
def fn():
opt.minimize(loss, [var])
return var
self.assertAllClose([0., 1.], fn(), atol=1e-4)
self.assertAllClose([-1, 0.], fn(), atol=1e-4)
def testVarKeyWithVarCreatedInEager(self):
with context.eager_mode():
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
@test_util.also_run_as_tf_function
def var_key_test():
self.assertFalse(a._in_graph_mode)
self.assertFalse(b._in_graph_mode)
var_key_a = optimizer_v2._var_key(a)
self.assertStartsWith(var_key_a, 'var_')
var_key_b = optimizer_v2._var_key(b)
self.assertStartsWith(var_key_b, 'var_')
self.assertNotEquals(var_key_a, var_key_b)
var_key_test()
if __name__ == '__main__':
test.main()
|
jendap/tensorflow
|
tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py
|
Python
|
apache-2.0
| 30,534
|
import quex.engine.state_machine.index as sm_index
from quex.engine.generator.skipper.common import line_counter_in_loop, \
end_delimiter_is_subset_of_indentation_counter_newline, \
get_character_sequence, \
get_on_skip_range_open, \
line_column_counter_in_loop
from quex.engine.generator.languages.address import __nice, get_label
import quex.engine.generator.languages.variable_db as variable_db
from quex.blackboard import setup as Setup
from quex.engine.misc.string_handling import blue_print
import quex.blackboard as blackboard
from quex.blackboard import E_StateIndices
from copy import copy
def do(Data):
ClosingSequence = Data["closer_sequence"]
ModeName = Data["mode_name"]
assert type(ModeName) in [str, unicode]
assert Data.has_key("indentation_counter_terminal_id")
indentation_counter_terminal_id = Data["indentation_counter_terminal_id"]
Mode = None
if ModeName != "":
Mode = blackboard.mode_db[ModeName]
code_str, db = get_skipper(ClosingSequence, Mode, indentation_counter_terminal_id)
return code_str, db
template_str = """
$$DELIMITER_COMMENT$$
text_end = QUEX_NAME(Buffer_text_end)(&me->buffer);
$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$
$$ENTRY$$
QUEX_BUFFER_ASSERT_CONSISTENCY(&me->buffer);
__quex_assert(QUEX_NAME(Buffer_content_size)(&me->buffer) >= Skipper$$SKIPPER_INDEX$$L );
    /* NOTE: If _input_p == end of buffer, then it will immediately drop out of the
* loop below and drop into the buffer reload procedure. */
/* Loop eating characters: Break-out as soon as the First Character of the Delimiter
* (FCD) is reached. Thus, the FCD plays also the role of the Buffer Limit Code. There
* are two reasons for break-out:
* (1) we reached a limit (end-of-file or buffer-limit)
* (2) there was really the FCD in the character stream
* This must be distinguished after the loop was exited. But, during the 'swallowing' we
* are very fast, because we do not have to check for two different characters. */
*text_end = Skipper$$SKIPPER_INDEX$$[0]; /* Overwrite BufferLimitCode (BLC). */
_$$SKIPPER_INDEX$$_LOOP:
$$INPUT_GET$$
$$IF_INPUT_EQUAL_DELIMITER_0$$
goto _$$SKIPPER_INDEX$$_LOOP_EXIT;
$$ENDIF$$
$$LC_COUNT_IN_LOOP$$
$$INPUT_P_INCREMENT$$ /* Now, BLC cannot occur. See above. */
goto _$$SKIPPER_INDEX$$_LOOP;
_$$SKIPPER_INDEX$$_LOOP_EXIT:
*text_end = QUEX_SETTING_BUFFER_LIMIT_CODE; /* Reset BLC. */
/* Case (1) and (2) from above can be distinguished easily:
*
* (1) Distance to text end == 0:
* End-of-File or Buffer-Limit.
* => goto to drop-out handling
*
* (2) Else:
     *         First character of the delimiter reached.
* => For the verification of the tail of the delimiter it is
* essential that it is loaded completely into the buffer.
* For this, it must be required:
*
* Distance to text end >= Delimiter length
*
* _input_p end
* | | end - _input_p >= 3
* [ ][R][E][M][#]
*
     *         Reload should be rare and is costly anyway. So in this
     *         case we simply enter the drop-out and start the search
     *         for the delimiter all over again.
*
* (2.1) Distance to text end < Delimiter length
* => goto to drop-out handling
* (2.2) Start detection of tail of delimiter
*
*/
if( QUEX_NAME(Buffer_distance_input_to_text_end)(&me->buffer) < (ptrdiff_t)Skipper$$SKIPPER_INDEX$$L ) {
/* (2.1) Reload required. */
goto $$GOTO_RELOAD$$;
}
$$LC_ON_FIRST_DELIMITER$$
/* (2.2) Test the remaining delimiter, but note, that the check must restart at '_input_p + 1'
* if any later check fails. */
$$INPUT_P_INCREMENT$$
    /* Example: Delimiter = '*', '/'; if we get ...[*][*][/]... then the first "*" causes
     * a drop-out of the 'swallowing loop' and the second "*" will mismatch
     * the required "/". But, then the second "*" must be presented to the
     * swallowing loop and the letter after it completes the 'match'.
     * (The whole discussion, of course, is superfluous if the range delimiter has length 1.) */
$$DELIMITER_REMAINDER_TEST$$
{
/* NOTE: The initial state does not increment the input_p. When it detects that
* it is located on a buffer border, it automatically triggers a reload. No
* need here to reload the buffer. */
$$LC_COUNT_END_PROCEDURE$$
/* No need for re-entry preparation. Acceptance flags and modes are untouched after skipping. */
$$GOTO_AFTER_END_OF_SKIPPING$$ /* End of range reached. */
}
$$RELOAD$$:
QUEX_BUFFER_ASSERT_CONSISTENCY_LIGHT(&me->buffer);
/* -- When loading new content it is checked that the beginning of the lexeme
* is not 'shifted' out of the buffer. In the case of skipping, we do not care about
* the lexeme at all, so do not restrict the load procedure and set the lexeme start
* to the actual input position. */
$$MARK_LEXEME_START$$
$$LC_COUNT_BEFORE_RELOAD$$
    /* -- According to case (2.1) it is possible that the _input_p does not point to the end
* of the buffer, thus we record the current position in the lexeme start pointer and
* recover it after the loading. */
me->buffer._input_p = text_end;
if( QUEX_NAME(Buffer_is_end_of_file)(&me->buffer) == false ) {
QUEX_NAME(buffer_reload_forward)(&me->buffer, (QUEX_TYPE_CHARACTER_POSITION*)position, PositionRegisterN);
/* Recover '_input_p' from lexeme start
* (inverse of what we just did before the loading) */
$$INPUT_P_TO_LEXEME_START$$
/* After reload, we need to increment _input_p. That's how the game is supposed to be played.
* But, we recovered from lexeme start pointer, and this one does not need to be incremented. */
text_end = QUEX_NAME(Buffer_text_end)(&me->buffer);
$$LC_COUNT_AFTER_RELOAD$$
QUEX_BUFFER_ASSERT_CONSISTENCY(&me->buffer);
$$GOTO_ENTRY$$
}
/* Here, either the loading failed or it is not enough space to carry a closing delimiter */
$$INPUT_P_TO_LEXEME_START$$
$$ON_SKIP_RANGE_OPEN$$
"""
def get_skipper(EndSequence, Mode=None, IndentationCounterTerminalID=None, OnSkipRangeOpenStr=""):
assert type(EndSequence) == list
assert len(EndSequence) >= 1
assert map(type, EndSequence) == [int] * len(EndSequence)
local_variable_db = {}
global template_str
LanguageDB = Setup.language_db
# Name the $$SKIPPER$$
skipper_index = sm_index.get()
# Determine the $$DELIMITER$$
delimiter_str, delimiter_comment_str = get_character_sequence(EndSequence)
delimiter_length = len(EndSequence)
tmp = []
LanguageDB.COMMENT(tmp, " Delimiter: %s" % delimiter_comment_str)
delimiter_comment_str = "".join(tmp)
# Determine the check for the tail of the delimiter
delimiter_remainder_test_str = ""
if len(EndSequence) != 1:
txt = ""
i = 0
for letter in EndSequence[1:]:
i += 1
txt += " %s\n" % LanguageDB.ASSIGN("input", LanguageDB.INPUT_P_DEREFERENCE(i-1))
txt += " %s" % LanguageDB.IF_INPUT("!=", "Skipper$$SKIPPER_INDEX$$[%i]" % i)
txt += " %s" % LanguageDB.GOTO(skipper_index)
txt += " %s" % LanguageDB.END_IF()
delimiter_remainder_test_str = txt
if not end_delimiter_is_subset_of_indentation_counter_newline(Mode, EndSequence):
goto_after_end_of_skipping_str = LanguageDB.GOTO(E_StateIndices.ANALYZER_REENTRY)
else:
# If there is indentation counting involved, then the counter's terminal id must
# be determined at this place.
assert IndentationCounterTerminalID is not None
# If the ending delimiter is a subset of what the 'newline' pattern triggers
# in indentation counting => move on to the indentation counter.
goto_after_end_of_skipping_str = LanguageDB.GOTO_TERMINAL(IndentationCounterTerminalID)
if OnSkipRangeOpenStr != "": on_skip_range_open_str = OnSkipRangeOpenStr
else: on_skip_range_open_str = get_on_skip_range_open(Mode, EndSequence)
# The main part
code_str = blue_print(template_str,
[
["$$DELIMITER_COMMENT$$", delimiter_comment_str],
["$$INPUT_P_INCREMENT$$", LanguageDB.INPUT_P_INCREMENT()],
["$$INPUT_P_DECREMENT$$", LanguageDB.INPUT_P_DECREMENT()],
["$$INPUT_GET$$", LanguageDB.ACCESS_INPUT()],
["$$IF_INPUT_EQUAL_DELIMITER_0$$", LanguageDB.IF_INPUT("==", "Skipper$$SKIPPER_INDEX$$[0]")],
["$$ENDIF$$", LanguageDB.END_IF()],
["$$ENTRY$$", LanguageDB.LABEL(skipper_index)],
["$$RELOAD$$", get_label("$reload", skipper_index)],
["$$GOTO_ENTRY$$", LanguageDB.GOTO(skipper_index)],
["$$INPUT_P_TO_LEXEME_START$$", LanguageDB.INPUT_P_TO_LEXEME_START()],
# When things were skipped, no change to acceptance flags or modes has
# happend. One can jump immediately to the start without re-entry preparation.
["$$GOTO_AFTER_END_OF_SKIPPING$$", goto_after_end_of_skipping_str],
["$$MARK_LEXEME_START$$", LanguageDB.LEXEME_START_SET()],
["$$DELIMITER_REMAINDER_TEST$$", delimiter_remainder_test_str],
["$$ON_SKIP_RANGE_OPEN$$", on_skip_range_open_str],
])
# Line and column number counting
code_str, reference_p_f = __lc_counting_replacements(code_str, EndSequence)
# The finishing touch
code_str = blue_print(code_str,
[["$$SKIPPER_INDEX$$", __nice(skipper_index)],
["$$GOTO_RELOAD$$", get_label("$reload", skipper_index)]])
if reference_p_f:
variable_db.enter(local_variable_db, "reference_p", Condition="QUEX_OPTION_COLUMN_NUMBER_COUNTING")
variable_db.enter(local_variable_db, "Skipper%i", "{ %s }" % delimiter_str, delimiter_length, Index=skipper_index)
variable_db.enter(local_variable_db, "Skipper%iL", "%i" % delimiter_length, Index=skipper_index)
variable_db.enter(local_variable_db, "text_end")
return code_str, local_variable_db
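# Note on the templating above: blue_print (from quex.engine.misc.string_handling)
# is used here as a plain placeholder substituter,
# blue_print(template, [["$$KEY$$", value], ...]), replacing every "$$KEY$$"
# occurrence. That is why get_skipper can layer its substitutions in several
# passes: main part, line/column counting, then the finishing touch for the
# skipper index and reload label.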
def __lc_counting_replacements(code_str, EndSequence):
"""Line and Column Number Counting(Range Skipper):
-- in loop if there appears a newline, then do:
increment line_n
set position from where to count column_n
-- at end of skipping do one of the following:
if end delimiter contains newline:
column_n = number of letters since last new line in end delimiter
increment line_n by number of newlines in end delimiter.
(NOTE: in this case the setting of the position from where to count
the column_n can be omitted.)
else:
column_n = current_position - position from where to count column number.
NOTE: On reload we do count the column numbers and reset the column_p.
"""
LanguageDB = Setup.language_db
def get_character_n_after_last_newline(Sequence):
tmp = copy(Sequence)
tmp.reverse()
try: return tmp.index(ord('\n'))
        except ValueError: return -1
char_n_after_last_newline = get_character_n_after_last_newline(EndSequence)
reference_p_def = ""
in_loop = ""
end_procedure = ""
before_reload = ""
after_reload = ""
on_first_delimiter = ""
reference_p_required_f = False
# Line/Column Counting:
newline_number_in_delimiter = EndSequence.count(ord('\n'))
if EndSequence == map(ord, "\n") or EndSequence == map(ord, "\r\n"):
# (1) If the end-delimiter is a newline
# => there cannot appear a newline inside the comment
# => IN LOOP: no line number increment
# no reference pointer required for column counting
end_procedure += " __QUEX_IF_COUNT_COLUMNS_SET((size_t)1);\n"
end_procedure += " __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"
else:
# (2) If end-delimiter is NOT newline
# => there can appear a newline inside the comment
if newline_number_in_delimiter == 0:
# -- no newlines in delimiter => line and column number
# must be counted.
in_loop = line_column_counter_in_loop()
end_procedure = " __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
" - reference_p));\n"
reference_p_required_f = True
else:
# -- newline inside delimiter => line number must be counted
# column number is fixed.
end_procedure = " __QUEX_IF_COUNT_COLUMNS_SET((size_t)%i);\n" \
% (char_n_after_last_newline + 1)
if EndSequence[0] == ord('\n') \
or len(EndSequence) > 1 and EndSequence[0:2] == [ord('\r'), ord('\n')]:
# If the first character in the sequence is newline, then the line counting
                # may be prevented by the loop exit, so we need to count it here.
on_first_delimiter = "/* First delimiter char was a newline */\n" + \
" __QUEX_IF_COUNT_LINES_ADD((size_t)1);\n"
end_procedure += " __QUEX_IF_COUNT_LINES_ADD((size_t)%i);\n" % (newline_number_in_delimiter - 1)
else:
in_loop = line_counter_in_loop()
end_procedure += " __QUEX_IF_COUNT_LINES_ADD((size_t)%i);\n" % newline_number_in_delimiter
if reference_p_required_f:
reference_p_def = " __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
before_reload = " __QUEX_IF_COUNT_COLUMNS_ADD((size_t)(QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer)\n" + \
" - reference_p));\n"
after_reload = " __QUEX_IF_COUNT_COLUMNS(reference_p = QUEX_NAME(Buffer_tell_memory_adr)(&me->buffer));\n"
if len(EndSequence) > 1:
end_procedure = "%s\n%s" % (LanguageDB.INPUT_P_ADD(len(EndSequence)-1), end_procedure)
return blue_print(code_str,
[["$$LC_COUNT_COLUMN_N_POINTER_DEFINITION$$", reference_p_def],
["$$LC_COUNT_IN_LOOP$$", in_loop],
["$$LC_COUNT_END_PROCEDURE$$", end_procedure],
["$$LC_COUNT_BEFORE_RELOAD$$", before_reload],
["$$LC_COUNT_AFTER_RELOAD$$", after_reload],
["$$LC_ON_FIRST_DELIMITER$$", on_first_delimiter],
]), \
reference_p_required_f
|
coderjames/pascal
|
quex-0.63.1/quex/engine/generator/skipper/range.py
|
Python
|
bsd-2-clause
| 16,427
|
import re
import sys
import os
import time
from ruuvitag_sensor.url_decoder import UrlDecoder
mac_regex = '[0-9a-f]{2}([:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$'
if not sys.platform.startswith('linux') or os.environ.get('CI') == 'True':
# Use BleCommunicationDummy also for CI as it can't use gattlib
from ruuvitag_sensor.ble_communication import BleCommunicationDummy
ble = BleCommunicationDummy()
else:
from ruuvitag_sensor.ble_communication import BleCommunicationNix
ble = BleCommunicationNix()
# TODO: Split this class to common functions and RuuviTagSensor
class RuuviTagSensor(object):
def __init__(self, mac):
if not re.match(mac_regex, mac.lower()):
raise ValueError('{} is not valid mac address'.format(mac))
self._mac = mac
self._state = {}
self._data = None
@property
def mac(self):
return self._mac
@property
def state(self):
return self._state
@staticmethod
def get_data(mac):
raw = ble.get_data(mac)
return RuuviTagSensor.convert_data(raw)
@staticmethod
def convert_data(raw):
"""
        Convert hexadecimal data to string and validate that the data is from a RuuviTag.
        The encoded data part comes after 'ruu.vi/#' or 'r/'.
Returns:
Encoded sensor data part in string
"""
try:
            # TODO: Fix conversion so converted data will show https://ruu.vi/# and https://r/
# Now it has e.g. ▲☻☺♠♥♥■■►♥ruu.vi/#AjwYAMFc
base16_split = [raw[i:i + 2] for i in range(0, len(raw), 2)]
selected_hexs = filter(lambda x: int(x, 16) < 128, base16_split)
characters = [chr(int(c, 16)) for c in selected_hexs]
data = ''.join(characters)
# take only part after ruu.vi/# or r/
index = data.find('ruu.vi/#')
if index > -1:
return data[(index + 8):]
else:
index = data.find('r/')
if index > -1:
return data[(index + 2):]
return None
except:
return None
@staticmethod
def find_ruuvitags():
"""
        Find all RuuviTags. The function prints the MAC and the state of each sensor found.
        It runs until it is stopped; stop execution with Ctrl+C.
Returns:
Dictionary containing mac and state of found sensors
"""
print('Finding RuuviTags. Stop with Ctrl+C.')
datas = dict()
for ble_data in ble.get_datas():
# If mac already in datas continue
if ble_data[0] in datas:
continue
encoded = RuuviTagSensor.convert_data(ble_data[1])
            # Check that the encoded data is valid RuuviTag sensor data
if encoded is not None:
state = UrlDecoder().decode_data(encoded)
if state is not None:
datas[ble_data[0]] = state
print(ble_data[0])
print(state)
return datas
@staticmethod
def get_data_for_sensors(macs, search_duratio_sec=5):
"""
        Get latest data for the sensors in the macs list.
Args:
macs: List of mac addresses
search_duratio_sec: Search duration in seconds. Default 5.
Returns:
Dictionary containing mac and state of found sensors
"""
print('Get latest data for sensors. Search duration is {}s'.format(
search_duratio_sec))
print('MACs: {}'.format(macs))
start_time = time.time()
datas = dict()
data_iter = ble.get_datas()
for ble_data in data_iter:
if time.time() - start_time > search_duratio_sec:
data_iter.send(StopIteration)
break
# If mac in whitelist
if not ble_data[0] in macs:
continue
encoded = RuuviTagSensor.convert_data(ble_data[1])
            # Check that the encoded data is valid RuuviTag sensor data
if encoded is not None:
state = UrlDecoder().decode_data(encoded)
if state is not None:
datas[ble_data[0]] = state
return datas
def update(self):
"""
        Get latest data from the sensor and update own state.
Returns:
Latest state
"""
data = RuuviTagSensor.get_data(self._mac)
if data == self._data:
return self._state
self._data = data
if self._data is None:
self._state = {}
else:
self._state = UrlDecoder().decode_data(self._data)
return self._state
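if __name__ == '__main__':
    # Minimal sketch (not part of the original module): build a hex string the
    # way convert_data expects one and decode it. 'AjwYAMFc' is the sample
    # payload quoted in the comment inside convert_data.
    sample = ''.join('{:02x}'.format(ord(c)) for c in 'ruu.vi/#AjwYAMFc')
    print(RuuviTagSensor.convert_data(sample))  # -> AjwYAMFc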
|
juhi24/ilmaruuvi
|
ruuvitag_sensor/ruuvi.py
|
Python
|
mit
| 4,803
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "veganmekan.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
kemalbsoylu/veganmekan
|
manage.py
|
Python
|
mit
| 808
|
import os
from git import Repo
import tempfile
import uuid
import datetime
from .Base import BasePublisher
from shuttl.database import db
class GitPublisher(BasePublisher):
    ## Id of the publisher. Because of the way joined-table inheritance is set up in SQLAlchemy, this is a foreign key
id = db.Column(db.Integer, db.ForeignKey('base_publisher.id'), primary_key=True)
protocol = "GIT"
privateKeyPath = db.Column(db.String)
__mapper_args__ = {
'polymorphic_identity':'git_publisher',
}
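    # Publishing lifecycle, as implied by the method comments below: the base
    # class presumably calls _setUpConnection first, then publishDirectory /
    # publishFile for each object, and finally destroyConnection, which
    # commits the staged files and force-pushes them to 'origin'.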
## Sets up the connection to the remote server
def _setUpConnection(self):
jobID = str(uuid.uuid4())
self.baseDir = os.path.join("/tmp/", jobID)
os.mkdir(self.baseDir)
id_file = os.path.expanduser(self.privateKeyPath)
self.ssh_cmd = 'ssh -i %s' % id_file
self.repo = Repo.init(self.baseDir)
with self.repo.git.custom_environment(GIT_SSH_COMMAND=self.ssh_cmd):
self.origin = self.repo.create_remote('origin', self.hostname)
self.origin.fetch()
self.repo.create_head('master', self.origin.refs.master).set_tracking_branch(self.origin.refs.master)
pass
pass
@property
def publicKey(self):
path = os.path.expanduser("{0}.pub".format(self.privateKeyPath))
content = ""
with file(path, "r") as fi:
content = fi.read()
pass
return content
## Destroys the connection
def destroyConnection(self):
self.repo.index.commit("Published from Shuttl on {0}".format(datetime.datetime.now()))
with self.repo.git.custom_environment(GIT_SSH_COMMAND=self.ssh_cmd):
self.origin.push(force=True)
pass
pass
## Publishes a fileObject (required)
# \param file the file to publish
def publishFile(self, file):
filePath = file.fullPath[1:]
filePath = os.path.join(self.baseDir, filePath)
with open(filePath, "wb+") as fi:
content = file.buildContent(
website=file.website,
page=file,
organization=file.website.organization,
publisher=self
)
if type(content) == str:
content = content.encode()
pass
fi.write(content)
pass
self.repo.index.add([filePath])
pass
## Publishes a directory before the files (optional)
# \param object the object to publish
def publishDirectory(self, directory):
dirPath = directory.fullPath[1:]
dirPath = os.path.join(self.baseDir, dirPath)
try:
os.mkdir(dirPath)
pass
except FileExistsError:
            # The directory already exists; just ignore it.
pass
pass
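# Illustrative publish flow (a sketch, not part of the original class; the
# publisher, directory and file objects are assumed to come from the
# surrounding Shuttl application):
#
#     publisher._setUpConnection()           # clone the remote into a temp dir
#     publisher.publishDirectory(directory)  # directories are created first
#     publisher.publishFile(file_object)     # content is written and staged
#     publisher.destroyConnection()          # commit and force-push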
|
shuttl-io/shuttl
|
shuttl/Models/Publishers/GitPublisher.py
|
Python
|
mit
| 2,827
|
from rnndatasets.synthetic.binding.binding import *
|
PFCM/datasets
|
rnndatasets/synthetic/binding/__init__.py
|
Python
|
bsd-3-clause
| 52
|
#!/usr/bin/env python
__author__ = "Moxie Marlinspike"
__email__ = "moxie@thoughtcrime.org"
__license__= """
Copyright (c) 2009 Moxie Marlinspike <moxie@thoughtcrime.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
"""
import os, sys
from knockknock.Profiles import Profiles
from knockknock.Profile import Profile
DAEMON_DIR = '/etc/knockknock.d/'
PROFILES_DIR = DAEMON_DIR + 'profiles/'
def usage():
print "knockknock-genprofile <profileName> <knockPort>"
sys.exit(3)
def checkProfile(profileName):
if (os.path.isdir(PROFILES_DIR + profileName)):
print "Profile already exists. First rm " + PROFILES_DIR + profileName + "/"
sys.exit(0)
def checkPortConflict(knockPort):
if (not os.path.isdir(PROFILES_DIR)):
return
profiles = Profiles(PROFILES_DIR)
matchingProfile = profiles.getProfileForPort(knockPort)
if (matchingProfile != None):
print "A profile already exists for knock port: " + str(knockPort) + " at this location: " + matchingProfile.getDirectory()
def createDirectory(profileName):
if not os.path.isdir(DAEMON_DIR):
os.mkdir(DAEMON_DIR)
if not os.path.isdir(PROFILES_DIR):
os.mkdir(PROFILES_DIR)
if not os.path.isdir(PROFILES_DIR + profileName):
os.mkdir(PROFILES_DIR + profileName)
def main(argv):
if len(argv) != 2:
usage()
profileName = argv[0]
knockPort = argv[1]
checkProfile(profileName)
checkPortConflict(knockPort)
createDirectory(profileName)
random = open('/dev/urandom', 'rb')
cipherKey = random.read(16)
macKey = random.read(16)
counter = 0
profile = Profile(PROFILES_DIR + profileName, cipherKey, macKey, counter, knockPort)
profile.serialize()
random.close()
print "Keys successfully generated in " + PROFILES_DIR + profileName
if __name__ == '__main__':
main(sys.argv[1:])
|
vejeshv/main_project
|
knockknock-genprofile.py
|
Python
|
gpl-3.0
| 2,557
|
import json
import os.path
import unittest.mock as mock
from tests.plugins import PluginTestCase
import plugins.weather
f = "plugins.weather.get_owm_data"
def get_json(state):
path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"files",
"openweathermap_" + state + ".json",
)
with open(path, "r") as f:
return json.load(f)
class WeatherPluginTest(PluginTestCase):
def create_plugin(self):
return plugins.weather.WeatherPlugin(self.bot, self.channel)
@mock.patch(f, return_value=get_json("edinburgh"))
def test_cmd_simple(self, mock):
ret = self.cmd("weather edinburgh")
self.assertEqual(
"Weather in Edinburgh, GB: light rain - temperature: 17.64°C - wind: 1.5m/s",
ret,
)
@mock.patch(f, return_value=get_json("tel_aviv"))
def test_cmd_multiword_city(self, mock):
ret = self.cmd("weather tel aviv")
self.assertEqual(
"Weather in Tel Aviv District, IL: few clouds - temperature: 33.11°C - wind: 5.7m/s",
ret,
)
@mock.patch(f, return_value=get_json("404"))
def test_not_found(self, mock):
ret = self.cmd("weather asljkhajkhf")
self.assertEqual("Error: City not found", ret)
|
anlutro/botologist
|
tests/plugins/weather_test.py
|
Python
|
mit
| 1,289
|
#!/usr/bin/env python
# flake8: noqa
from ansible import errors
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
import StringIO
def get_host_ips(topo):
host_public_ips = []
for group in topo['gcloud_gce_res']:
for instance in group['instance_data']:
host_public_ips.append(instance['public_ip'])
return host_public_ips
def get_layout_hosts(inv):
return inv['hosts'].keys()
def get_layout_host_groups(inv):
return inv['host_groups'].keys()
def add_sections(config, section_list):
for section in section_list:
config.add_section(section)
return config
def set_children(config, inv):
for host_group in inv['host_groups']:
if "children" in inv['host_groups'][host_group]:
config.add_section(host_group+":"+"children")
for child in inv['host_groups'][host_group]['children']:
config.set(host_group+":"+"children", child)
return config
def set_vars(config, inv):
for host_group in inv['host_groups']:
if "vars" in inv['host_groups'][host_group]:
config.add_section(host_group+":"+"vars")
for var in inv['host_groups'][host_group]['vars']:
config.set(host_group+":"+"vars",
var,
inv['host_groups'][host_group]['vars'][var])
return config
def add_ips_to_groups(config, inven_hosts, layout):
# create a ip to host mapping based on count
ip_to_host = {}
for host_name in layout['hosts']:
count = layout['hosts'][host_name]['count']
host_list = []
for i in range(0, count):
item = inven_hosts.pop()
host_list.append(item)
ip_to_host[host_name] = host_list
# add ips to the host groups in inventory
for host_name in layout['hosts']:
host_ips = ip_to_host[host_name]
for ip in host_ips:
for host_group in layout['hosts'][host_name]['host_groups']:
config.set(host_group, ip)
return config
def add_common_vars(config, host_groups, layout):
common_vars = layout['vars']
for group in host_groups:
items = dict(config.items(group)).keys()
config.remove_section(group)
config.add_section(group)
for item in items:
host_string = item
for var in common_vars:
if common_vars[var] == "__IP__":
host_string += " " + var + "=" + item + " "
config.set(group, host_string)
return config
def gcloud_inventory(topo, layout):
inventory = ConfigParser(allow_no_value=True)
no_of_groups = len(topo['gcloud_gce_res'])
layout_hosts = get_layout_hosts(layout)
inven_hosts = get_host_ips(topo)
# adding sections to respective host groups
host_groups = get_layout_host_groups(layout)
inventory = add_sections(inventory, host_groups)
# set children for each host group
inventory = set_children(inventory, layout)
# set vars for each host group
inventory = set_vars(inventory, layout)
# add ip addresses to each host
inventory = add_ips_to_groups(inventory, inven_hosts, layout)
inventory = add_common_vars(inventory, host_groups, layout)
output = StringIO.StringIO()
inventory.write(output)
return output.getvalue()
class FilterModule(object):
    ''' A filter to build an Ansible inventory file from gcloud topology and layout data '''
def filters(self):
return {
'gcloud_inventory': gcloud_inventory
}
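# Illustrative playbook usage (an assumption about the calling play; the
# variables `topology` and `layout` are hypothetical):
#
#     - set_fact:
#         inventory_text: "{{ topology | gcloud_inventory(layout) }}"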
|
agharibi/linchpin
|
linchpin/provision/filter_plugins/gcloud_inventory.py
|
Python
|
gpl-3.0
| 3,571
|
#!/usr/bin/env python
import numpy as np
import sys
import numpy.linalg as lg
import argparse as ap
atb=1.88971616463
parser=ap.ArgumentParser(description="Parse polarisation from Gaussian logfile")
parser.add_argument("-f","--logfile", help="Gaussian logfile")
args=parser.parse_args()
inputfile=args.logfile
check=False
with open (inputfile,"r") as f:
for line in f:
if "Exact polarizability" in line :
check=True
            polarstring = line.split(":")[1]
print polarstring
xx=float(polarstring[0:8])
xy=float(polarstring[8:16])
yy=float(polarstring[16:24])
xz=float(polarstring[24:32])
yz=float(polarstring[32:40])
            zz=float(polarstring[40:58])  # last 8-char field; the over-wide slice is harmless since float() strips whitespace
polartensor=np.array([[xx,xy,xz],[xy,yy,yz],[xz,yz,zz]])
if not check:
    print "There is no polarisability in the file. Leaving."
    sys.exit()
else:
polartensorangstrom=polartensor/(atb**3)
polartensorangstromdiag=np.diag(lg.eigvalsh(polartensorangstrom))
print "Read in string"
print polarstring
print "Convert to tensor"
print "xx, xy, xz, yy, yz, zz"
print "{0:4.4f} {1:4.4f} {2:4.4f} {3:4.4f} {4:4.4f} {5:4.4f}".format(polartensor[0,0],polartensor[0,1],polartensor[0,2],polartensor[1,1],polartensor[1,2],polartensor[2,2])
print "Polarisability tensor in A^3, non diagonal"
print "xx, xy, xz, yy, yz, zz"
print "{0:4.4f} {1:4.4f} {2:4.4f} {3:4.4f} {4:4.4f} {5:4.4f}".format(polartensorangstrom[0,0],polartensorangstrom[0,1],polartensorangstrom[0,2],polartensorangstrom[1,1],polartensorangstrom[1,2],polartensorangstrom[2,2])
print "Diagonal tensor in A^3"
print "xx, xy, xz, yy, yz, zz"
print "{0:4.4f} 0.0 0.0 {1:4.4f} 0.0 {2:4.4f}".format(polartensorangstromdiag[0,0],polartensorangstromdiag[1,1],polartensorangstromdiag[2,2])
|
12AngryMen/votca-scripts
|
Gaussian/Gaussian_parse_polar.py
|
Python
|
apache-2.0
| 2,005
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2005, Tim Potter <tpot@samba.org>
# Copyright 2006 John-Mark Gurney <gurney_j@resnet.uroegon.edu>
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006,2007,2008,2009 Frank Scholz <coherence@beebits.net>
# Copyright 2016 Erwan Martin <public@fzwte.net>
#
# Implementation of a SSDP server.
#
import random
import time
import socket
import logging
from email.utils import formatdate
from errno import ENOPROTOOPT
SSDP_PORT = 1900
SSDP_ADDR = '239.255.255.250'
SERVER_ID = 'Wifi 104 SSDP Server'
logger = logging.getLogger('ssdp')
logger.setLevel('WARNING')
class SSDPServer:
"""A class implementing a SSDP server. The notify_received and
searchReceived methods are called when the appropriate type of
datagram is received by the server."""
known = {}
def __init__(self):
self.sock = None
def run(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
try:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except socket.error as le:
# RHEL6 defines SO_REUSEPORT but it doesn't work
if le.errno == ENOPROTOOPT:
pass
else:
raise
addr = socket.inet_aton(SSDP_ADDR)
interface = socket.inet_aton('0.0.0.0')
cmd = socket.IP_ADD_MEMBERSHIP
self.sock.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
self.sock.bind(('0.0.0.0', SSDP_PORT))
self.sock.settimeout(1)
        try:
            while True:
                try:
                    data, addr = self.sock.recvfrom(1024)
                    self.datagram_received(data, addr)
                except socket.timeout:
                    continue
        finally:
            # send byebye notifications on the way out
            self.shutdown()
def shutdown(self):
for st in self.known:
if self.known[st]['MANIFESTATION'] == 'local':
self.do_byebye(st)
def datagram_received(self, data, host_port):
"""Handle a received multicast datagram."""
(host, port) = host_port
try:
header, payload = data.decode().split('\r\n\r\n')[:2]
except ValueError as err:
logger.error(err)
return
lines = header.split('\r\n')
cmd = lines[0].split(' ')
lines = map(lambda x: x.replace(': ', ':', 1), lines[1:])
lines = filter(lambda x: len(x) > 0, lines)
headers = [x.split(':', 1) for x in lines]
headers = dict(map(lambda x: (x[0].lower(), x[1]), headers))
logger.info('SSDP command %s %s - from %s:%d' % (cmd[0], cmd[1], host, port))
logger.debug('with headers: {}.'.format(headers))
if cmd[0] == 'M-SEARCH' and cmd[1] == '*':
# SSDP discovery
self.discovery_request(headers, (host, port))
elif cmd[0] == 'NOTIFY' and cmd[1] == '*':
# SSDP presence
logger.debug('NOTIFY *')
else:
logger.warning('Unknown SSDP command %s %s' % (cmd[0], cmd[1]))
def register(self, manifestation='', usn='', st='', location='', server=SERVER_ID, cache_control='max-age=1800', silent=False,
host=None):
"""Register a service or device that this SSDP server will
respond to."""
if not manifestation or not usn or not st or not location:
raise Exception('invalid arguments to register SSDP server')
        logger.info('Registering %s (%s)' % (st, location))
self.known[usn] = {}
self.known[usn]['USN'] = usn
self.known[usn]['LOCATION'] = location
self.known[usn]['ST'] = st
self.known[usn]['EXT'] = ''
self.known[usn]['SERVER'] = server
self.known[usn]['CACHE-CONTROL'] = cache_control
self.known[usn]['MANIFESTATION'] = manifestation
self.known[usn]['SILENT'] = silent
self.known[usn]['HOST'] = host
self.known[usn]['last-seen'] = time.time()
if manifestation == 'local' and self.sock:
self.do_notify(usn)
def unregister(self, usn):
logger.info("Un-registering %s" % usn)
del self.known[usn]
def is_known(self, usn):
return usn in self.known
def send_it(self, response, destination, delay, usn):
logger.debug('send discovery response delayed by %ds for %s to %r' % (delay, usn, destination))
try:
self.sock.sendto(response.encode(), destination)
except (AttributeError, socket.error) as msg:
logger.warning("failure sending out byebye notification: %r" % msg)
def discovery_request(self, headers, host_port):
"""Process a discovery request. The response must be sent to
the address specified by (host, port)."""
(host, port) = host_port
logger.info('Discovery request from (%s,%d) for %s' % (host, port, headers['st']))
# Do we know about this service?
for i in self.known.values():
if i['MANIFESTATION'] == 'remote':
continue
if headers['st'] == 'ssdp:all' and i['SILENT']:
continue
if i['ST'] == headers['st'] or headers['st'] == 'ssdp:all':
response = ['HTTP/1.1 200 OK']
usn = None
for k, v in i.items():
if k == 'USN':
usn = v
if k not in ('MANIFESTATION', 'SILENT', 'HOST'):
response.append('%s: %s' % (k, v))
if usn:
response.append('DATE: %s' % formatdate(timeval=None, localtime=False, usegmt=True))
response.extend(('', ''))
delay = random.randint(0, int(headers['mx']))
self.send_it('\r\n'.join(response), (host, port), delay, usn)
def do_notify(self, usn):
"""Do notification"""
if self.known[usn]['SILENT']:
return
logger.info('Sending alive notification for %s' % usn)
resp = [
'NOTIFY * HTTP/1.1',
'HOST: %s:%d' % (SSDP_ADDR, SSDP_PORT),
'NTS: ssdp:alive',
]
stcpy = dict(self.known[usn].items())
stcpy['NT'] = stcpy['ST']
del stcpy['ST']
del stcpy['MANIFESTATION']
del stcpy['SILENT']
del stcpy['HOST']
del stcpy['last-seen']
resp.extend(map(lambda x: ': '.join(x), stcpy.items()))
resp.extend(('', ''))
        logger.debug('do_notify content: %s', resp)
        try:
            # the notification is sent twice, a common SSDP practice:
            # the protocol runs over lossy UDP
            self.sock.sendto('\r\n'.join(resp).encode(), (SSDP_ADDR, SSDP_PORT))
            self.sock.sendto('\r\n'.join(resp).encode(), (SSDP_ADDR, SSDP_PORT))
except (AttributeError, socket.error) as msg:
logger.warning("failure sending out alive notification: %r" % msg)
def do_byebye(self, usn):
"""Do byebye"""
logger.info('Sending byebye notification for %s' % usn)
resp = [
'NOTIFY * HTTP/1.1',
'HOST: %s:%d' % (SSDP_ADDR, SSDP_PORT),
'NTS: ssdp:byebye',
]
try:
stcpy = dict(self.known[usn].items())
stcpy['NT'] = stcpy['ST']
del stcpy['ST']
del stcpy['MANIFESTATION']
del stcpy['SILENT']
del stcpy['HOST']
del stcpy['last-seen']
resp.extend(map(lambda x: ': '.join(x), stcpy.items()))
resp.extend(('', ''))
            logger.debug('do_byebye content: %s', resp)
if self.sock:
try:
                    self.sock.sendto('\r\n'.join(resp).encode(), (SSDP_ADDR, SSDP_PORT))
except (AttributeError, socket.error) as msg:
logger.error("failure sending out byebye notification: %r" % msg)
except KeyError as msg:
logger.error("error building byebye notification: %r" % msg)
|
bluesliverx/smartthings-src
|
apps/wifi-104-ssdp-server/lib/ssdp.py
|
Python
|
apache-2.0
| 8,142
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
From my blog post:
<http://tanghaibao.blogspot.com/2010/02/getting-phylogeny-from-list-of.html>
Example:
>>> mylist = [3702, 3649, 3694, 3880]
>>> t = TaxIDTree(mylist)
>>> print t
(((Carica_papaya,Arabidopsis_thaliana)Brassicales,(Medicago_truncatula,Populus_trichocarpa)fabids)rosids);
>>> t.print_tree()
<BLANKLINE>
/-Carica_papaya
/---|
| \-Arabidopsis_thaliana
---- /---|
| /-Medicago_truncatula
\---|
\-Populus_trichocarpa
"""
import sys
import time
import logging
from urllib2 import urlopen, URLError, HTTPError
from ClientForm import ParseResponse
from BeautifulSoup import BeautifulSoup
from jcvi.utils.cbook import memoized
from jcvi.apps.base import OptionParser, ActionDispatcher
URL = "http://itol.embl.de/other_trees.shtml"
class TaxIDTree(object):
def __init__(self, list_of_taxids):
# If only one taxid provided, get full tree with nameExp
# else, get default tree
if isinstance(list_of_taxids, int): # single taxon
list_of_taxids = [list_of_taxids]
form_element_id = "nameExp"
else:
form_element_id = "nameCol"
# the data to send in
form_data = "\n".join(str(x) for x in list_of_taxids)
success = False
while not success:
try:
response = urlopen(URL)
success = True
except (URLError, HTTPError, RuntimeError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
forms = ParseResponse(response, backwards_compat=False)
form = forms[0]
form["ncbiIDs"] = form_data
page = urlopen(form.click()).read()
soup = BeautifulSoup(page)
self.newick = ""
for element in soup("textarea"):
if element["id"] == form_element_id:
self.newick = str(element.contents[0])
if self.newick == "":
print soup
def __str__(self):
return self.newick
def print_tree(self):
from ete2 import Tree
t = Tree(self.newick, format=8)
print t
def get_names(list_of_taxids):
"""
>>> mylist = [3702, 3649, 3694, 3880]
>>> get_names(mylist)
['Arabidopsis thaliana', 'Carica papaya', 'Populus trichocarpa', 'Medicago truncatula']
"""
from jcvi.apps.fetch import batch_taxonomy
list_of_taxids = [str(x) for x in list_of_taxids]
return list(batch_taxonomy(list_of_taxids))
def get_taxids(list_of_names):
"""
>>> mylist = ['Arabidopsis thaliana', 'Carica papaya']
>>> get_taxids(mylist)
[1, 2]
"""
from jcvi.apps.fetch import batch_taxids
return [int(x) for x in batch_taxids(list_of_names)]
def MRCA(list_of_taxids):
"""
    This gets the most recent common ancestor (MRCA) for a list of taxids
>>> mylist = [3702, 3649, 3694, 3880]
>>> MRCA(mylist)
'rosids'
"""
from ete2 import Tree
t = TaxIDTree(list_of_taxids)
t = Tree(str(t), format=8)
ancestor = t.get_common_ancestor(*t.get_leaves())
return ancestor.name
@memoized
def isPlantOrigin(taxid):
"""
Given a taxid, this gets the expanded tree which can then be checked to
see if the organism is a plant or not
>>> isPlantOrigin(29760)
True
"""
assert isinstance(taxid, int)
t = TaxIDTree(taxid)
try:
return "Viridiplantae" in str(t)
except AttributeError:
raise ValueError("{0} is not a valid ID".format(taxid))
def main():
actions = (
('newick', 'query a list of IDs to newick'),
('test', 'test taxonomy module'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def test(args):
print "Testing isPlantOrigin():"
print 3702, isPlantOrigin(3702) # Arabidopsis thaliana
print 10090, isPlantOrigin(10090) # Mus musculus
print "\nTest cache by 10K calls:"
for i in xrange(10000):
isPlantOrigin(3702)
isPlantOrigin(10090)
print "done"
print "\nTest invalid ID:"
print 10099, isPlantOrigin(10099) # Wrong ID
def newick(args):
"""
%prog newick idslist
Query a list of IDs to retrieve phylogeny.
"""
p = OptionParser(newick.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
mylist = [x.strip() for x in open(idsfile) if x.strip()]
print get_taxids(mylist)
t = TaxIDTree(mylist)
print t
if __name__ == '__main__':
main()
|
sgordon007/jcvi_062915
|
utils/taxonomy.py
|
Python
|
bsd-2-clause
| 4,675
|
# -*- coding: utf-8 -*-
"""Normalizes access to the HTTP libraries.
"""
from __future__ import absolute_import, unicode_literals, division
from six.moves import http_client as client
from .request import Request
from .response import Response
from . import exceptions
__all__ = [
'client',
'Request',
'Response',
'exceptions'
]
try:
# Attempt to get additional status codes (added in python 3.2)
getattr(client, 'PERMANENT_REDIRECT')
getattr(client, 'PRECONDITION_REQUIRED')
getattr(client, 'TOO_MANY_REQUESTS')
getattr(client, 'REQUEST_HEADER_FIELDS_TOO_LARGE')
getattr(client, 'NETWORK_AUTHENTICATION_REQUIRED')
except AttributeError:
# Don't have em; add them.
client.PERMANENT_REDIRECT = 308
client.PRECONDITION_REQUIRED = 428
client.TOO_MANY_REQUESTS = 429
client.REQUEST_HEADER_FIELDS_TOO_LARGE = 431
client.NETWORK_AUTHENTICATION_REQUIRED = 511
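# Example (illustrative): once this module is imported, the backported status
# codes can be used uniformly across Python versions, e.g.:
#
#     from armet.http import client
#     assert client.TOO_MANY_REQUESTS == 429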
|
armet/python-armet
|
armet/http/__init__.py
|
Python
|
mit
| 919
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
doRasterize.py
---------------------
Date : June 2010
Copyright : (C) 2010 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'June 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import SIGNAL, QFileInfo, QTextCodec
from PyQt4.QtGui import QWidget, QMessageBox, QErrorMessage
from ui_widgetRasterize import Ui_GdalToolsWidget as Ui_Widget
from widgetPluginBase import GdalToolsBasePluginWidget as BasePluginWidget
import GdalTools_utils as Utils
class GdalToolsDialog(QWidget, Ui_Widget, BasePluginWidget):
def __init__(self, iface):
QWidget.__init__(self)
self.iface = iface
self.setupUi(self)
BasePluginWidget.__init__(self, self.iface, "gdal_rasterize")
self.outSelector.setType( self.outSelector.FILE )
# set the default QSpinBoxes and QProgressBar value
self.widthSpin.setValue(3000)
self.heightSpin.setValue(3000)
self.horizresSpin.setValue(1)
self.vertresSpin.setValue(1)
self.lastEncoding = Utils.getLastUsedEncoding()
self.setParamsStatus([
(self.inSelector, SIGNAL("filenameChanged()")),
(self.outSelector, SIGNAL("filenameChanged()")),
(self.attributeComboBox, SIGNAL("currentIndexChanged(int)")),
( [self.widthSpin, self.heightSpin], SIGNAL( "valueChanged(int)" )),
( [self.horizresSpin, self.vertresSpin], SIGNAL( "valueChanged(double)" ))
])
self.connect(self.inSelector, SIGNAL("selectClicked()"), self.fillInputFileEdit)
self.connect(self.outSelector, SIGNAL("selectClicked()"), self.fillOutputFileEdit)
self.connect(self.inSelector, SIGNAL("layerChanged()"), self.fillFieldsCombo)
self.connect(self.radioSetSize, SIGNAL("toggled(bool)"), self.someValueChanged)
self.connect(self.radioSetResolution, SIGNAL("toggled(bool)"), self.someValueChanged)
def onLayersChanged(self):
self.inSelector.setLayers( Utils.LayerRegistry.instance().getVectorLayers() )
def fillFieldsCombo(self):
if self.inSelector.layer() is None:
return
self.lastEncoding = self.inSelector.layer().dataProvider().encoding()
self.loadFields( self.getInputFileName() )
def fillInputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedVectorFilter()
inputFile, encoding = Utils.FileDialog.getOpenFileName(self, self.tr( "Select the input file for Rasterize" ), Utils.FileFilter.allVectorsFilter(), lastUsedFilter, True)
if not inputFile:
return
Utils.FileFilter.setLastUsedVectorFilter(lastUsedFilter)
self.inSelector.setFilename(inputFile)
self.lastEncoding = encoding
self.loadFields( inputFile )
def fillOutputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
        # rasterize supports output file creation since GDAL 1.8
gdalVersion = Utils.GdalConfig.versionNum()
if gdalVersion >= 1800:
fileDialogFunc = Utils.FileDialog.getSaveFileName
filters = Utils.FileFilter.saveRastersFilter()
else:
fileDialogFunc = Utils.FileDialog.getOpenFileName
filters = Utils.FileFilter.allRastersFilter()
outputFile = fileDialogFunc(self, self.tr( "Select the raster file to save the results to" ), filters, lastUsedFilter)
if not outputFile:
return
Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
self.outSelector.setFilename(outputFile)
        # either -ts or -tr is required to create the output file
if gdalVersion >= 1800:
if not QFileInfo(outputFile).exists():
QMessageBox.information( self, self.tr( "Output size or resolution required" ), self.tr( "The output file doesn't exist. You must set up the output size or resolution to create it." ) )
self.radioSetSize.setChecked(True)
def getArguments(self):
arguments = []
if self.attributeComboBox.currentIndex() >= 0:
arguments.append( "-a" )
arguments.append( self.attributeComboBox.currentText() )
if self.radioSetSize.isChecked():
arguments.append( "-ts" )
arguments.append( self.widthSpin.value() )
arguments.append( self.heightSpin.value() )
if self.radioSetResolution.isChecked():
arguments.append( "-tr" )
arguments.append( self.horizresSpin.value() )
arguments.append( self.vertresSpin.value() )
inputFn = self.getInputFileName()
if inputFn:
arguments.append( "-l" )
arguments.append( QFileInfo( inputFn ).baseName() )
arguments.append( inputFn )
arguments.append( self.getOutputFileName() )
return arguments
def getInputFileName(self):
return self.inSelector.filename()
def getOutputFileName(self):
return self.outSelector.filename()
def addLayerIntoCanvas(self, fileInfo):
self.iface.addRasterLayer(fileInfo.filePath())
def loadFields(self, vectorFile):
self.attributeComboBox.clear()
if not vectorFile:
return
try:
(fields, names) = Utils.getVectorFields(vectorFile)
except Utils.UnsupportedOGRFormat, e:
QErrorMessage(self).showMessage( e.args[0] )
self.inSelector.setLayer( None )
return
ncodec = QTextCodec.codecForName(self.lastEncoding)
for name in names:
self.attributeComboBox.addItem( ncodec.toUnicode(name) )
|
herow/planning_qgis
|
python/plugins/GdalTools/tools/doRasterize.py
|
Python
|
gpl-2.0
| 6,287
|
import math
class ColorPoint:
"""
Simple color-storage class; stores way-points on a color ramp
"""
def __init__(self,idx,col,colType):
# index, X-coordinate, on a palette
self.idx = idx
# color; usually an RGBA quad
self.color = col
# One of ColorTypes members
self.colorType = colType
def __str__(self):
return "(Index=%d; Color=(%0.3f,%0.3f,%0.3f,%0.3f); ColorType=%d)" % (self.idx, self.color[0], self.color[1], self.color[2], self.color[3], self.colorType)
class ColorTypes:
"""
Simple enumerated type for internal color formats
"""
RGBAi = 0
RGBAf = 1
HEX6 = 2
HEX8 = 3
class ColorRamp:
"""
Model for a simple color ramp
See __main__ below for usage.
"""
# we assume linear ramps for now
LINEAR = 0
GAUSSIAN = 1
EXPONENTIAL = 2
CIRCLE_RADIUS = 3
def __init__(self, nColors, *args, **kwargs):
# size of this ramp
self.nColors = nColors
# the list of RGBA float values
self.ramp = []
# ordered array of color indices
self.keys = {}
# ready to use; boolean; we need at least two
# color points to define a ramp
self.ready = False
#
if 'handle' in kwargs:
self.handle = kwargs['handle']
if 'name' in kwargs:
self.name = kwargs['name']
# list of unique ids for objects on the map canvas
self.canvas_ids = {}
def __str__(self):
"""
instances created with ColorRamp(nColors=XYZ,name="foo") will return "foo"
otherwise a long-debug-friendly description is returned.
"""
if getattr(self,'name',None)!=None:
return self.name
else:
s = "Object Name: Nameless\n"
s+= "Ready to use: " + str(self.ready) + "\n"
s+= "Keys: " + str(self.keys.keys()) + "\n"
for k in self.keys:
s += "Color[%d] = %s\n" % (k,self.keys[k])
s += "ColorRamp with %d colors follows...\n" % self.nColors
if self.ready:
s += str(self.getRamp()) + "\n"
else:
s += "[]\n"
return s
def addColor(self, idx, col, colType=ColorTypes.RGBAf, colScale=1.0):
"""
adds color, 'col', to ramp at index 'idx'. If 'idx' exists, this
function overwrites the value
"""
# check user input: color location
# if beyond ends of ramp, make end of ramp
if idx<0:
idx=0
elif idx>self.nColors-1:
idx=self.nColors-1
# check user input: color format
        if type(col) is not tuple or len(col) != 4:
print "Error: Colors must be spefied as a RGBA tuple with four values."
print "Error: %s was given instead." % str(col)
return
# check user input: color type format
if colType not in (ColorTypes.RGBAi, ColorTypes.RGBAf):
print "Error: Color type specification must be either, "
print "Error: ColorRamp.RGBAi or ColorRamp.RGBAf"
return
userCol = None
# convert color type if needed
if colType==ColorTypes.RGBAf:
userCol = col
elif colType==ColorTypes.RGBAi:
userCol = map(lambda c: float(c)/float(colScale), col)
# create a ColorPoint and insert it
self.keys[idx] = ColorPoint(idx, userCol, colType)
# is this ramp yet good to use?
self.updateReady()
# what else do we need to do to modify the model?
def checkPoint(self, pt, startX, X):
"""
Checks if there is a point between startX and X.
"""
ret_x = startX
if startX < X:
for x in range(int(startX)+1, int(X)+1):
if x in self.keys:
break
ret_x = x
elif startX > X:
for x in range(int(startX)-1, int(X)-1, -1):
if x in self.keys:
break
ret_x = x
return ret_x
def getPoint(self, pt):
"""
        Returns the true index (horizontal position) of a given point.
"""
if pt in self.canvas_ids:
return self.canvas_ids[pt]
return None
def getRampList(self):
"""
Returns a list of floats representing the color ramp.
"""
ramp_list = []
for x in range(0,360):
if x in self.keys:
col = list(self.keys[x].color)
ramp_list.append(float(x))
ramp_list.append(float(col[0]))
ramp_list.append(float(col[1]))
ramp_list.append(float(col[2]))
ramp_list.append(float(col[3]))
return ramp_list
def movePoint(self,pt,X,alpha):
if pt not in self.canvas_ids:
# print "Error: Could not move pt(%d)." % pt
return
idx = self.canvas_ids[pt]
if idx in self.keys:
col = list(self.keys[idx].color)
else:
# print "Logic error no such index in self.keys"
return
col[3] = alpha
# prevent extreme points from being replaced
if X <= 0:
return
if X >= 359:
return
self.removeColor(idx)
# prevent extreme points from moving horizontally
if idx == 0:
X = 0
if idx == 359:
X = 359
self.addColor(X, tuple(col))
def removePoint(self,pt):
if pt not in self.canvas_ids:
# print "Error: Could not remove pt(%d)." % pt
return
idx = self.canvas_ids[pt]
if idx <= 0 or idx >= 359:
return
self.removeColor(idx)
def removeColor(self, idx):
# check user input
if idx not in self.keys: return
if idx<0 or idx>self.nColors-1: return
# remove the point
del self.keys[idx]
# is this ramp still good to use?
self.updateReady()
def updateReady(self):
# are we ready to use?
self.ready = (0 in self.keys and self.nColors-1 in self.keys)
def updateRamp(self):
# if idx is specified then it was either added or removed
# so adjust the ramp about that point
if not self.ready:
# no use in updating a ramp w/o proper colors
print "Msg: This color ramp is not yet ready to use. Please add"
print "Msg: at least two colors at the ramp's extreme points 0 and %d" % (self.nColors-1)
return
# OPTIMIZATION TODO:
# if idx!=None and idx no in self.keys, then the point
# was removed, just update around those pts
# if idx!=None and does exists in self.keys, then they
# just added this point, so update the pts around it
self.ramp = []
keyList = self.keys.keys()
keyList.sort()
keyList.reverse()
lowerId = keyList.pop()
while len(keyList)>0:
upperId = keyList.pop()
# number of colors in between
span = int(abs(upperId-lowerId))
# get the actual colors
lowerCol, upperCol = self.keys[lowerId].color, self.keys[upperId].color
for x in range(span):
# linear mixing components
cUpper = float(x) / float(span)
cLower = 1.0 - cUpper
self.ramp.append((cLower * lowerCol[0] + cUpper * upperCol[0],
cLower * lowerCol[1] + cUpper * upperCol[1],
cLower * lowerCol[2] + cUpper * upperCol[2],
cLower * lowerCol[3] + cUpper * upperCol[3]))
lowerId = upperId
        # fix the off-by-one error
self.ramp.append(upperCol)
assert len(self.ramp)==self.nColors, "ColorRamp Logic Error: This ramp supports %d colors ONLY, but %d were found in the ramp." % (self.nColors, len(self.ramp))
def getRamp(self, colType=ColorTypes.RGBAf, colScale=1.0):
# update the ramp and return it
self.updateRamp()
if colType==ColorTypes.RGBAf:
if colScale==1.0:
return self.ramp
elif colType==ColorTypes.HEX6:
colScale = 255
return map(lambda col: "#%02x%02x%02x" % (colScale*col[0],colScale*col[1],colScale*col[2]), self.ramp)
elif colType==ColorTypes.HEX8:
colScale = 255
return map(lambda col: "#%02x%02x%02x%02x" % (colScale*col[0],colScale*col[1],colScale*col[2],colScale*col[3]), self.ramp)
def toPhotoImageString(self,nRows=1):
oneLine = "{" + " ".join(self.getRamp(ColorTypes.HEX6)) + "}"
if nRows==1:
return oneLine
else:
return " ".join([oneLine]*nRows)
# this belongs in the view
def toPhotoImage(self,nRows=1):
try:
from Tkinter import PhotoImage
except ImportError, e:
print "Error: could not import Tk. No image."
print "Error: ", e
return None
img = PhotoImage(width=self.nColors, height=nRows)
img.put(self.toPhotoImageString(nRows))
return img
    def toCanvas(self,canvas,width,height,padX=0,padY=0):
        # remember the canvas so clearCanvas() can delete the items later
        self.canvas = canvas
        r = self.CIRCLE_RADIUS
tmpKeys = self.keys.keys()
tmpKeys.sort()
# plottable window area; wH, wW
wH = height-2*padY
wW = width -2*padX
for idx in range(len(tmpKeys)):
pt1 = self.keys[tmpKeys[idx]]
origX1, origY1 = pt1.idx, pt1.color[3]
x1 = int(float(origX1)/float(self.nColors) * wW)
y1 = self.alphaToY(origY1)
y1 = int(wH * (1.0-float(y1)))
x1 += padX
y1 += padY
# if not last loop, then draw the line
if idx+1<len(tmpKeys):
pt2 = self.keys[tmpKeys[idx+1]]
origX2, origY2 = pt2.idx, pt2.color[3]
x2 = int(float(origX2)/float(self.nColors) * wW)
y2 = self.alphaToY(origY2)
y2 = int(wH * (1.0-float(y2)))
x2 += padX
y2 += padY
# plot the pt
unique_id = canvas.create_line((x1,y1,x2,y2),fill="black",width=1.0,tags="colorPt")
self.canvas_ids[unique_id] = idx
origColor = pt1.color
# convert the color from RGBA --> HEX6
colHEX6 = "#%02x%02x%02x" % (origColor[0]*255., origColor[1]*255., origColor[2]*255.)
# plot the pt
unique_id = canvas.create_oval((x1-r,y1-r,x1+r,y1+r),fill=colHEX6,tags="colorPt")
self.canvas_ids[unique_id] = tmpKeys[idx]
def clearCanvas(self):
for x in self.canvas_ids:
self.canvas.delete(x)
def getHandle(self):
return self.handle
def setHandle(self,handle):
self.handle = handle
def yToAlpha(self,y):
if y<=0:
return 0.0
elif y>=1:
return 1.0
else:
# return y
return (10.**y-1.) / 9.
def alphaToY(self,alpha):
if alpha<=0:
return 0.
elif alpha>=1:
return 1.0
else:
# return alpha
return math.log(1.0+9.*alpha,10.)
if __name__=="__main__":
c = ColorRamp(256,name="C")
# add some colors
c.addColor(1,(0,0,0,0))
print c
c.addColor(2,(1,1,1,1))
print c
c.addColor(250,(0.5, 0.5, 0.5, 0.5))
print c
# range checking
c.addColor(-1, (0,0,0,0))
print c
c.addColor(256, (1,2,3,4))
print c
# color scaling
c.addColor(45, (128, 255, 64, 32), colType=ColorTypes.RGBAi, colScale=255)
print c
# remove a color
c.removeColor(2)
print c
# range checking
c.removeColor(-1)
print c
# range checking
c.removeColor(2000)
print c
# check ready to use
c.addColor(0, (0,0,0,0))
print c
c.addColor(8, (1.0, 0.4, 0.0, 0.0))
print c
c.addColor(255, (1,1,1,1))
print c
# check ramp types
d = ColorRamp(32)
d.addColor(0, (0,0,0,0), colType=ColorTypes.RGBAi, colScale=255)
d.addColor(31, (255,255,255,255), colType=ColorTypes.RGBAi, colScale=255)
d.addColor(15, (1.0, 0.0, 0.0, 1.0))
print "Color Ramp as RGAf"
print d.getRamp()
print "Color Ramp as HEX6"
print d.getRamp(ColorTypes.HEX6)
print "Color Ramp as HEX8"
print d.getRamp(ColorTypes.HEX8)
print "Does adding/removing a pt screw up the model?"
f = ColorRamp(360)
# end pts
f.addColor(0, (0,0,0,0))
f.addColor(359, (1,1,1,1))
print f
print "Adding a pt"
f.addColor(7, (1.0, 0.0, 0.5, 0.25))
print f
print "Removing a pt"
f.removeColor(7)
print f
print "Add some more colors"
f.addColor(90, (1.0, 0.0, 0.0, 1.0))
f.addColor(270, (0.0, 0.0, 1.0, 1.0))
f.addColor(180, (0.0, 1.0, 0.0, 1.0))
print "Checking hex8 vlaues"
print f.getRamp(ColorTypes.HEX8)
print "To PhotoImage String: nRows=1"
print f.toPhotoImageString()
print "To PhotoImage String: nRows=32"
print f.toPhotoImageString(16)
try:
from Tkinter import *
root = Tk()
padX, padY = 30, 30
canvas = Canvas(root,height=10+padY,width=360+padX)
print "Try to make a color ramp image"
img = f.toPhotoImage(10)
canvas.create_image((padX/2, padY/2),image=img,anchor=NW)
canvas.pack()
root.mainloop()
except ImportError, e:
print "WARNING: Tkinter not installed for this Python version."
print "WARNING: Skipping the Tkinter test"
|
gratefulfrog/lib
|
python/pymol/colorramping.py
|
Python
|
gpl-2.0
| 13,994
|
from ConfigParser import SafeConfigParser
import os.path
import pytest
import re
import textwrap
from amazonproduct import utils
def pytest_addoption(parser):
group = parser.getgroup('amazonproduct',
'custom options for testing python-amazon-product-api')
group._addoption('--locale', action='append', dest='locales',
metavar='LOCALE', help='Locale to use (e.g. "de" or "us"). This option '
'can be used more than once. Note that tests with specific locales '
'defined which do not match the ones specified by this option will '
'NOT be run.')
group._addoption('--api-version', action='append', dest='versions',
metavar='VERSION', help='API version to use (e.g. "2010-09-01"). This '
'option can be used more than once. Note that tests with specific '
'versions defined which do not match the ones specified by this '
'option will NOT be run.')
group._addoption('--refetch', action='store', type='choice', dest='fetch',
metavar='method', choices=['no', 'missing', 'outdated', 'all'],
default='no', help='Fetch responses from live server and overwrite '
'previously cached XML file: one of no (default)|missing|outdated|'
'all.')
group._addoption('--processor', action='append', dest='processors',
metavar='PROCESSOR', choices=['objectify', 'etree', 'elementtree', 'minidom'],
help='Result processor to use: one of objectify|etree|minidom.')
def pytest_funcarg__server(request):
"""
Is the same as funcarg `httpserver` from plugin pytest-localserver with the
difference that it has a module-wide scope.
"""
def setup():
try:
localserver = request.config.pluginmanager.getplugin('localserver')
except KeyError:
raise pytest.skip('This test needs plugin pytest-localserver!')
server = localserver.http.Server()
server.start()
return server
def teardown(server):
server.stop()
return request.cached_setup(setup, teardown, 'module')
class DummyConfig (object):
"""
Dummy config to which to which you can add config files which in turn will
be created on the file system as temporary files.
"""
_file_counter = 0
def __init__(self, tmpdir):
self.tmpdir = tmpdir
self.files = []
def add_file(self, content, path):
"""
Writes one temporary file.
"""
if not path:
path = 'config-%i' % self._file_counter
self._file_counter += 1
p = self.tmpdir.ensure(os.path.expanduser(path))
p.write(textwrap.dedent(content))
self.files += [p.strpath]
_REG = re.compile(r'^#\s*file:\s+(.+?)\n', re.DOTALL | re.MULTILINE)
def load_from_string(self, content):
"""
Creates config files from string which is split up into file blocks and
written to temporary files.
"""
last = 0 # end of the last matching '# file: XXX'
path = None # path of the last matching '# file: XXX'
for m in self._REG.finditer(content):
if path is not None:
self.add_file(content[last:m.start()], path)
path = m.group(1)
last = m.end()
if path is not None:
self.add_file(content[last:], path)
else:
raise ValueError('Where are the file paths?')
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def __repr__(self):
return '<DummyConfig %s files=%r>' % (hex(id(self)), self.files)
def pytest_funcarg__configfiles(request):
"""
Returns a dummy config to which you can add config files which in turn will
be created on the file system as temporary files. You can use the following
methods:
To add a single config file use ``configfiles.add_file(content, path)``. If
you omit the ``path``, some arbitrary file name is used. ::
configfiles.add_file('''
[Credentials]
access_key = ABCDEFGH12345
secret_key = abcdegf43
locale = de''', path='/etc/amazon-product-api.cfg')
In order to add multiple config files at once, you can use the following
method::
configfiles.load_from_string('''
# file: /etc/boto.cfg
[Credentials]
aws_access_key_id = Hhdjksaiunkljfl
aws_secret_access_key = difioemLjdks02
# file: /home/user/.amazon-product-api
[Credentials]
locale = de
''')
"""
tmpdir = request.getfuncargvalue('tmpdir')
monkeypatch = request.getfuncargvalue('monkeypatch')
def prepend_tmppath(dir, files):
return [tmpdir.join(os.path.expanduser(fn)).strpath for fn in files]
monkeypatch.setattr(utils, 'CONFIG_FILES',
prepend_tmppath(tmpdir, utils.CONFIG_FILES))
cfg = DummyConfig(tmpdir)
return cfg
|
prats226/python-amazon-product-api-0.2.8
|
tests/conftest.py
|
Python
|
bsd-3-clause
| 5,765
|
# Note:
# This is a total hack to implement simple disk-based memoization,
# with no expiration.
import os
PATH = '/tmp/django_cache_belonging_to_%s' % os.environ.get('USER', 'unknown')
def set(key, value):
    if not os.path.isdir(PATH):
        os.mkdir(PATH)
    # use open() with a context manager, for symmetry with get() below
    with open(os.path.join(PATH, key), 'w') as file_obj:
        file_obj.write(value)
def get(key):
try:
with open(os.path.join(PATH, key)) as f:
return f.read()
except IOError:
return None
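# Usage sketch (illustrative): values round-trip as strings, and a missing
# key simply yields None.
#
#     set('greeting', 'hello')
#     assert get('greeting') == 'hello'
#     assert get('no-such-key') is None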
|
waseem18/oh-mainline
|
mysite/base/disk_cache.py
|
Python
|
agpl-3.0
| 504
|
from PyQt4 import QtCore, QtGui
import vtrace.qt
import vdb.qt.base
from vqt.main import *
class VdbRegistersWindow(vdb.qt.base.VdbWidgetWindow):
def __init__(self, db, dbt, parent=None):
vdb.qt.base.VdbWidgetWindow.__init__(self, db, dbt, parent=parent)
self.regsWidget = vtrace.qt.RegistersView(trace=dbt, parent=parent)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.regsWidget)
self.setLayout(vbox)
self.setWindowTitle('Registers')
vqtconnect(self.vqLoad, 'vdb:setregs')
vqtconnect(self.vqLoad, 'vdb:setthread')
def vqLoad(self):
'''
the widgets in RegistersView already register for notifications.
'''
self.regsWidget.reglist.vqLoad()
|
joshuahoman/vivisect
|
vdb/qt/registers.py
|
Python
|
apache-2.0
| 753
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Splitting dataset
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
class TestSplitDataset(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a split dataset:
Given I create a data source with "<params>" uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a dataset extracting a <rate> sample
And I wait until the dataset is ready less than <time_3> secs
When I compare the datasets' instances
Then the proportion of instances between datasets is <rate>
Examples:
| data | time_1 | time_2 | time_3 | rate |
| ../data/iris.csv | 10 | 10 | 10 | 0.8 |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '10', '10', '10', '0.8', '{"category": 12}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file_with_args(self, example[0], example[5])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self,
example[2])
dataset_create.i_create_a_split_dataset(self, example[4])
dataset_create.the_dataset_is_finished_in_less_than(self,
example[3])
dataset_create.i_compare_datasets_instances(self)
dataset_create.proportion_datasets_instances(self, example[4])
|
mmerce/python
|
bigml/tests/test_17_split_dataset.py
|
Python
|
apache-2.0
| 2,882
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
IDENTITY = 'identity:%s'
RULE_ADMIN_OR_CREDENTIAL_OWNER = (
'rule:admin_required or '
'(rule:owner and user_id:%(target.credential.user_id)s)')
RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
RULE_ADMIN_OR_TARGET_DOMAIN = (
'rule:admin_required or '
'token.project.domain.id:%(target.domain.id)s')
RULE_ADMIN_OR_TARGET_PROJECT = (
'rule:admin_required or '
'project_id:%(target.project.id)s')
RULE_ADMIN_OR_TOKEN_SUBJECT = 'rule:admin_or_token_subject'
RULE_ADMIN_REQUIRED = 'rule:admin_required'
RULE_REVOKE_EVENT_OR_ADMIN = 'rule:revoke_event_or_admin'
RULE_SERVICE_ADMIN_OR_TOKEN_SUBJECT = 'rule:service_admin_or_token_subject'
RULE_SERVICE_OR_ADMIN = 'rule:service_or_admin'
RULE_TRUST_OWNER = 'user_id:%(trust.trustor_user_id)s'
rules = [
policy.RuleDefault(
name='admin_required',
check_str='role:admin or is_admin:1'),
policy.RuleDefault(
name='service_role',
check_str='role:service'),
policy.RuleDefault(
name='service_or_admin',
check_str='rule:admin_required or rule:service_role'),
policy.RuleDefault(
name='owner',
check_str='user_id:%(user_id)s'),
policy.RuleDefault(
name='admin_or_owner',
check_str='rule:admin_required or rule:owner'),
policy.RuleDefault(
name='token_subject',
check_str='user_id:%(target.token.user_id)s'),
policy.RuleDefault(
name='admin_or_token_subject',
check_str='rule:admin_required or rule:token_subject'),
policy.RuleDefault(
name='service_admin_or_token_subject',
check_str='rule:service_or_admin or rule:token_subject'),
policy.RuleDefault(
name='default',
check_str='rule:admin_required')
]
def list_rules():
return rules
|
ilay09/keystone
|
keystone/common/policies/base.py
|
Python
|
apache-2.0
| 2,354
|
import os
import sys
import shutil
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))
# BSP dist function
def dist_do_building(BSP_ROOT):
from mkdist import bsp_copy_files
import rtconfig
dist_dir = os.path.join(BSP_ROOT, 'dist', os.path.basename(BSP_ROOT))
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'Libraries')
library_dir = os.path.join(dist_dir, 'Libraries')
print("=> copy bsp drivers")
bsp_copy_files(os.path.join(library_path, 'rt_drivers'), os.path.join(library_dir, 'rt_drivers'))
print("=> copy bsp library")
bsp_copy_files(os.path.join(library_path, rtconfig.BSP_LIBRARY_TYPE), os.path.join(library_dir, rtconfig.BSP_LIBRARY_TYPE))
shutil.copyfile(os.path.join(library_path, 'Kconfig'), os.path.join(library_dir, 'Kconfig'))
|
onelife/rt-thread
|
bsp/at32/tools/sdk_dist.py
|
Python
|
gpl-2.0
| 848
|
import numpy as np
import warnings
def _bit_length_26(x):
if x == 0:
return 0
elif x == 1:
return 1
else:
return len(bin(x)) - 2
try:
from scipy.lib._version import NumpyVersion
except ImportError:
import re
string_types = str
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
can be >9) in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy.lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
            self.is_devversion = bool(re.search(r'\.dev-', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
        def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target - 1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2 ** ((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2 ** _bit_length_26(quotient - 1)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
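# Worked examples (illustrative): _next_regular(17) == 18, since 18 = 2 * 3**2
# is the smallest 5-smooth number >= 17; _next_regular(1000) == 1000, since
# 1000 = 2**3 * 5**3 is already regular.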
if NumpyVersion(np.__version__) >= '1.7.1':
np_matrix_rank = np.linalg.matrix_rank
else:
def np_matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
        .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
class CacheWriteWarning(UserWarning):
pass
class CachedAttribute(object):
def __init__(self, func, cachename=None, resetlist=None):
self.fget = func
self.name = func.__name__
self.cachename = cachename or '_cache'
self.resetlist = resetlist or ()
def __get__(self, obj, type=None):
if obj is None:
return self.fget
# Get the cache or set a default one if needed
_cachename = self.cachename
_cache = getattr(obj, _cachename, None)
        if _cache is None:
            # NOTE: resettable_cache is not defined in this file; it is
            # expected to be available from the surrounding package
            setattr(obj, _cachename, resettable_cache())
_cache = getattr(obj, _cachename)
# Get the name of the attribute to set and cache
name = self.name
_cachedval = _cache.get(name, None)
# print("[_cachedval=%s]" % _cachedval)
if _cachedval is None:
# Call the "fget" function
_cachedval = self.fget(obj)
# Set the attribute in obj
# print("Setting %s in cache to %s" % (name, _cachedval))
try:
_cache[name] = _cachedval
except KeyError:
setattr(_cache, name, _cachedval)
# Update the reset list if needed (and possible)
resetlist = self.resetlist
        if resetlist != ():
try:
_cache._resetdict[name] = self.resetlist
except AttributeError:
pass
# else:
# print("Reading %s from cache (%s)" % (name, _cachedval))
return _cachedval
def __set__(self, obj, value):
errmsg = "The attribute '%s' cannot be overwritten" % self.name
warnings.warn(errmsg, CacheWriteWarning)
class _cache_readonly(object):
"""
Decorator for CachedAttribute
"""
def __init__(self, cachename=None, resetlist=None):
self.func = None
self.cachename = cachename
self.resetlist = resetlist or None
def __call__(self, func):
return CachedAttribute(func,
cachename=self.cachename,
resetlist=self.resetlist)
cache_readonly = _cache_readonly()
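# Minimal usage sketch (illustrative; the class and attribute below are
# hypothetical). ``cache_readonly`` computes the value on first access,
# stores it in the instance's ``_cache``, and issues a CacheWriteWarning on
# attempts to overwrite it:
#
# class Example(object):
#     @cache_readonly
#     def expensive(self):
#         return sum(range(10 ** 6))
#
# ex = Example()
# ex.expensive       # computed once, then served from ex._cache
# ex.expensive = 0   # warns with CacheWriteWarning; the value is kept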
|
CartoDB/crankshaft
|
src/py/crankshaft/crankshaft/regression/glm/utils.py
|
Python
|
bsd-3-clause
| 13,087
|
from unittest import TestCase
from itertools import permutations
from src.main.managers.players.hard_coded_player_manager import HardCodedPlayerManager
from src.main.managers.conflict.hard_coded_example_conflict_manager import HardCodedExampleConflictManager
from src.main.managers.conflict.automated_conflict_manager import AutomatedConflictManager
from src.main.beans.players.standard_player import StandardPlayer
from src.main.managers.players.base_player_manager import BasePlayerManager
class TestModelConflictManager(TestCase):
def testStandardConflict(self):
player_manager = HardCodedPlayerManager()
conflict_manager = HardCodedExampleConflictManager(player_manager)
winner = conflict_manager.run_conflict()
self._validate_alice(winner)
active_players = player_manager.get_active_players()
inactive_players = player_manager.get_inactive_players()
self.assertEqual(len(active_players), 1)
self.assertEqual(len(inactive_players), 1)
alice = active_players[0]
bob = inactive_players[0]
self._validate_alice(alice)
self._validate_bob(bob)
def test_add_players(self):
player0 = StandardPlayer(speed=100, name="FASTEST")
player1 = StandardPlayer(speed=5, name="Middle1")
player2 = StandardPlayer(speed=5, name="Middle2")
player3 = StandardPlayer(speed=1, name="SLOWEST")
players = [player0, player1, player2, player3]
for players_perm in permutations(players):
players_perm = list(players_perm)
player_manager = BasePlayerManager(players_perm)
conflict_manager = AutomatedConflictManager(player_manager)
sorted_players = conflict_manager._order_players_for_new_round(players_perm)
self.validate_players(sorted_players, player0, player1, player2, player3)
def validate_players(self, players, player0, player1, player2, player3):
self.assertEqual(players[0], player0)
self.assertTrue((players[1] == player1) or (players[1] == player2))
self.assertTrue((players[2] == player1) or (players[2] == player2))
self.assertEqual(players[3], player3)
def _validate_alice(self, alice):
self.assertEqual(alice.NAME, "Alice")
self.assertEqual(alice.HP, 4)
self.assertEqual(len(alice.item_manager._SWORD_BAG), 0)
self.assertEqual(len(alice.item_manager._SHIELD_BAG), 1)
self.assertEqual(len(alice.item_manager._SHOES_BAG), 0)
self.assertEqual(len(alice.item_manager._HAT_BAG), 1)
self.assertFalse(alice.is_spooked())
def _validate_bob(self, bob):
self.assertEqual(bob.NAME, "Bob")
self.assertEqual(bob.HP, 0)
self.assertEqual(len(bob.item_manager._SWORD_BAG), 0)
self.assertEqual(len(bob.item_manager._SHIELD_BAG), 0)
self.assertEqual(len(bob.item_manager._SHOES_BAG), 0)
self.assertEqual(len(bob.item_manager._HAT_BAG), 0)
self.assertFalse(bob.is_spooked())
|
malcolmwhite/DungeonsAndDragons
|
src/test/managers/test_model_conflict_manager.py
|
Python
|
mit
| 3,021
|
from PyDynamicStructures.dynamic_structure import *
from PyDynamicStructures.base_types import *
|
cpchrispye/PyDynamicStructures
|
PyDynamicStructures/__init__.py
|
Python
|
mit
| 96
|
# -*- coding: utf-8 -*-
"""
Gadfly queue storage.
:author: David Siroky (siroky@dasir.cz)
:license: MIT License (see LICENSE.txt)
"""
from __future__ import absolute_import
import os
from binascii import b2a_hex, a2b_hex
import gadfly
from snakemq.message import Message, MAX_UUID_LENGTH
from snakemq.messaging import MAX_IDENT_LENGTH
from snakemq.storage import QueuesStorageBase
###########################################################################
###########################################################################
class GadflyQueuesStorage(QueuesStorageBase):
def __init__(self, directory, filename):
if os.path.isfile(os.path.join(directory, filename + ".gfd")):
self.conn = gadfly.gadfly(filename, directory)
self.crs = self.conn.cursor()
else:
self.conn = gadfly.gadfly()
self.conn.startup(filename, directory)
self.crs = self.conn.cursor()
self.create_structures()
####################################################
def close(self):
if self.crs:
self.crs.close()
self.crs = None
if self.conn:
self.conn.close()
self.conn = None
####################################################
def create_structures(self):
# UUID is stored as hex
self.crs.execute("""CREATE TABLE items (queue_name VARCHAR(%i),
uuid VARCHAR(%i),
data VARCHAR,
ttl FLOAT,
flags INTEGER)""" %
(MAX_IDENT_LENGTH, MAX_UUID_LENGTH * 2))
self.conn.commit()
####################################################
def get_queues(self):
self.crs.execute("""SELECT queue_name FROM items GROUP BY queue_name""")
return [r[0] for r in self.crs.fetchall()]
####################################################
def get_items(self, queue_name):
self.crs.execute("""SELECT uuid, data, ttl, flags FROM items
WHERE queue_name = ?""",
(queue_name,))
items = []
for res in self.crs.fetchall():
uuid = a2b_hex(res[0])
data = res[1]
items.append(Message(uuid=uuid,
data=data,
ttl=res[2],
flags=res[3]))
return items
####################################################
def push(self, queue_name, item):
self.crs.execute("""INSERT INTO items
(queue_name, uuid, data, ttl, flags)
VALUES (?, ?, ?, ?, ?)""",
(queue_name, b2a_hex(item.uuid), item.data,
item.ttl, item.flags))
self.conn.commit()
####################################################
def delete_items(self, items):
for item in items:
self.crs.execute("""DELETE FROM items WHERE uuid = ?""",
(b2a_hex(item.uuid),))
self.conn.commit()
####################################################
def delete_all(self):
self.crs.execute("DELETE FROM items")
self.conn.commit()
####################################################
def update_items_ttl(self, items):
for item in items:
self.crs.execute("""UPDATE items SET ttl = ? WHERE uuid = ?""",
(item.ttl, b2a_hex(item.uuid)))
self.conn.commit()
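# Hedged usage sketch (directory, filename, and the Message constructor
# arguments are assumptions; see snakemq.message for the real signature):
#
# storage = GadflyQueuesStorage("/tmp/queues", "snakemq_store")
# storage.push("outbox", Message(data=b"hello", ttl=60.0))
# for msg in storage.get_items("outbox"):
#     print(msg.uuid, msg.data)
# storage.close()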
|
dsiroky/snakemq
|
snakemq/storage/gadfly.py
|
Python
|
mit
| 3,702
|
def detect_single_character_xor(ciphertext):
return ""
|
gjtempleton/matasano_cryptopals
|
set1/challenge_4.py
|
Python
|
mit
| 59
|
'''
used for parsing tricky Svg elements such as bezier curves
Bezier code source: http://www.cs.nyu.edu/~dzorin/numcomp08/bezier.py
Circular fitting ref: http://wiki.scipy.org/Cookbook/Least_Squares_Circle
'''
import numpy
from numpy import array, mean, linalg, sqrt, pi, linspace, cos, sin, arctan2, cross, nan, isnan, arccos
def bezier_cubic_point( p0, p1, p2, p3, t):
''' source wikipedia'''
return p0*(1-t)**3 + 3*p1*t*(1-t)**2 + 3*p2*t**2*(1-t) + t**3 * p3
def bezier_cubic( p0, p1, p2, p3, t):
''' source wikipedia'''
B = numpy.array([
p0[0]*(1-t)**3 + 3*p1[0]*t*(1-t)**2 + 3*p2[0]*t**2*(1-t) + t**3 * p3[0],
p0[1]*(1-t)**3 + 3*p1[1]*t*(1-t)**2 + 3*p2[1]*t**2*(1-t) + t**3 * p3[1],
])
return B.transpose()
def fitCircle(X, Y):
'http://wiki.scipy.org/Cookbook/Least_Squares_Circle, algebraic approximation method'
x_m = mean(X)
y_m = mean(Y)
# calculation of the reduced coordinates
U = X - x_m
V = Y - y_m
# linear system defining the center (uc, vc) in reduced coordinates:
# Suu * uc + Suv * vc = (Suuu + Suvv)/2
# Suv * uc + Svv * vc = (Suuv + Svvv)/2
Suv = sum(U*V)
Suu = sum(U**2)
Svv = sum(V**2)
Suuv = sum(U**2 *V)
Suvv = sum(U* V**2)
Suuu = sum(U**3)
Svvv = sum(V**3)
# Solving the linear system
A = array([ [ Suu, Suv ], [Suv, Svv]])
B = array([ Suuu + Suvv, Svvv + Suuv ])/2.0
try:
uc, vc = linalg.solve(A, B)
except numpy.linalg.LinAlgError:
return 0,0,0,numpy.inf
xc_1 = x_m + uc
yc_1 = y_m + vc
# Calcul des distances au centre (xc_1, yc_1)
Ri_1 = sqrt((X-xc_1)**2 + (Y-yc_1)**2)
R_1 = mean(Ri_1)
residu_1 = sum((Ri_1-R_1)**2)
return xc_1, yc_1, R_1, residu_1
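# Quick illustrative check (hypothetical data): points sampled from a known
# circle should be recovered almost exactly by the algebraic fit above.
#
# T = linspace(0, 2 * pi, 50)
# xc, yc, R, residual = fitCircle(3.0 + 2.0 * cos(T), -1.0 + 2.0 * sin(T))
# # expect xc ~ 3.0, yc ~ -1.0, R ~ 2.0 and a residual near zero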
def fitCircle_to_path(P, points_per_segment=6):
X = []
Y = []
T = linspace(0,1,points_per_segment)
t0 = T**0 * (1-T)**3
t1 = 3* T**1 * (1-T)**2
t2 = 3* T**2 * (1-T)**1
t3 = T**3 * (1-T)**0
for C in P:
#print(C)
if len(C) == 4: #then cubic Bezier
p0, p1, p2, p3 = C
X = X + ( t0*p0[0] + t1*p1[0] + t2*p2[0] + t3*p3[0] ).tolist()
Y = Y + ( t0*p0[1] + t1*p1[1] + t2*p2[1] + t3*p3[1] ).tolist()
if len(C) == 3: #then quadratic Bezier plot, https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Quadratic_B.C3.A9zier_curves
#\mathbf{B}(t) = (1 - t)^{2}\mathbf{P}_0 + 2(1 - t)t\mathbf{P}_1 + t^{2}\mathbf{P}_2 \mbox{ , } t \in [0,1].
p0, p1, p2 = C
            X = X + ( (1-T)**2*p0[0] + 2*(1-T)*T*p1[0] + T**2*p2[0] ).tolist()
            Y = Y + ( (1-T)**2*p0[1] + 2*(1-T)*T*p1[1] + T**2*p2[1] ).tolist()
if len(X) > 3:
return fitCircle(array(X), array(Y))
else:
return 0,0,0,10**6
def arccos2( v ):
if -1 <= v and v <= 1:
return arccos( v )
    elif 1 < v and v < 1.001: # numerical precision error case 1
        return 0.0
    elif -1.001 < v and v < -1: # numerical precision error case 2
return pi
else:
return nan
def findCircularArcCentrePoint_new(r, x_1, y_1, x_2, y_2, largeArc, sweep, debug=False ):
'''
http://www.w3.org/TR/SVG/paths.html#PathDataEllipticalArcCommands
The elliptical arc command draws a section of an ellipse which meets the following constraints:
Of the four candidate arc sweeps, two will represent an arc sweep of greater than or equal to 180 degrees (the "large-arc"), and two will represent an arc sweep of less than or equal to 180 degrees (the "small-arc").
If large-arc-flag is '1', then one of the two larger arc sweeps will be chosen; otherwise, if large-arc-flag is '0', one of the smaller arc sweeps will be chosen.
If sweep-flag is '1', then the arc will be drawn in a "positive-angle" direction (i.e., the ellipse formula x=cx+rx*cos(theta) and y=cy+ry*sin(theta) is evaluated such that theta starts at an angle corresponding to the current point and increases positively until the arc reaches (x,y)).
A value of 0 causes the arc to be drawn in a "negative-angle" direction (i.e., theta starts at an angle value corresponding to the current point and decreases until the arc reaches (x,y)).
Center calculation
(x_1 - x_c)**2 + (y_1 - y_c)**2 = r**2 (1)
(x_2 - x_c)**2 + (y_2 - y_c)**2 = r**2 (2)
    giving 2 possible centre points; this is where largeArc and sweep come in
using geometry to solve for centre point...
'''
# the law of cosines states c^2 = a^2 + b^2 - 2ab*cos(gamma)
c,a = r,r
b = ( ( x_2-x_1 )**2 + ( y_2-y_1 )**2 ) ** 0.5
if a*b != 0:
cos_gamma = ( a**2 + b**2 - c**2 ) / ( 2*a*b )
else:
return numpy.nan, numpy.nan
gamma = arccos2( cos_gamma )
if isnan(gamma):
return numpy.nan, numpy.nan
if debug: print('x1,y1 : %1.2f, %1.2f' % (x_1, y_1))
if debug: print('x2,y2 : %1.2f, %1.2f' % (x_2, y_2))
if debug: print('large arc : %s' % largeArc)
if debug: print('sweep : %s' % sweep )
if debug: print('gamma %3.1f' % (gamma/pi*180))
angle_1_2 = arctan2( y_2 - y_1, x_2 - x_1) #range ``[-pi, pi]``
# given the two possible center points of
#c_x = x_1 + r*cos(angle_1_2 + gamma)
#c_y = y_1 + r*sin(angle_1_2 + gamma)
#if debug: print('possible c_x,c_y at %1.2f, %1.2f' % (c_x, c_y))
#c_x_alt = x_1 + r*cos(angle_1_2 - gamma)
#c_y_alt = y_1 + r*sin(angle_1_2 - gamma)
#if debug: print(' or c_x,c_y at %1.2f, %1.2f' % (c_x_alt, c_y_alt))
#A = array([x_1, y_1, 0.0])
#B = array([x_2, y_2, 0.0])
#C = array([c_x, c_y, 0.0])
#if debug: print('cross(A-C, B-A)[2] : %s' % cross(A-C, B-A)) #Always positve, must be a result of construction!
#small_arc_theta_inc = cross(A-C, B-A)[2] > 0 #CW = clock wise
#large_arc_theta_inc = not small_arc_theta_inc
#if debug: print('small_arc_theta_inc : %s' % small_arc_theta_inc)
#if largeArc:
# correctCentre = large_arc_theta_inc == sweep
#else: #small arc
# correctCentre = small_arc_theta_inc == sweep
    if largeArc: # from geometric construction (I think)
addGamma = not sweep
else:
addGamma = sweep
if addGamma:
c_x = x_1 + r*cos(angle_1_2 + gamma)
c_y = y_1 + r*sin(angle_1_2 + gamma)
else:
c_x = x_1 + r*cos(angle_1_2 - gamma)
c_y = y_1 + r*sin(angle_1_2 - gamma)
return c_x, c_y
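# Illustrative check (hypothetical values): the endpoints (-1, 0) and (1, 0)
# of a unit-radius semicircle should yield a centre at the origin.
#
# c_x, c_y = findCircularArcCentrePoint_new(1.0, -1.0, 0.0, 1.0, 0.0,
#                                           largeArc=False, sweep=True)
# # expect (c_x, c_y) close to (0.0, 0.0)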
def findCircularArcCentrePoint_old(r, x_1, y_1, x_2, y_2, largeArc, sweep, debug=False ):
'''
(x_1 - x_c)**2 + (y_1 - y_c)**2 = r**2 (1)
(x_2 - x_c)**2 + (y_2 - y_c)**2 = r**2 (2)
    giving 2 possible centre points; this is where largeArc and sweep come in
using geometry to solve for centre point...
'''
from numpy import arccos, arctan2, sin, cos, pi
# the law of cosines states c^2 = a^2 + b^2 - 2ab*cos(gamma)
c,a = r,r
b = ( ( x_2-x_1 )**2 + ( y_2-y_1 )**2 ) ** 0.5
if a*b != 0:
cos_gamma = ( a**2 + b**2 - c**2 ) / ( 2*a*b )
else:
return numpy.nan, numpy.nan
gamma = arccos2( cos_gamma )
if isnan(gamma):
return numpy.nan, numpy.nan
if debug: print('x1,y1 : %1.2f, %1.2f' % (x_1, y_1))
if debug: print('x2,y2 : %1.2f, %1.2f' % (x_2, y_2))
if debug: print('large arc : %s' % largeArc)
if debug: print('sweep : %s' % sweep )
if debug: print('x2,y2 : %1.2f, %1.2f' % (x_2, y_2))
if debug: print('gamma %3.1f' % (gamma/pi*180))
angle_1_2 = arctan2( y_2 - y_1, x_2 - x_1) #range ``[-pi, pi]``
# given the two possible center points of
c_x = x_1 + r*cos(angle_1_2 + gamma)
c_y = y_1 + r*sin(angle_1_2 + gamma)
if debug: print('possible c_x,c_y at %1.2f, %1.2f' % (c_x, c_y))
c_x_alt = x_1 + r*cos(angle_1_2 - gamma)
c_y_alt = y_1 + r*sin(angle_1_2 - gamma)
if debug: print(' or c_x,c_y at %1.2f, %1.2f' % (c_x_alt, c_y_alt))
angle_1 = arctan2( y_1 - c_y, x_1 - c_x)
angle_2 = arctan2( y_2 - c_y, x_2 - c_x)
if debug: print(' angle_1 %3.1f deg' % (angle_1 / pi * 180))
if debug: print(' angle_2 %3.1f deg' % (angle_2 / pi * 180))
if not largeArc:
if abs(angle_1 - angle_2) > pi:
if angle_1 < angle_2:
angle_1 = angle_1 + 2*pi
else:
angle_2 = angle_2 + 2*pi
else:
if abs(angle_1 - angle_2) < pi:
if angle_1 < angle_2:
angle_1 = angle_1 + 2*pi
else:
angle_2 = angle_2 + 2*pi
if debug: print('after largeArc flag correction')
if debug: print(' angle_1 %3.1f deg' % (angle_1 / pi * 180))
if debug: print(' angle_2 %3.1f deg' % (angle_2 / pi * 180))
if sweep:
correctCentre = angle_2 > angle_1
else:
correctCentre = angle_2 < angle_1
if correctCentre:
return c_x, c_y
else:
return c_x_alt, c_y_alt
findCircularArcCentrePoint = findCircularArcCentrePoint_new
def pointsAlongCircularArc_new(r, x_1, y_1, x_2, y_2, largeArc, sweep, noPoints, debug=False ):
'excluding first point'
c_x, c_y = findCircularArcCentrePoint(r, x_1, y_1, x_2, y_2, largeArc, sweep, debug)
a,b = r,r
c = ( ( x_2-x_1 )**2 + ( y_2-y_1 )**2 ) ** 0.5
dtheta = arccos2( ( a**2 + b**2 - c**2 ) / ( 2*a*b ) )
assert dtheta >= 0
if largeArc:
dtheta = 2*pi - dtheta
if not sweep: # If sweep-flag is '1', then the arc will be drawn in a "positive-angle" direction
dtheta = -dtheta
theta_start = arctan2( y_1 - c_y, x_1 - c_x)
points = []
for i in range(1,noPoints+1):
a = theta_start + i*dtheta/noPoints
points.append([
c_x + r*cos(a),
c_y + r*sin(a)
])
return points
def pointsAlongCircularArc_old(r, x_1, y_1, x_2, y_2, largeArc, sweep, noPoints, debug=False ):
c_x, c_y = findCircularArcCentrePoint(r, x_1, y_1, x_2, y_2, largeArc, sweep, debug)
angle_1 = arctan2( y_1 - c_y, x_1 - c_x)
angle_2 = arctan2( y_2 - c_y, x_2 - c_x)
if not sweep: # arc sweeps through increasing angles # arc drawing CCW,
if angle_2 > angle_1:
angle_2 = angle_2 - 2*pi
else:
if angle_1 > angle_2:
angle_2 = angle_2 + 2*pi
points = []
for i in range(1,noPoints+1):
a = angle_1 + (angle_2 - angle_1) * 1.0*i/noPoints
points.append([
c_x + r*cos(a),
c_y + r*sin(a)
])
return points
pointsAlongCircularArc = pointsAlongCircularArc_new
def toStdOut(txt):
print(txt)
def fitCircleNumerically( X, Y, printF=toStdOut ):
from cgpr import CGPR, GradientApproximatorForwardDifference
X = array(X)
Y = array(Y)
def f(x):
        #c_x, c_y = x  # not working as planned
c_x, c_y, r = x
D = (X - c_x)**2 + (Y - c_y)**2 - r**2
return linalg.norm(D)
grad_f = GradientApproximatorForwardDifference(f)
#initial guess
x0 = numpy.array([0.0, 0.0, 1.0])
xOpt = CGPR( x0, f, grad_f, debugPrintLevel=2, printF=printF, lineSearchIt=20 )
error = f(xOpt)
c_x, c_y, R = xOpt
#R = mean( (X - c_x)**2 + (Y - c_y)**2)
return c_x, c_y, R, error
if __name__ == '__main__':
from matplotlib import pyplot
from numpy.random import rand
print('testing circle lib')
P = numpy.array( [
(0., 0.), # P0
(0.2, 1.), # P1
(1., 0.8), # P2
(0.8, 0.), # P3
] )
pyplot.plot( P[:,0], P[:,1],'--k')
pyplot.title('Bezier plot, source data from http://matplotlib.org/users/path_tutorial.html')
#B = numpy.array( [ bezier_point_cubic( P[0], P[1], P[2], P[3], t)
# for t in numpy.linspace(0,1,101) ] )
B = bezier_cubic( P[0], P[1], P[2], P[3], numpy.linspace(0,1,101) )
pyplot.plot( B[:,0], B[:,1] )
#print now fitting circle to data
c_x, c_y, R, R_error = fitCircle( B[:,0], B[:,1])
def plotCircle( cx, cy, R, style, label=None):
T = linspace(0,2*pi)
X = c_x + cos(T)*R
Y = c_y + sin(T)*R
pyplot.plot( X, Y, style, label=label )
plotCircle( c_x, c_y, R, 'g-.')
pyplot.axis('equal')
pyplot.figure()
n = 20
for i, angleUpperlimit in enumerate(numpy.array([45, 90, 180, 270])*pi/180):
r = 10 + 40*rand()
angles = rand(n)*angleUpperlimit
c_x, c_y = 42*rand(2) - 21
X = c_x + cos(angles)*r + rand(n)
Y = c_y + sin(angles)*r
pyplot.subplot(2,2,i+1)
pyplot.plot( X, Y,'go')
c_x, c_y, R, R_error = fitCircle( X, Y)
plotCircle( c_x, c_y, R, 'g:', label='analytical')
c_x, c_y, R, R_error = fitCircleNumerically( X, Y)
plotCircle( c_x, c_y, R, 'b--', label='numerical')
pyplot.axis('equal')
if i == 0:
pyplot.legend()
pyplot.show()
|
crobarcro/FreeCAD_drawing_dimensioning
|
circleLib.py
|
Python
|
gpl-3.0
| 12,906
|
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Table, Column, Integer, ForeignKey, CheckConstraint, Boolean, DateTime
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm import relation, mapper
import datetime
from eos.db import saveddata_meta
from eos.saveddata.module import Module
from eos.saveddata.mutator import Mutator
from eos.saveddata.fit import Fit
modules_table = Table("modules", saveddata_meta,
Column("ID", Integer, primary_key=True),
Column("fitID", Integer, ForeignKey("fits.ID"), nullable=False, index=True),
Column("itemID", Integer, nullable=True),
Column("baseItemID", Integer, nullable=True),
Column("mutaplasmidID", Integer, nullable=True),
Column("dummySlot", Integer, nullable=True, default=None),
Column("chargeID", Integer),
Column("state", Integer, CheckConstraint("state >= -1"), CheckConstraint("state <= 2")),
Column("projected", Boolean, default=False, nullable=False),
Column("position", Integer),
Column("created", DateTime, nullable=True, default=datetime.datetime.now),
Column("modified", DateTime, nullable=True, onupdate=datetime.datetime.now),
                      CheckConstraint('("dummySlot" IS NULL OR "itemID" IS NULL) AND "dummySlot" != "itemID"'))
mapper(Module, modules_table,
properties={
"owner": relation(Fit),
"mutators": relation(
Mutator,
backref="module",
cascade="all,delete-orphan",
collection_class=attribute_mapped_collection('attrID')
)
})
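# Illustrative query sketch (not part of the original mapping; the session
# object and fit_id are assumptions):
#
# session.query(Module).filter(Module.fitID == fit_id).order_by(Module.position).all()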
|
bsmr-eve/Pyfa
|
eos/db/saveddata/module.py
|
Python
|
gpl-3.0
| 2,658
|
__description__ = \
"""
Takes a pdb file with multiple snapshots from a trajectory and colors by time
point using user-specified color gradients. Can take multiple trajectories
simultaneously.
"""
__author__ = "Michael J. Harms"
__date__ = "110223"
__usage__ = "pymol -a *.pdb timePlot.py [pdb files *must* have .pdb extension]"
import cmd, sys
# This is a set of in-line functions used to specify color gradients. fx is
# the fractional time of a given step within the trajectories (ranging from 0
# to 1). "color_gradients" is a list of references to these functions.
# If multiple trajectories are loaded, the gradients are applied in the order
# specified in color_gradients. The trajectories are ordered by where they
# occur in the command line.
def a(fx): return [fx,0,(1.0-fx)] #fx,fx,1-fx]
def b(fx): return [1,fx,1] #1,fx,fx]
def c(fx): return [1,1,fx] #1-fx,1-fx,fx]
color_gradients = [a,b,c]
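# Illustrative endpoints of the first gradient (pure blue at fx=0, pure red
# at fx=1):
#
# assert a(0.0) == [0.0, 0, 1.0]
# assert a(1.0) == [1.0, 0, 0.0]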
# Default representations
cmd.hide("everything","all")
cmd.show("ribbon","all")
cmd.show("spheres","resn N06 or resn N15 or resn E40 or resn EST")
# Create a list of unique trajectories loaded by parsing the command line
unique = [arg[:-4] for arg in sys.argv if arg.endswith(".pdb")]
# Go through each trajectory
models = cmd.get_names("all")
for i, u in enumerate(unique):
traj = [m for m in models if "_".join(m.split("_")[:-1]) == u]
# Go through each step in the trajectory
num_steps = float(len(traj))
for j, t in enumerate(traj):
# Determine rgb of new color
fx = j/num_steps
color_list = color_gradients[i](fx)
# Create a new color
color_name = "col%s%i" % (u,j)
cmd.set_color(color_name,color_list)
# Apply the color to step j of trajectory u
cmd.color(color_name,t)
|
harmsm/md-analysis-tools
|
motion-over-trajectory/timePlot.py
|
Python
|
unlicense
| 1,794
|
"""Extension to format and index PSI variables."""
#Sphinx.add_object_type(psivar, rolename, indextemplate='', parse_node=None, ref_nodeclass=None, objname='', doc_field_types=[])
def setup(app):
app.add_object_type('psivar', 'psivar', indextemplate='single: %s')
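# Hedged usage sketch (reST shown as comments; the variable name is a
# hypothetical example). With this extension loaded, a document can define
#
#     .. psivar:: SCF TOTAL ENERGY
#
# and cross-reference it elsewhere with :psivar:`SCF TOTAL ENERGY`.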
|
loriab/qcdb
|
docs/source/psi4_sptheme/ext/psidomain.py
|
Python
|
lgpl-3.0
| 272
|
# from tkinter import *
# from tkFileDialog import askopenfilename
# import Tkconstants
# class Interface:
# def __init__(self, master):
# self.master = master
# master.title("SeqPyPlot v0.2 GUI")
# master.geometry('680x500')
# self.button_opt = {'fill': Tkconstants.BOTH, 'padx': 5, 'pady': 5}
# # main script string
# self.callstring = dict()
# # defining options for opening a directory
# self.dir_opt = options = {}
# options['initialdir'] = 'C:\\'
# options['mustexist'] = False
# options['parent'] = root
# options['title'] = 'This is a title'
# def set_entry(self, labels):
# row = 0
# col = 2
# for label, default_name, call, default in labels:
# optionlabel = Label(self.master, text=label)
# optionlabel.grid(row=row, column=col)
# row += 1
# e = Entry(self.master)
# e.insert(END, default_name)
# e.grid(row=row, column=col)
# row += 1
# def file_handler(self, opt, vars, pos, row, col):
# filepath = askopenfilename()
# self.callstring[opt] = filepath
# Label(self.master, text='...' + filepath[-20:]).grid(row=row, column=col+1)
#         vars[pos].get()
# def set_file_loader(self, loader_list):
# row = 0
# col = 3
# buttons = []
# vars = []
# pos = 0
# for name, opt, default in loader_list:
# Label(self.master, text=name).grid(row=row, column=col)
# row += 1
# vars.append(StringVar(self.master))
# vars[pos].set(default)
# buttons.append(Button(self.master, text=name, command=lambda: self.file_handler(opt, vars, pos, row, col), padx=10, pady=5))
# buttons[pos].grid(row=row, column=col, padx=10)
# row += 1
# pos += 1
# def set_dropdown(self, menu_list):
# col = 1
# row = 0
# for label, options, default_name, call, default in menu_list:
# menulabel = Label(self.master, text=label)
# menulabel.grid(row=row, column=col)
# row += 1
# variable = StringVar(self.master)
# variable.set(default)
# menu = OptionMenu(self.master, variable, *options)
# menu.grid(row=row, column=col)
# row += 1
# if __name__ == "__main__":
# root = Tk()
# seqPyPlot_GUI = Interface(root)
# entry_list = [('Stages', 'None', '-time ', 'None'),
# ('Output folder name', 'None', '-out ', 'None'),
# ('Conditions', 'Series1,Series2', '-condition ', 'Series1,Series2'),
# ('Lower Expr Limit', '10', '-low ', '10'),
# ('Upper Expr Limit', '2000', '-hi ', '2000'),
# ('Min Expr Diff', '3.5', '-dif ', '3.5'),
# ('Log2fold Cutoff', '0.7', '-log2 ', '0.7'),
# ('File Prefix', 'SeqPyPlot_out', '-prefix ', 'SeqPyPlot_out')]
# # Label, options, default_name, call, default
# menu_list = (('Data Type', ('htseq', 'cuffnorm'), 'htseq', '-data_type ', 'htseq'),
# ('Number of Plots', ('1', '2'), '2', '-num ', '2'),
# ('Remove Transient/Off Genes', ('Yes', 'No'), 'No', '-remove ', 'False'),
# ('Tally flagged Genes', ('Yes', 'No'), 'Yes', '-tally ', 'True'),
# ('Write out Filter and Plot Files', ('Yes', 'No'), 'Yes', '-report ', 'True'),
# ('Write out ERCC data', ('Yes', 'No'), 'No', '-ercc ', 'False'))
# loader_list = [('Filter Results', '-fr ', 'None'),
# ('Raw Data', '-raw_data ', 'None'),
# ('Plot Data', '-plot_data ', 'None'),
# ('Gene List to Plot', '-gene_list ', 'None'),
# ('Custom DE List', '-results ', 'None')]
# seqPyPlot_GUI.set_entry(entry_list)
# seqPyPlot_GUI.set_dropdown(menu_list)
# seqPyPlot_GUI.set_file_loader(loader_list)
# Button(root, text="TEST").grid(row=10, column=10)
# print seqPyPlot_GUI.callstring
# root.mainloop()
|
paulgradie/SeqPyPlot
|
dev/SeqPyPlot_GUI.py
|
Python
|
gpl-3.0
| 4,216
|
import ago
import logbook
import requests
from piper import config
from piper import logging
from piper import utils
from piper.api import RESTful
from piper.db.core import LazyDatabaseMixin
from piper.vcs import GitVCS
class Build(LazyDatabaseMixin):
"""
The main pipeline runner.
This class loads the configurations, sets up all other components,
executes them in whatever order they are supposed to happen in, collects
data about the state of the pipeline and persists it, and finally tears
down the components that needs tearing down.
"""
FIELDS_TO_DB = (
# Main fields
'id',
'agent',
'config',
# Booleans
'success',
'crashed',
# String fields
'status',
# Timestamps
'started',
'ended',
'created',
)
def __init__(self, config):
self.config = config
self.vcs = GitVCS('github', 'git@github.com')
self.id = None
self.version = None
self.steps = {}
self.order = []
self.started = None
self.success = None
self.crashed = False
self.status = None
self.pipeline = None
self.env = None
self.log = logbook.Logger(self.__class__.__name__)
def run(self, pipeline, env):
"""
Main entry point
This is run when starting the script from the command line.
Returns boolean success.
"""
self.pipeline = pipeline
self.env = env
self.log.info('Setting up {0}...'.format(self.pipeline))
self.started = utils.now()
self.setup()
self.execute()
self.teardown()
self.finish()
return self.success
def finish(self):
self.ended = utils.now()
verb = 'finished successfully in'
if not self.success:
verb = 'failed after'
ts = ago.human(
self.ended - self.started,
precision=5,
past_tense='%s {0}' % verb # hee hee
)
self.log.info('{0} {1}'.format(self.version, ts))
self.log_handler.pop_application()
def setup(self):
"""
Performs all setup steps
This is basically an umbrella function that runs setup for all the
things that the class needs to run a fully configured execute().
"""
# self.add_build()
self.set_logfile()
self.set_version()
self.configure_env()
self.configure_steps()
self.configure_pipeline()
self.setup_env()
def queue(self, pipeline, env):
"""
Use the API to enqueue a build.
"""
self.pipeline = pipeline
self.env = env
self.log.info('Adding to queue: {0} {1}'.format(pipeline, env))
app_conf = config.get_app_config()
url = '{0}/builds/'.format(app_conf['masters'][0])
requests.post(url, json=self.config.raw)
def set_logfile(self):
"""
Set the log file to store the build log in.
"""
self.log_key = '{0} {1}'.format(
self.__class__.__name__,
self.id[:7] if self.id else '',
)
self.log = logbook.Logger(self.log_key)
self.logfile = 'logs/piper/{0}.log'.format(self.id)
self.log_handler = logging.get_file_logger(self.logfile)
self.log_handler.push_application()
def set_version(self):
"""
Set the version for this pipeline
"""
self.log.debug('Determining version...')
ver_config = self.config.raw['version']
cls = self.config.classes[ver_config['class']]
self.version = cls(self, ver_config)
self.version.validate()
self.log.info(str(self.version))
def configure_env(self):
"""
Configures the environment according to its config file.
"""
self.log.debug('Loading environment...')
env_config = self.config.raw['envs'][self.env]
cls = self.config.classes[env_config['class']]
self.env = cls(self, env_config)
self.log.debug('Validating env config...')
self.env.validate()
self.env.log.debug('Environment configured.')
def configure_steps(self):
"""
Configures the steps according to their config sections.
"""
for step_key, step_config in self.config.raw['steps'].items():
cls = self.config.classes[step_config['class']]
step = cls(self, step_config, step_key)
step.log.debug('Validating config...')
step.validate()
step.log.debug('Step configured.')
self.steps[step_key] = step
def configure_pipeline(self):
"""
Places steps in proper order according to the pipeline.
"""
for step_key in self.config.raw['pipelines'][self.pipeline]:
step = self.steps[step_key]
self.order.append(step)
self.log.debug('Step order configured.')
self.log.info('Steps: ' + ', '.join(map(repr, self.order)))
def setup_env(self):
"""
Execute setup steps of the env
"""
self.env.log.debug('Setting up env...')
self.env.setup()
def execute(self):
"""
Runs the steps and determines whether to continue or not.
Of all the things to happen in this application, this is probably
the most important part!
"""
total = len(self.order)
self.log.info('Running {0}...'.format(self.pipeline))
for x, step in enumerate(self.order, start=1):
step.set_index(x, total)
# Update db status to show that we are running this build
self.status = '{0}/{1}: {2}'.format(x, total, step.key)
# self.db.build.update(self)
step.log.info('Running...')
proc = self.env.execute(step)
if proc.success:
step.log.info('Step complete.')
else:
# If the success is not positive, bail and stop running.
step.log.error('Step failed.')
self.log.error('{0} failed.'.format(self.pipeline))
self.success = False
break
self.status = ''
# As long as we did not break out of the loop above, the build is
# to be deemed successful.
if self.success is not False:
self.success = True
def teardown(self):
self.teardown_env()
def teardown_env(self):
"""
Execute teardown step of the env
"""
self.env.log.debug('Tearing down env...')
self.env.teardown()
class ExecCLI:
config_class = config.BuildConfig
def __init__(self, config):
self.config = config
def compose(self, parser): # pragma: nocover
cli = parser.add_parser('exec', help='Execute a pipeline locally')
cli.add_argument(
'pipeline',
nargs='?',
default='build',
help='The pipeline to execute',
)
cli.add_argument(
'env',
nargs='?',
default='local',
help='The environment to execute in',
)
return 'exec', self.run
def run(self, ns):
success = Build(self.config).run(ns.pipeline, ns.env)
return 0 if success else 1
class BuildCLI:
config_class = config.BuildConfig
def __init__(self, config):
self.config = config
def compose(self, parser): # pragma: nocover
cli = parser.add_parser('build', help='Build on an agent')
cli.add_argument(
'pipeline',
nargs='?',
default='build',
help='The pipeline to execute',
)
cli.add_argument(
'env',
nargs='?',
default='local',
help='The environment to execute in',
)
return 'build', self.run
def run(self, ns):
success = Build(self.config).queue(ns.pipeline, ns.env)
return 0 if success else 1
class BuildAPI(RESTful):
"""
API endpoint for CRUD operations on builds.
"""
def __init__(self, config):
super().__init__(config)
self.routes = (
('GET', '/builds/{id}', self.get),
('POST', '/builds/', self.create),
)
def get(self, request):
"""
Get one build.
"""
id = request.match_info.get('id')
build = self.db.build.get(id)
if build is None:
return {}, 404
return build
def create(self, request):
"""
Put a build into the database.
:returns: id of created object
"""
config = yield from self.extract_json(request)
build = Build(config)
build.created = utils.now() # TODO: Should be in Build()?
id = self.db.build.add(build)
self.log.info('Build {0} added.'.format(id))
ret = {
'id': id,
}
return ret, 201
|
thiderman/piper
|
piper/build.py
|
Python
|
mit
| 9,140
|
"""
OAuth dance session
"""
from google.appengine.ext import ndb
from ferris.core.ndb import Model
from credentials_property import CredentialsProperty
from ndb_storage import NdbStorage
import hashlib
class UserCredentials(Model):
user = ndb.UserProperty(indexed=True)
scopes = ndb.StringProperty(repeated=True, indexed=False)
admin = ndb.BooleanProperty(indexed=True)
credentials = CredentialsProperty(indexed=False)
filter_scopes = ndb.ComputedProperty(lambda x: ','.join(sorted(x.scopes)), indexed=True)
@classmethod
def _get_kind(cls):
return '__ferris__oauth2_user_credentials'
@classmethod
def after_get(cls, key, item):
if item and item.credentials:
item.credentials = NdbStorage(key, 'credentials', item).get()
@classmethod
def _get_key(cls, user, scopes, admin):
scopes_hash = hashlib.sha1(','.join(sorted(scopes))).hexdigest()
return ndb.Key(cls, '%s:%s:%s' % (user, scopes_hash, True if admin else False))
@classmethod
def create(cls, user, scopes, credentials, admin):
key = cls._get_key(user, scopes, admin)
item = cls(key=key, user=user, scopes=scopes, credentials=credentials, admin=admin)
item.put()
return item
@classmethod
def find(cls, user=None, scopes=None, admin=False):
if user and scopes:
key = cls._get_key(user, scopes, admin)
x = key.get()
else:
q = cls.query()
if user:
q = q.filter(cls.user == user)
if scopes:
q = q.filter(cls.filter_scopes == ','.join(sorted(scopes)))
if admin:
q = q.filter(cls.admin == admin)
x = q.get()
if x:
cls.after_get(x.key, x)
return x
@classmethod
def delete_all(cls, user):
        c = cls.query().filter(cls.user == user)
for x in c:
x.key.delete()
def find_credentials(user=None, scopes=None, admin=None):
"""
Finds credentials that fit the criteria provided. If no user is provided,
    the first set of credentials that have the given scopes and privilege
    level is returned.
Returns None if no credentials are found.
"""
return UserCredentials.find(user, scopes, admin)
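# Hedged usage sketch (the current-user call and scope URL are assumptions,
# not part of this module):
#
# from google.appengine.api import users
# creds = find_credentials(
#     user=users.get_current_user(),
#     scopes=['https://www.googleapis.com/auth/userinfo.email'],
#     admin=False)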
|
yowmamasita/social-listener-exam
|
ferris/core/oauth2/user_credentials.py
|
Python
|
mit
| 2,290
|
from __future__ import print_function
from random import shuffle
from myhdl import *
class FIFO(object):
"""
FIFO interface and model.
"""
def __init__(self, depth=16, width=16, clock_read=None, clock_write=None):
self.depth = depth
self.clock_write = clock_write
self.clock_read = clock_read
self.wr = Signal(bool(0)) # write strobe
self.rd = Signal(bool(0)) # read strobe
self.empty = Signal(bool(1)) # FIFO is empty
self.full = Signal(bool(0)) # FIFO is full
self.data_i = Signal(intbv(0)[width:]) # data input
self.data_o = Signal(intbv(0)[width:]) # data output
self.data_valid = Signal(bool(0)) # data out is valid
# modeling only
self._fifo = []
def __str__(self):
s = "full: {}, empty: {}, count: {}".format(self.full, self.empty, len(self._fifo))
return s
@property
def count(self):
return len(self._fifo)
def _update_flags(self):
if len(self._fifo) >= self.depth:
self.full.next = True
else:
self.full.next = False
if len(self._fifo) == 0:
self.empty.next = True
else:
self.empty.next = False
def write(self, obj):
""" write to the FIFO (model)
:param data: data to push onto the FIFO
:return: None
not convertible
"""
if len(self._fifo) < self.depth:
self._fifo.append(obj)
self._update_flags()
def read(self):
""" read from the FIFO (model)
:return: data read
not convertible
"""
obj = None
if len(self._fifo) > 0:
obj = self._fifo.pop(0)
self._update_flags()
return obj
def is_empty(self):
return len(self._fifo) == 0
def is_full(self):
return len(self._fifo) >= self.depth
    def shuffle(self):
        # model only: randomize the queued contents in place via random.shuffle
        shuffle(self._fifo)
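# Minimal model-level usage sketch (illustrative; no MyHDL simulation is
# running and the convertible signal interface is unused here):
#
# fifo = FIFO(depth=4, width=8)
# fifo.write(0xAB)
# assert fifo.count == 1
# assert fifo.read() == 0xAB
# assert fifo.is_empty()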
|
cfelton/parallella_elink
|
elink/_fifo_i.py
|
Python
|
mit
| 2,030
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order import DependentHostedNumberOrderList
class AuthorizationDocumentList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the AuthorizationDocumentList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
"""
super(AuthorizationDocumentList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/AuthorizationDocuments'.format(**self._solution)
def stream(self, email=values.unset, status=values.unset, limit=None,
page_size=None):
"""
Streams AuthorizationDocumentInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode email: Email.
:param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(email=email, status=status, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, email=values.unset, status=values.unset, limit=None,
page_size=None):
"""
Lists AuthorizationDocumentInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode email: Email.
:param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance]
"""
return list(self.stream(email=email, status=status, limit=limit, page_size=page_size, ))
def page(self, email=values.unset, status=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of AuthorizationDocumentInstance records from the API.
Request is executed immediately
:param unicode email: Email.
:param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
"""
data = values.of({
'Email': email,
'Status': status,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return AuthorizationDocumentPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of AuthorizationDocumentInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return AuthorizationDocumentPage(self._version, response, self._solution)
def create(self, hosted_number_order_sids, address_sid, email, contact_title,
contact_phone_number, cc_emails=values.unset):
"""
Create the AuthorizationDocumentInstance
:param unicode hosted_number_order_sids: A list of HostedNumberOrder sids.
:param unicode address_sid: Address sid.
:param unicode email: Email.
:param unicode contact_title: Title of signee of this Authorization Document.
:param unicode contact_phone_number: Authorization Document's signee's phone number.
:param unicode cc_emails: A list of emails.
:returns: The created AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
data = values.of({
'HostedNumberOrderSids': serialize.map(hosted_number_order_sids, lambda e: e),
'AddressSid': address_sid,
'Email': email,
'ContactTitle': contact_title,
'ContactPhoneNumber': contact_phone_number,
'CcEmails': serialize.map(cc_emails, lambda e: e),
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return AuthorizationDocumentInstance(self._version, payload, )
def get(self, sid):
"""
Constructs a AuthorizationDocumentContext
:param sid: AuthorizationDocument sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
return AuthorizationDocumentContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a AuthorizationDocumentContext
:param sid: AuthorizationDocument sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
return AuthorizationDocumentContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentList>'
class AuthorizationDocumentPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the AuthorizationDocumentPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
"""
super(AuthorizationDocumentPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of AuthorizationDocumentInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
return AuthorizationDocumentInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentPage>'
class AuthorizationDocumentContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, sid):
"""
Initialize the AuthorizationDocumentContext
:param Version version: Version that contains the resource
:param sid: AuthorizationDocument sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
super(AuthorizationDocumentContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/AuthorizationDocuments/{sid}'.format(**self._solution)
# Dependents
self._dependent_hosted_number_orders = None
def fetch(self):
"""
Fetch the AuthorizationDocumentInstance
:returns: The fetched AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return AuthorizationDocumentInstance(self._version, payload, sid=self._solution['sid'], )
def update(self, hosted_number_order_sids=values.unset,
address_sid=values.unset, email=values.unset, cc_emails=values.unset,
status=values.unset, contact_title=values.unset,
contact_phone_number=values.unset):
"""
Update the AuthorizationDocumentInstance
:param unicode hosted_number_order_sids: A list of HostedNumberOrder sids.
:param unicode address_sid: Address sid.
:param unicode email: Email.
:param unicode cc_emails: A list of emails.
:param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
:param unicode contact_title: Title of signee of this Authorization Document.
:param unicode contact_phone_number: Authorization Document's signee's phone number.
:returns: The updated AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
data = values.of({
'HostedNumberOrderSids': serialize.map(hosted_number_order_sids, lambda e: e),
'AddressSid': address_sid,
'Email': email,
'CcEmails': serialize.map(cc_emails, lambda e: e),
'Status': status,
'ContactTitle': contact_title,
'ContactPhoneNumber': contact_phone_number,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return AuthorizationDocumentInstance(self._version, payload, sid=self._solution['sid'], )
@property
def dependent_hosted_number_orders(self):
"""
Access the dependent_hosted_number_orders
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
"""
if self._dependent_hosted_number_orders is None:
self._dependent_hosted_number_orders = DependentHostedNumberOrderList(
self._version,
signing_document_sid=self._solution['sid'],
)
return self._dependent_hosted_number_orders
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentContext {}>'.format(context)
class AuthorizationDocumentInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
class Status(object):
OPENED = "opened"
SIGNING = "signing"
SIGNED = "signed"
CANCELED = "canceled"
FAILED = "failed"
def __init__(self, version, payload, sid=None):
"""
Initialize the AuthorizationDocumentInstance
:returns: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
super(AuthorizationDocumentInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'address_sid': payload.get('address_sid'),
'status': payload.get('status'),
'email': payload.get('email'),
'cc_emails': payload.get('cc_emails'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AuthorizationDocumentContext for this AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentContext
"""
if self._context is None:
self._context = AuthorizationDocumentContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: AuthorizationDocument sid.
:rtype: unicode
"""
return self._properties['sid']
@property
def address_sid(self):
"""
:returns: Address sid.
:rtype: unicode
"""
return self._properties['address_sid']
@property
def status(self):
"""
:returns: The Status of this AuthorizationDocument.
:rtype: AuthorizationDocumentInstance.Status
"""
return self._properties['status']
@property
def email(self):
"""
:returns: Email.
:rtype: unicode
"""
return self._properties['email']
@property
def cc_emails(self):
"""
:returns: A list of emails.
:rtype: unicode
"""
return self._properties['cc_emails']
@property
def date_created(self):
"""
:returns: The date this AuthorizationDocument was created.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this AuthorizationDocument was updated.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch the AuthorizationDocumentInstance
:returns: The fetched AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
return self._proxy.fetch()
def update(self, hosted_number_order_sids=values.unset,
address_sid=values.unset, email=values.unset, cc_emails=values.unset,
status=values.unset, contact_title=values.unset,
contact_phone_number=values.unset):
"""
Update the AuthorizationDocumentInstance
:param unicode hosted_number_order_sids: A list of HostedNumberOrder sids.
:param unicode address_sid: Address sid.
:param unicode email: Email.
:param unicode cc_emails: A list of emails.
:param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
:param unicode contact_title: Title of signee of this Authorization Document.
:param unicode contact_phone_number: Authorization Document's signee's phone number.
:returns: The updated AuthorizationDocumentInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
"""
return self._proxy.update(
hosted_number_order_sids=hosted_number_order_sids,
address_sid=address_sid,
email=email,
cc_emails=cc_emails,
status=status,
contact_title=contact_title,
contact_phone_number=contact_phone_number,
)
@property
def dependent_hosted_number_orders(self):
"""
Access the dependent_hosted_number_orders
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
"""
return self._proxy.dependent_hosted_number_orders
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.HostedNumbers.AuthorizationDocumentInstance {}>'.format(context)
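# Hedged usage sketch (client construction and every sid/value below are
# placeholders, not real identifiers):
#
# from twilio.rest import Client
# client = Client(account_sid, auth_token)
# document = client.preview.hosted_numbers.authorization_documents.create(
#     hosted_number_order_sids=['HR...'],
#     address_sid='AD...',
#     email='signee@example.com',
#     contact_title='Ms.',
#     contact_phone_number='+15551234567',
# )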
|
Vagab0nd/SiCKRAGE
|
lib3/twilio/rest/preview/hosted_numbers/authorization_document/__init__.py
|
Python
|
gpl-3.0
| 20,006
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
try:
import resource # pylint: disable=F0401
except ImportError:
resource = None # Not available on all platforms
from telemetry.core.platform import posix_platform_backend
class MacPlatformBackend(posix_platform_backend.PosixPlatformBackend):
def StartRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def StopRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def GetRawDisplayFrameRateMeasurements(self):
raise NotImplementedError()
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
vm_stat = self._RunCommand(['vm_stat'])
for stat in vm_stat.splitlines():
key, value = stat.split(':')
if key == 'Pages active':
pages_active = int(value.strip()[:-1]) # Strip trailing '.'
return pages_active * resource.getpagesize() / 1024
return 0
def GetMemoryStats(self, pid):
rss_vsz = self._GetPsOutput(['rss', 'vsz'], pid)
if rss_vsz:
rss, vsz = rss_vsz[0].split()
return {'VM': 1024 * int(vsz),
'WorkingSetSize': 1024 * int(rss)}
return {}
def GetOSName(self):
return 'mac'
def GetOSVersionName(self):
os_version = os.uname()[2]
if os_version.startswith('9.'):
return 'leopard'
if os_version.startswith('10.'):
return 'snowleopard'
if os_version.startswith('11.'):
return 'lion'
if os_version.startswith('12.'):
return 'mountainlion'
#if os_version.startswith('13.'):
# return 'mavericks'
|
pozdnyakov/chromium-crosswalk
|
tools/telemetry/telemetry/core/platform/mac_platform_backend.py
|
Python
|
bsd-3-clause
| 1,792
|
import PIL.Image
from PIL.ExifTags import TAGS, GPSTAGS
# https://gist.github.com/erans/983821
def get_lat_lon(exif_data):
lat = None
lon = None
if "GPSInfo" in exif_data:
gps_info = exif_data["GPSInfo"]
gps_latitude = _get_if_exist(gps_info, "GPSLatitude")
gps_latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')
gps_longitude = _get_if_exist(gps_info, 'GPSLongitude')
gps_longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
            lat = _convert_to_degrees(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
            lon = _convert_to_degrees(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
return lat, lon
def get_time_pic_taken(exif_data):
if "DateTimeOriginal" in exif_data:
date = exif_data["DateTimeOriginal"]
return date
# https://gist.github.com/erans/983821
def _convert_to_degrees(value):
    """Helper function to convert the GPS coordinates stored in the EXIF to degrees in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
def get_exif(pic_path):
img = PIL.Image.open(pic_path)
exif_d = get_exif_data(img)
return exif_d
# https://gist.github.com/erans/983821
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
# https://gist.github.com/erans/983821
def get_exif_data(image):
"""Returns a dictionary from the exif data of an PIL Image item. Also converts the GPS Tags"""
exif_data = {}
info = image._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
return exif_data
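# Hedged usage sketch (the file path is hypothetical):
#
# exif = get_exif("photo_with_gps.jpg")
# lat, lon = get_lat_lon(exif)
# taken = get_time_pic_taken(exif)
# print(lat, lon, taken)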
|
JBmiog/IOT
|
server_script/exif_reader.py
|
Python
|
gpl-3.0
| 2,314
|
# uncompyle6 version 2.9.10
# Python bytecode 2.6 (62161)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: c:\Temp\build\ZIBE\shell.py
# Compiled at: 2013-03-28 22:23:46
import cmd
import string
import binascii
from context_mgr import *
from plugin_manager import PluginManager
from zibe_errors import error_codes
import shlex
import logging
import zbutil
import time
import sys
import os
import exceptions
from Queue import Queue
import io
import re
import traceback
import inspect
from functools import wraps
from codecs import StreamWriter, StreamReader, getwriter, getreader
sys.path.append(os.path.dirname(__file__))
try:
import readline
HAVE_READLINE = True
except ImportError:
HAVE_READLINE = False
class OutputWrapper(StreamWriter):
def __init__(self, stream, errors='strict', level=logging.INFO, logger=None):
StreamWriter.__init__(self, stream, errors=errors)
self.loglevel = level
if logger:
self.logger = logger
else:
self.logger = logging.getLogger()
def write(self, data):
if data not in ('\n', '\r'):
self.logger.log(self.loglevel, data)
self.stream.write(data)
def writelines(self, lines):
        for l in lines:
self.write(l)
def reset(self):
pass
def seek(self, where):
if not isinstance(self.stream, file):
self.stream.seek(where)
class InputWrapper(StreamReader):
def __init__(self, stream, errors='strict', level=logging.INFO, logger=None):
StreamReader.__init__(self, stream, errors=errors)
self.logger = logging.getLogger()
self.loglevel = level
if logger:
self.logger = logger
else:
self.logger = logging.getLogger()
def read(self, size=-1, chars=-1, firstline=False):
data = self.stream.read(size)
self.logger.log(self.loglevel, data.strip('\n'))
return data
def readline(self, size=-1, keepends=True):
line = self.stream.readline(size)
self.logger.log(self.loglevel, line.strip('\n'))
return line
def readlines(self, sizehint=-1, keepends=True):
lines = self.stream.readlines(sizehint)
for l in lines:
self.logger.log(self.loglevel, l.strip('\n'))
return lines
def reset(self):
pass
class ZIBEShell(cmd.Cmd):
plugin_mgr = None
prompt = 'ZIBE> '
zb_context = None
def __init__(self, context=None, completekey='tab', stdin=sys.stdin, stdout=sys.stdout, logger=None):
cmd.Cmd.__init__(self, completekey, stdin, stdout)
self.plugin_mgr = PluginManager()
self.ctx = context
self.use_rawinput = 1
self.ioqueue = []
self.stdout = OutputWrapper(stdout, logger=logger)
self.stdin = InputWrapper(stdin, logger=logger)
self.stderr = OutputWrapper(sys.stderr, level=logging.ERROR, logger=logger)
def format_datetime(self, dt):
try:
return '%.2d/%.2d/%.4d %.2d:%.2d' % (dt.month, dt.day, dt.year, dt.hour, dt.minute)
except:
return '<error calculating>'
def emptyline(self):
pass
def prompt_for_input(self, prompt):
if self.use_rawinput:
try:
line = raw_input(prompt)
except EOFError:
line = 'EOF'
else:
            self.stdout.write(prompt)
            self.stdout.flush()
            line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
return line
def default(self, stdin, stdout, stderr, ctx, args):
        self.stdout.write('*** Unknown syntax: %s\n' % args)
def onecmd(self, line, stdin, stdout, stderr):
cmd, args, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(stdin, stdout, stderr, self.ctx, args)
self.lastcmd = line
if cmd == '':
return self.default(stdin, stdout, stderr, self.ctx, args)
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
if self.plugin_mgr.handler_exists(cmd):
func = self.plugin_mgr.get_handler_func(cmd)
else:
func = self.default
args = zbutil.parseargs(args)
return func(stdin, stdout, stderr, self.ctx, args)
def get_compstate(self, text, arglist):
if not text:
return len(arglist)
return max(len(arglist) - 1, 0)
def complete(self, text, state):
if state == 0:
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
if begidx > 0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError, e:
compfunc = self.completedefault
else:
compfunc = self.completenames
try:
self.completion_matches = compfunc(text, origline, begidx, endidx)
except Exception, e:
import traceback
logging.error('Error doing completion function: %s' % e)
logging.error(traceback.format_exc())
try:
return self.completion_matches[state]
except IndexError:
return
def completedefault(self, *ignored):
return []
def getcommands(self, line):
cmds = line.split('|')
ret = []
pipes = []
if len(cmds) == 1:
return [(cmds[0], self.stdin, self.stdout, self.stderr)]
pipes.append(io.BytesIO())
ret.append((cmds[0], self.stdin, OutputWrapper(pipes[0]), self.stderr))
for i in xrange(1, len(cmds) - 1):
            pipes.append(io.BytesIO())
ret.append((cmds[i], InputWrapper(pipes[i - 1]), OutputWrapper(pipes[i]), self.stderr))
ret.append((cmds[-1], InputWrapper(pipes[-1]), self.stdout, self.stderr))
return ret
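    # Illustrative note (hypothetical input, not from the original source): for
    # the line 'dir | grep -i foo' this returns two tuples wired through one
    # in-memory pipe, roughly:
    #   [('dir ',         self.stdin,         OutputWrapper(pipe), self.stderr),
    #    (' grep -i foo', InputWrapper(pipe), self.stdout,         self.stderr)]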
def precmd(self, cmd):
return cmd
def postcmd(self, stop, line):
return stop
def cmdloop(self, intro=None):
self.preloop()
if self.use_rawinput and self.completekey:
try:
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey + ': complete')
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
print >> self.stdout, str(self.intro)
stop = None
while not stop:
try:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
self.stdout.write(self.prompt + line)
else:
if self.use_rawinput:
try:
line = raw_input(self.prompt)
logging.info(self.prompt)
logging.info('[INPUT] ' + line)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
logging.log(5, line)
cmds = self.getcommands(line)
for c, stdin, stdout, stderr in cmds:
cmd = self.precmd(c)
stop = self.onecmd(cmd, stdin, stdout, stderr)
stop = self.postcmd(stop, line)
stdout.seek(0)
except ZIBEException, e:
print >> stderr, 'Command exception: %s' % e
logging.debug(traceback.format_exc())
except KeyboardInterrupt:
self.stdout.write('')
except Exception, e:
print >> stderr, 'Unknown error: %s' % e
logging.debug(traceback.format_exc())
self.postloop()
finally:
            if self.use_rawinput and self.completekey:
                try:
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass
return
def complete_files(self, text, line, begidx, endidx):
if self.zb_context is None:
            print >> self.stderr, 'No context is active'
return []
arglist = zbutil.parseargs(line)
args = [arglist[0], ' '.join(arglist[1:])]
try:
directory = self.ctx.get_cwd()
if len(args[1]) > 0:
directory = os.path.join(directory, args[1])
file_list = self.ctx.dir_list(os.path.split(directory)[0])
            return [ file['filename'] for file in file_list
                     if file['filename'].lower().startswith(text.lower())
                     and not file['attributes'] & WIN32Constants.FILE_ATTRIBUTE_DIRECTORY ]
except ZIBEException, err:
            print >> self.stderr, 'Error in complete_files: %s' % err
return []
return
def complete_directories(self, text, line, begidx, endidx):
if self.zb_context is None:
            print >> self.stderr, 'No context is active'
return []
arglist = zbutil.parseargs(line)
args = [arglist[0], ' '.join(arglist[1:])]
try:
directory = self.ctx.get_cwd()
if len(args[1]) > 0:
directory = os.path.join(directory, args[1])
file_list = self.ctx.dir_list(os.path.split(directory)[0])
            return [ file['filename'] for file in file_list
                     if file['filename'].lower().startswith(text.lower())
                     and file['attributes'] & WIN32Constants.FILE_ATTRIBUTE_DIRECTORY ]
except ZIBEException, err:
            print >> self.stderr, 'Error in complete_directories: %s' % err
return []
return
def complete_regkeys(self, text, line, begidx, endidx):
if self.zb_context is None:
logging.warning('No context is active')
return []
arglist = zbutil.parseargs(line)
args = [
arglist[0], ' '.join(arglist[1:])]
try:
keys = self.ctx.enum_keys()
return [ key for key in keys if key.lower().startswith(text.lower()) ]
except ZIBEException, err:
            print >> self.stderr, str(err)
return []
return
def complete_regvalues(self, text, line, begidx, endidx):
try:
key = self.ctx.get_cwk()
values = self.ctx.enum_value_names()
return [ value for value in values if value.lower().startswith(text.lower()) ]
except ZIBEException, err:
            print >> self.stderr, 'Error in complete_get: %s' % err
return []
def help_grep(self, stdin, stdout, stderr):
print >> stdout, "<cmd> | grep [-i] <string> - Search for a string using grep from another command's output"
def do_grep(self, stdin, stdout, stderr, ctx, args):
flags = 0
if '-i' in args:
flags = re.IGNORECASE
args.remove('-i')
if len(args) == 1:
regex = re.compile(args[0], flags)
lines = stdin.read().split('\n')
matches = [ l for l in lines if regex.search(l) ]
for m in matches:
                print >> stdout, m
elif len(args) == 2:
raise NotImplementedError('Not yet implemented')
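    # Usage sketch (hypothetical session, not from the original source):
    #   ZIBE> dir | grep -i readme
    # pipes the output of 'dir' through the regex above and prints only the
    # lines matching 'readme', case-insensitively.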
def print_plugin_help(self, plugin_name):
pass
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
print >> self.stdout, '%s' % str(header)
if self.ruler:
print >> self.stdout, '%s' % str(self.ruler * len(header))
self.columnize(cmds, maxcol - 1)
print >> self.stdout, ''
def help_help(self, stdin, stdout, stderr):
        print >> stdout, 'help [command] - Get help on a specific command, or a list of commands if\n                 no arguments are specified'
def do_help(self, stdin, stdout, stderr, ctx, args):
if args:
arg = args[0]
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc = getattr(self, 'do_' + arg).__doc__
if doc:
print >> stdout, '%s' % str(doc)
return
except AttributeError:
handler_info = self.plugin_mgr.get_handler_info(arg)
if handler_info and handler_info.has_key('help_func'):
print >> stdout, '%s' % handler_info['help_func']()
return
print >> stdout, '%s' % str(self.nohelp % arg)
return
func(stdin, stdout, stderr)
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]] = 1
names.sort()
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd = name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
plugins = []
for p in self.plugin_mgr.plugins():
plugins.append(p[0])
print >> stdout, '%s' % str(self.doc_leader)
self.print_topics(self.doc_header, cmds_doc, 15, 80)
self.print_topics(self.misc_header, help.keys(), 15, 80)
self.print_topics(self.undoc_header, cmds_undoc, 15, 80)
for p in self.plugin_mgr.plugins():
commands = self.plugin_mgr.plugin_commands(p[0])
self.print_topics(p[1] + ' commands:', commands, 15, 80)
print >> stdout, ''
def help_plugins(self, stdin, stdout, stderr):
print >> stdout, 'plugins - List all currently loaded plugins'
def do_plugins(self, stdin, stdout, stderr, ctx, args):
print >> stdout, '\nLoaded Plugins'
print >> stdout, '----------------------------------------------------'
for p in self.plugin_mgr.plugins():
print >> stdout, '%-20s : %-50s' % (p[0], p[1])
print >> stdout, ''
def help_lpwd(self, stdin, stdout, stderr):
print >> stdout, 'lpwd - Get the present working directory'
def do_lpwd(self, stdin, stdout, stderr, ctx, args):
self.do_lcd(stdin, stdout, stderr, ctx, [])
def complete_lcd(self, text, line, begidx, endidx):
line = line.encode('utf-8')
args = zbutil.parseargs(line)
try:
dirname, fname = os.path.split(args[1])
if dirname == '':
dirname = self.ctx.localdir
ret = []
for f in os.listdir(dirname):
relfile = os.path.relpath(os.path.join(dirname, f))
if f.lower().startswith(fname.lower()) and os.path.isdir(relfile):
ret.append(f)
return ret
except IndexError:
return []
def help_lcd(self, stdin, stdout, stderr):
print >> stdout, 'lcd <path> - Change the current working directory locally'
def do_lcd(self, stdin, stdout, stderr, ctx, args):
if len(args) == 0:
print >> stdout, 'Current directory: %s' % self.ctx.localdir
else:
try:
path = os.path.abspath(os.path.join(self.ctx.localdir, args[0]))
self.ctx.lcd(path)
print >> stdout, path
except exceptions.EnvironmentError, e:
logging.warning(str(e))
def help_ldir(self, stdin, stdout, stderr):
print >> stdout, 'ldir <relpath>- List directory contents for the current directory'
def do_ldir(self, stdin, stdout, stderr, ctx, args):
dircount = 0
filecount = 0
filesize = 0
path = self.ctx.localdir
if len(args) > 0:
path = os.path.join(path, args[0])
print >> stdout, 'Directory listing for %s' % path
for f in sorted(os.listdir(path)):
fqfile = os.path.join(path, f)
os.stat_float_times(False)
timetup = time.strptime(time.ctime(os.path.getatime(fqfile)))
strtime = time.strftime('%y/%m/%d %I:%M %p', timetup)
if os.path.isdir(f):
string = '%s <DIR> %7s %s' % (strtime, '', f)
print >> stdout, string
dircount += 1
else:
string = '%s %-5s %7d %s' % (strtime, '', os.path.getsize(fqfile), f)
print >> stdout, string
filecount += 1
filesize += os.path.getsize(fqfile)
print >> stdout, ' ' + '%d File(s)' % filecount + ' %d bytes' % filesize
print >> stdout, ' ' + '%d Dir(s)' % dircount
def complete_cd(self, text, line, begidx, endidx):
if self.zb_context is None:
            print >> self.stderr, 'No context is active'
return []
if self.is_reghive(self.zb_context):
return self.complete_regkeys(text, line, begidx, endidx)
return self.complete_directories(text, line, begidx, endidx)
def help_cd(self, stdin, stdout, stderr):
print >> stdout, 'Change the current working directory or registry key'
def do_cd(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
print >> stderr, 'No context yet selected'
return
if len(args) == 0:
print >> stdout, '%s' % self.get_current_working_path()
elif len(args) > 1:
print >> stderr, 'Invalid command'
else:
arg = zbutil.arg_to_utf8(args[0])
try:
if self.is_reghive(self.zb_context):
self.ctx.change_cwk(arg)
else:
self.ctx.change_directory(arg)
except ZIBEException, err:
print >> stderr, str(err)
return
self.update_prompt()
return
def help_dir(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['dir [directory][pattern] - Performs a listing of the current context. If no',
            ' directory / pattern is provided, the current working directory /',
' key is used. Note that pattern matching is only supported for',
' file shares\n'])
def complete_dir(self, text, line, begidx, endidx):
return self.complete_directories(text, line, begidx, endidx)
def do_dir(self, sdtin, stdout, stderr, ctx, args):
if self.zb_context is None:
print >> stderr, 'No context yet selected'
elif self.is_reghive(self.zb_context):
try:
keys = self.ctx.enum_keys()
values = self.ctx.enum_values()
except ZIBEException, err:
                print >> stderr, str(err)
return
print >> stdout, '\n\n Registry listing for %s' % (self.zb_context + self.ctx.get_cwk())
maxlen = max(max(map(len, keys)) if keys else 0, max(map(lambda x: len(x['name']), values)) if values else 0)
fmt = '%%-%ds' % maxlen
for k in keys:
print >> stdout, fmt % k + ' <KEY>'
maxtypelen = max(map(lambda x: len(x['type']), values)) if len(values) > 0 else 0
fmt = '%%-%ds %%-%ds' % (maxlen, maxtypelen)
for v in values:
if v['type'] == 'REG_DWORD' or v['type'] == 'REG_DWORD_BIG_ENDIAN':
print >> stdout, fmt % (v['name'], v['type']) + ' %d' % v['data']
else:
vals = zbutil.hexdump(str(v['data']), width=16).split('\n')
val = vals[0]
try:
val = '\n'.join([val] + map(lambda x: ' ' * (maxlen + maxtypelen + 5) + x, vals[1:]))
except IndexError:
pass
print >> stdout, fmt % (v['name'], v['type']) + ' %s' % val
print >> stdout, ''
print >> stdout, '%16d Keys(s), %14d Value(s)' % (len(keys), len(values))
else:
try:
directory = None
if len(args) > 0:
directory = args[0]
file_list = self.ctx.dir_list(directory)
print >> stdout, '\n Directory listing for %s' % self.get_current_working_path()
folder_count = 0
file_count = 0
folder_size = 0
for file in file_list:
folder_size += file['filesize']
field2 = '<DIR> '
if file['attributes'] & WIN32Constants.FILE_ATTRIBUTE_DIRECTORY != WIN32Constants.FILE_ATTRIBUTE_DIRECTORY:
file_count += 1
field2 = '%14d' % file['filesize']
else:
folder_count += 1
if not zbutil.isprintable(file['filename']):
file['filename'] = binascii.hexlify(file['filename']) + ' (hexlified)'
print >> stdout, '%-19s %s %s' % (self.format_datetime(file['last_access_time']), field2, file['filename'])
print >> stdout, '%16d File(s) %-14d bytes' % (file_count, folder_size)
print >> stdout, '%16d Dirs(s)' % folder_count
except ZIBEException, err:
print >> stderr, str(err)
return
help_ls = help_dir
do_ls = do_dir
def help_contexts(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['contexts - Lists all of the available contexts. A context can either be an SMB ',
' share or a registry hive\n'])
def show_contexts(self, stdin, stdout, stderr, ctx, args):
try:
shares = ctx.enumerate_shares()
print >> stdout, 'Shares:'
print >> stdout, '%-20s %-40s %-8s' % ('Share Name', 'Remark', 'Type')
print >> stdout, '-' * 71
for s in shares:
if not zbutil.isprintable(s['remark']):
s['remark'] = binascii.hexlify(s['remark']) + ' (hexlified)'
print >> stdout, '%-20s %-40s %-8x' % (s['share_name'], s['remark'], s['type'])
print >> stdout, '\n'
print >> stdout, 'Registry Hives:\n'
print >> stdout, '%-30s %-30s' % ('Full Name', 'Access Name')
print >> stdout, '------------------------------------------------------------'
print >> stdout, '%-30s %-30s' % ('HKEY_LOCAL_MACHINE', 'HKLM')
print >> stdout, '%-30s %-30s' % ('HKEY_CLASSES_ROOT', 'HKCR')
print >> stdout, '%-30s %-30s' % ('HKEY_CURRENT_USER', 'HKCU')
print >> stdout, '%-30s %-30s' % ('HKEY_USERS', 'HKU')
            print >> stdout, '%-30s %-30s' % ('HKEY_PERFORMANCE_DATA', 'HKPD')
print >> stdout, ''
except ZIBEException, err:
print >> stderr, str(err)
if error_codes.get(err.id, ['', ''])[0] == 'NtErrorObjectNameNotFound':
print >> stderr, "It is possible the 'browser' service is not running. Please start the service if possible and try again"
def do_contexts(self, stdin, stdout, stderr, ctx, args):
self.show_contexts(stdin, stdout, stderr, ctx, args)
def help_addshare(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['addshare [share_name] [path] - Adds a new network share on the remote target. ',
' Note: share names should not contain spaces\n'])
def do_addshare(self, stdin, stdout, stderr, ctx, args):
if len(args) != 2:
print >> stderr, 'Invalid command syntax. Share name and path both required'
else:
try:
self.ctx.add_share(args[0], args[1])
print >> stdout, "Successfully added '%s -> %s" % (args[0], args[1])
except ZIBEException, err:
print >> stderr, str(err)
def complete_delshare(self, text, line, begidx, endidx):
return []
def help_delshare(self, stdin, stdout, stderr):
print >> stdout, 'delshare [share_name] - Removes a network share from the remote target'
def do_delshare(self, stdin, stdout, stderr, ctx, args):
if len(args) != 1:
print >> stderr, 'Invalid command syntax. Please only provide a share name'
else:
try:
self.ctx.delete_share(args[0])
print >> stdout, 'Successfully deleted share %s' % args[0]
except ZIBEException, err:
print >> stderr, str(err)
def help_quit(self, stdin, stdout, stderr):
print >> stdout, 'quit - Exits the shell application'
def do_quit(self, stdin, stdout, stderr, ctx, args):
try:
self.ctx.finish_session()
except ZIBEException, e:
print >> stderr, str(e)
return True
def help_exit(self, stdin, stdout, stderr):
print >> stdout, 'exit - Exits the shell application'
def do_exit(self, stdin, stdout, stderr, ctx, args):
return self.do_quit(stdin, stdout, stderr, ctx, args)
def is_reghive(self, name):
if name in ('HKCR', 'HKCU', 'HKLM', 'HKU', 'HKPD'):
return True
return False
def get_current_working_path(self):
if self.zb_context is None:
return 'ZIBE'
if self.is_reghive(self.zb_context):
return self.zb_context + self.ctx.get_cwk()
return self.zb_context + self.ctx.get_cwd()
def update_prompt(self):
self.prompt = self.get_current_working_path() + '> '
def help_del(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['del <filename> - Deletes the specified file. Wildcards are not supported \n'])
def complete_rmdir(self, text, line, begidx, endidx):
return []
def help_rmdir(self, stdin, stdout, stderr):
print >> stdout, 'rmdir [flags] <dirpath> \n Remove a directory and its contents (if possible). If -f or --force\n are specified as flags, then do not confirm removal'
def do_rmdir(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
logging.warning('No context selected')
return
if len(args) == 0:
print >> stderr, 'The syntax of the command is incorrect'
return
if self.is_reghive(self.zb_context):
try:
self.ctx.delete_key(args[0])
except ZIBEException, err:
print >> stderr, str(err)
return
else:
try:
if args[0].startswith('\\'):
newdir = args[0]
else:
curdir = self.ctx.get_cwd()
newdir = os.path.join(curdir, args[0])
print >> stdout, 'Removing directory %s' % newdir
self.ctx.remove_directory(newdir)
print >> stdout, 'Removed!'
except ZIBEException, err:
print >> stderr, 'rmdir error: %s' % err
return
return
def help_mkdir(self, stdin, stdout, stderr):
print >> stdout, 'mkdir <dirname or path> - Make a new directory'
def do_mkdir(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
logging.warning('No context selected')
return
if len(args) != 1:
print >> stderr, 'The syntax of the command is incorrect'
return
if self.is_reghive(self.zb_context):
try:
self.ctx.create_key(args[0])
except ZIBEException, err:
print >> stderr, str(err)
return
else:
try:
if args[0].startswith('\\'):
newdir = args[0]
else:
curdir = self.ctx.get_cwd()
newdir = os.path.join(curdir, args[0])
print >> stdout, 'Creating directory %s' % newdir
self.ctx.create_directory(newdir)
except ZIBEException, err:
print >> stderr, 'mkdir error: %s' % err
return
return
def do_del(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
logging.warning('No context selected')
return
        if len(args) == 0:
            print >> stderr, 'The syntax of the command is incorrect'
            return
        if self.is_reghive(self.zb_context):
            if args[0] == '*':
                values = self.ctx.enum_value_names()
            else:
                values = args
            for v in values:
                try:
                    self.ctx.delete_value(v)
                    print >> stdout, 'Successfully deleted %s' % v
                except ZIBEException, err:
                    print >> stderr, str(err)
            return
        if len(args) == 1 and '*' in args[0]:
            files = [ f['filename'] for f in self.ctx.dir_list(args[0]) ]
        else:
            files = args
try:
for f in files:
self.ctx.delete_file(f)
print >> stdout, "Successfully deleted '" + f + "'"
except ZIBEException, err:
print >> stderr, str(err)
return
def help_cat(self, stdin, stdout, stderr):
print >> stdout, 'cat <filename> - Print file contents to the screen'
def do_cat(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
logging.warning('No context selected')
return
if self.is_reghive(self.zb_context):
print >> stdout, 'cat not available in this context'
return
if len(args) != 1:
print >> stderr, 'Invalid syntax: cat [filename]'
return
try:
file_contents = self.ctx.get_file(args[0])
print >> stdout, file_contents
except ZIBEException, err:
print >> stderr, str(err)
return
return
def help_use(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['use [context_name] - Switches the current context. For a list of contexts use ',
" the 'context' command\n"])
def do_use(self, stdin, stdout, stderr, ctx, args):
if len(args) == 1:
reghive = args[0].upper()
if self.is_reghive(reghive):
self.zb_context = reghive
self.ctx.change_hive(reghive)
else:
try:
self.ctx.use_share(args[0])
self.zb_context = args[0]
except ZIBEException, err:
print >> stderr, str(err)
self.update_prompt()
elif len(args) == 0:
self.show_contexts(stdin, stdout, stderr, ctx, args)
else:
print >> stderr, 'Invalid syntax'
def help_setvalue(self, stdin, stdout, stderr):
        print >> stdout, '\n'.join(['setvalue <name> <type> <value> - Set a registry value of the given ',
            ' type. Binary values can be specified as arrays, ',
            ' decimal values, or hexadecimal values\n'])
def do_setvalue(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
logging.warning('No context selected')
return
if self.is_reghive(self.zb_context) is False:
logging.warning('This command can only be used when in a registry context')
return
if len(args) < 3:
print >> stderr, 'The syntax of the command is incorrect'
return
if args[1] == 'REG_SZ':
value = args[2]
type = 1
elif args[1] == 'REG_MULTI_SZ':
value = args[2:]
type = 7
elif args[1] == 'REG_BINARY':
try:
value = zbutil.arg2value(args[2:])
except ValueError:
                print >> stderr, 'Unable to convert hexadecimal data to binary buffer'
return
type = 3
elif args[1] == 'REG_DWORD':
try:
value = zbutil.arg2value(args[2], size=4)
except ValueError:
print >> stderr, 'Invalid integer value'
return
type = 4
else:
print >> stderr, 'Unsupported data type. Only REG_SZ, REG_MULTI_SZ, REG_DWORD, and REG_BINARY supported'
return
try:
logging.debug('set %s %s %s' % (args[0], type, value))
self.ctx.set_reg_value(args[0], type, value)
except ZIBEException, err:
print >> stderr, str(err)
return
def help_put(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['put <local_filename> <remote_filename> - Uploads a file to the remote ',
' target. This command only works in the SMB / file context. If',
' you want to set a registry value, use the setvalue command\n'])
def complete_put(self, text, line, begidx, endidx):
return []
def do_put(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
logging.warning('No context selected')
return
if len(args) != 2:
print >> stderr, 'The syntax of the command is incorrect'
return
try:
with open(args[0], 'rb') as fp:
file = fp.read()
except IOError, err:
print >> stderr, 'Failed to open local file: %s' % str(err)
return
try:
self.ctx.put_file(args[1], file)
print >> stdout, 'Successfully put 1 file'
except ZIBEException, err:
print >> stderr, str(err)
return
def _print_regvalue(self, stdin, stdout, stderr, data):
print >> stdout, ' Name: %-20s Type: %-15s' % (data[0], data[1])
if data[1] == 'REG_SZ' or data[1] == 'REG_EXPAND_SZ':
print >> stdout, " '%s'" % data[2]
elif data[1] == 'REG_DWORD' or data[1] == 'REG_QWORD':
print >> stdout, ' 0x%.8x (%d)' % (data[2], data[2])
elif data[1] == 'REG_MULTI_SZ':
for s in data[2]:
print >> stdout, " '%s'" % s
        elif data[1] == 'REG_BINARY':
            i = 1
            hex_rep = ''
            string_rep = ''
            for c in data[2]:
                hex_rep += '%.2x ' % ord(c)
                if c in string.printable:
                    string_rep += c
                else:
                    string_rep += '.'
                if i % 16 == 0:
                    print >> stdout, '    ' + hex_rep + ' ' + string_rep
                    hex_rep = ''
                    string_rep = ''
                i += 1
            if hex_rep != '':
                print >> stdout, '    %-48s %s' % (hex_rep, string_rep)
        else:
            print >> stderr, 'Unexpected data type'
def help_get(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['get - retrieves a file or registry value. This command functions ',
' differently based on the current context. When a registry key',
' is selected, use the following syntax:\n',
' get <value_name|*> Prints the specified registry value to the',
" console. The '*' character prints all values in",
' the current context.',
' get <remote_filename> [local_filename] - Downloads a file from ',
' the remote target, writing the result to the ',
' specified path. This will overwrite any existing file\n'])
def complete_get(self, text, line, begidx, endidx):
if self.is_reghive(self.zb_context):
return self.complete_regvalues(text, line, begidx, endidx)
return []
def do_get(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
print >> stderr, 'No context selected'
return
if self.is_reghive(self.zb_context):
if len(args) != 1:
print >> stderr, 'The syntax of the command is incorrect'
else:
try:
result = self.ctx.get_reg_values(args[0])
for r in result:
self._print_regvalue(stdin, stdout, stderr, r)
except ZIBEException, err:
print >> stderr, str(err)
return
else:
            remote_path = None
            local_path = None
            if len(args) < 1 or len(args) > 2:
                print >> stderr, 'You must provide a remote name at a minimum'
                return
            elif len(args) == 1:
                remote_path = args[0]
                filename = os.path.split(remote_path)[1]
                local_path = os.path.join(self.ctx.localdir, filename)
            elif len(args) == 2:
                remote_path = args[0]
                local_path = args[1]
            try:
                file_contents = self.ctx.get_file(remote_path)
                with open(local_path, 'wb') as fh:
                    fh.write(file_contents)
                print >> stdout, '(%d bytes) [remote]%s -> [local]%s' % (len(file_contents), remote_path, local_path)
except ZIBEException, err:
print >> stdout, 'Failed! %s' % str(err)
print >> stderr, str(err)
return
return
def complete_info(self, text, line, begidx, endidx):
return self.complete_files(text, line, begidx, endidx)
def help_info(self, stdin, stdout, stderr):
print >> stdout, '\n'.join(['info <filename> - Retrieves information about the specified filename. Note that ',
' this command only works in the SMB file context\n'])
def do_info(self, stdin, stdout, stderr, ctx, args):
if self.zb_context == None:
print >> stderr, 'No context selected'
return
if len(args) == 1:
if self.is_reghive(self.zb_context):
print >> stderr, 'Command not supported in this context'
return
try:
fi = self.ctx.get_file_details(args[0])
except ZIBEException, err:
print >> stderr, str(err)
return
print >> stdout, 'Information for file %s' % fi['filename']
print >> stdout, ' Creation Time: %s' % self.format_datetime(fi['creation_time'])
print >> stdout, ' File Size: %s' % str(fi['filesize'])
print >> stdout, ' Last Access: %s' % self.format_datetime(fi['last_access_time'])
print >> stdout, ' Last Write: %s' % self.format_datetime(fi['last_write_time'])
print >> stdout, ' Attributes: 0x%x' % fi['attributes']
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_READONLY:
print >> stdout, ' FILE_ATTRIBUTE_READONLY'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_HIDDEN:
print >> stdout, ' FILE_ATTRIBUTE_HIDDEN'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_SYSTEM:
print >> stdout, ' FILE_ATTRIBUTE_SYSTEM'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_DIRECTORY:
print >> stdout, ' FILE_ATTRIBUTE_DIRECTORY'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_ARCHIVE:
print >> stdout, ' FILE_ATTRIBUTE_ARCHIVE'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_NORMAL:
print >> stdout, ' FILE_ATTRIBUTE_NORMAL'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_TEMPORARY:
print >> stdout, ' FILE_ATTRIBUTE_TEMPORARY'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_COMPRESSED:
print >> stdout, ' FILE_ATTRIBUTE_COMPRESSED'
if fi['attributes'] & WIN32Constants.FILE_ATTRIBUTE_ENCRYPTED:
print >> stdout, ' FILE_ATTRIBUTE_ENCRYPTED'
print >> stdout, ''
else:
print >> stderr, 'Error: No file specified'
return
def help_versionstrings(self, stdin, stdout, stderr):
print >> stdout, 'Get remotely reported version strings'
def do_versionstrings(self, stdin, stdout, stderr, ctx, args):
osStr, lmStr = ctx.get_version_strings()
print >> stdout, 'OS String: %s' % osStr
print >> stdout, 'LM String: %s' % lmStr
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/exploits/ZIBE/shell.py
|
Python
|
unlicense
| 42,519
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-08-21 17:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('protocoloadm', '0023_auto_20190711_1755'),
('protocoloadm', '0023_merge_20190802_1112'),
]
operations = [
]
|
interlegis/sapl
|
sapl/protocoloadm/migrations/0024_merge_20190821_1418.py
|
Python
|
gpl-3.0
| 350
|
from .utils import captitle
class BaseModel(object):
title = None
def __init__(self):
self.__name__ = self.__class__.__name__
self.slug = self.__name__.lower()
if self.title is None:
self.title = captitle(self.__name__)
def __str__(self):
return self.__name__
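# Usage sketch (hypothetical subclass; assumes captitle('MyWidget') yields
# 'My Widget'):
#   class MyWidget(BaseModel):
#       pass
#   MyWidget().slug   -> 'mywidget'
#   MyWidget().title  -> 'My Widget'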
|
byashimov/django-controlcenter
|
controlcenter/base.py
|
Python
|
bsd-3-clause
| 320
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Basic object model of an Impala cluster (set of Impala processes).
import json
import logging
import os
import pipes
import psutil
import socket
import sys
import time
from getpass import getuser
from random import choice
from signal import SIGKILL
from subprocess import check_call
from time import sleep
if sys.version_info >= (2, 7):
# We use some functions in the docker code that don't exist in Python 2.6.
from subprocess import check_output
from tests.common.impala_service import (
CatalogdService,
ImpaladService,
StateStoredService)
from tests.util.shell_util import exec_process, exec_process_async
LOG = logging.getLogger('impala_cluster')
LOG.setLevel(level=logging.DEBUG)
IMPALA_HOME = os.environ['IMPALA_HOME']
START_DAEMON_PATH = os.path.join(IMPALA_HOME, 'bin/start-daemon.sh')
DEFAULT_BEESWAX_PORT = 21000
DEFAULT_HS2_PORT = 21050
DEFAULT_BE_PORT = 22000
DEFAULT_KRPC_PORT = 27000
DEFAULT_CATALOG_SERVICE_PORT = 26000
DEFAULT_STATE_STORE_SUBSCRIBER_PORT = 23000
DEFAULT_IMPALAD_WEBSERVER_PORT = 25000
DEFAULT_STATESTORED_WEBSERVER_PORT = 25010
DEFAULT_CATALOGD_WEBSERVER_PORT = 25020
DEFAULT_IMPALAD_JVM_DEBUG_PORT = 30000
DEFAULT_CATALOGD_JVM_DEBUG_PORT = 30030
# Timeout to use when waiting for a cluster to start up. Set quite high to avoid test
# flakiness.
CLUSTER_WAIT_TIMEOUT_IN_SECONDS = 240
# Represents a set of Impala processes.
# Handles two cases:
# * The traditional minicluster with many processes running as the current user on
# the local system. In this case various settings are detected based on command
# line options(beeswax_port, webserver_port, etc)
# * The docker minicluster with one container per process connected to a user-defined
# bridge network.
class ImpalaCluster(object):
def __init__(self, docker_network=None):
self.docker_network = docker_network
self.refresh()
def refresh(self):
""" Re-loads the impalad/statestored/catalogd processes if they exist.
Helpful to confirm that processes have been killed.
"""
if self.docker_network is None:
self.__impalads, self.__statestoreds, self.__catalogd =\
self.__build_impala_process_lists()
else:
self.__impalads, self.__statestoreds, self.__catalogd =\
self.__find_docker_containers()
LOG.info("Found %d impalad/%d statestored/%d catalogd process(es)" %
(len(self.__impalads), len(self.__statestoreds), 1 if self.__catalogd else 0))
@property
def statestored(self):
"""
Returns the statestore process
    Note: Currently we expect a single statestore process; in the future this might
    change, in which case this should return the "active" statestore.
"""
# If no statestored process exists, return None.
return self.__statestoreds[0] if len(self.__statestoreds) > 0 else None
@property
def impalads(self):
"""Returns a list of the known impalad processes"""
return self.__impalads
@property
def catalogd(self):
"""Returns the catalogd process, or None if no catalogd process was found"""
return self.__catalogd
def get_first_impalad(self):
return self.impalads[0]
def get_any_impalad(self):
"""Selects a random impalad from the list of known processes"""
return choice(self.impalads)
def get_different_impalad(self, other_impalad):
"""Selects an impalad that is different from the given impalad"""
if len(self.impalads) <= 1:
assert 0, "Only %d impalads available to choose from" % len(self.impalads)
LOG.info("other_impalad: " + str(other_impalad))
LOG.info("Cluster: " + str(len(self.impalads)))
LOG.info("Cluster: " + str(self.impalads))
return choice([impalad for impalad in self.impalads if impalad != other_impalad])
  def num_responsive_coordinators(self):
    """Find the number of impalad coordinators that can evaluate a test query."""
    n = 0
    for impalad in self.impalads:
      client = None
      try:
        client = impalad.service.create_beeswax_client()
        result = client.execute("select 1")
        assert result.success
        n += 1
      except Exception as e:
        print e
      finally:
        if client is not None:
          client.close()
    return n
def wait_until_ready(self, expected_num_impalads=1, expected_num_ready_impalads=None):
"""Waits for this 'cluster' to be ready to submit queries.
A cluster is deemed "ready" if:
- expected_num_impalads impala processes are up (or, if not specified, at least
one impalad is up).
- expected_num_ready_impalads backends are registered with the statestore.
expected_num_ready_impalads defaults to expected_num_impalads.
      - All impalads know about all other ready impalads.
- Each coordinator impalad's catalog cache is ready.
This information is retrieved by querying the statestore debug webpage
and each individual impalad's metrics webpage.
"""
self.wait_for_num_impalads(expected_num_impalads)
# TODO: fix this for coordinator-only nodes as well.
if expected_num_ready_impalads is None:
expected_num_ready_impalads = len(self.impalads)
for impalad in self.impalads:
impalad.service.wait_for_num_known_live_backends(expected_num_ready_impalads,
timeout=CLUSTER_WAIT_TIMEOUT_IN_SECONDS, interval=2)
if (impalad._get_arg_value("is_coordinator", default="true") == "true" and
impalad._get_arg_value("stress_catalog_init_delay_ms", default=0) == 0):
impalad.wait_for_catalog()
def wait_for_num_impalads(self, num_impalads, retries=10):
"""Checks that at least 'num_impalads' impalad processes are running, along with
the statestored and catalogd.
    Refresh until the number of running impalad processes reaches the expected
number based on num_impalads, or the retry limit is hit. Failing this, raise a
RuntimeError.
"""
for i in range(retries):
if len(self.impalads) < num_impalads or not self.statestored or not self.catalogd:
sleep(1)
self.refresh()
msg = ""
if len(self.impalads) < num_impalads:
msg += "Expected {expected_num} impalad(s), only {actual_num} found\n".format(
expected_num=num_impalads, actual_num=len(self.impalads))
if not self.statestored:
msg += "statestored failed to start.\n"
if not self.catalogd:
msg += "catalogd failed to start.\n"
if msg:
raise RuntimeError(msg)
def __build_impala_process_lists(self):
"""
Gets all the running Impala procs (with start arguments) on the machine.
Note: This currently only works for the local case. To support running in a cluster
environment this would need to enumerate each machine in the cluster.
"""
impalads = list()
statestored = list()
catalogd = None
for process in find_user_processes(['impalad', 'catalogd', 'statestored']):
# IMPALA-6889: When a process shuts down and becomes a zombie its cmdline becomes
# empty for a brief moment, before it gets reaped by its parent (see man proc). We
# copy the cmdline to prevent it from changing between the following checks and
# the construction of the *Process objects.
cmdline = ''
try:
cmdline = process.cmdline
except psutil.NoSuchProcess:
# IMPALA-8320: psutil.Process.cmdline is a property and the process could have
# disappeared between the time we built the process list and now.
continue
if len(cmdline) == 0:
continue
if process.name == 'impalad':
impalads.append(ImpaladProcess(cmdline))
elif process.name == 'statestored':
statestored.append(StateStoreProcess(cmdline))
elif process.name == 'catalogd':
catalogd = CatalogdProcess(cmdline)
# If the operating system PIDs wrap around during startup of the local minicluster,
# the order of the impalads is incorrect. We order them by their HS2 port, so that
# get_first_impalad() always returns the first one. We need to use a port that is
# exposed and mapped to a host port for the containerised cluster.
impalads.sort(key=lambda i: i.service.hs2_port)
return impalads, statestored, catalogd
def __find_docker_containers(self):
"""
Gets all the running Impala containers on self.docker_network.
"""
impalads = []
statestoreds = []
catalogd = None
output = check_output(["docker", "network", "inspect", self.docker_network])
# Only one network should be present in the top level array.
for container_id in json.loads(output)[0]["Containers"]:
container_info = self._get_container_info(container_id)
if container_info["State"]["Status"] != "running":
# Skip over stopped containers.
continue
args = container_info["Args"]
executable = os.path.basename(args[0])
port_map = {}
for k, v in container_info["NetworkSettings"]["Ports"].iteritems():
# Key looks like "25000/tcp"..
port = int(k.split("/")[0])
# Value looks like { "HostPort": "25002", "HostIp": "" }.
host_port = int(v[0]["HostPort"])
port_map[port] = host_port
if executable == 'impalad':
impalads.append(ImpaladProcess(args, container_id=container_id,
port_map=port_map))
elif executable == 'statestored':
statestoreds.append(StateStoreProcess(args, container_id=container_id,
port_map=port_map))
elif executable == 'catalogd':
assert catalogd is None
catalogd = CatalogdProcess(args, container_id=container_id,
port_map=port_map)
impalads.sort(key=lambda i: i.service.be_port)
return impalads, statestoreds, catalogd
def _get_container_info(self, container_id):
"""Get the output of "docker container inspect" as a python data structure."""
containers = json.loads(
check_output(["docker", "container", "inspect", container_id]))
# Only one container should be present in the top level array.
assert len(containers) == 1, json.dumps(containers, indent=4)
return containers[0]
# Represents a process running on a machine and common actions that can be performed
# on a process such as restarting or killing. The process may be the main process in
# a Docker container, if the cluster is containerised (in this case container_id must
# be provided). Note that containerised processes are really just processes running
# on the local system with some additional virtualisation, so some operations are
# the same for both containerised and non-containerised cases.
#
# For containerised processes, 'port_map' should be provided to map from the container's
# ports to ports on the host. Methods from this class always return the host port.
class Process(object):
def __init__(self, cmd, container_id=None, port_map=None):
assert cmd is not None and len(cmd) >= 1,\
'Process object must be created with valid command line argument list'
assert container_id is None or port_map is not None,\
"Must provide port_map for containerised process"
self.cmd = cmd
self.container_id = container_id
self.port_map = port_map
def __class_name(self):
return self.__class__.__name__
def __str__(self):
return "<%s PID: %s (%s)>" % (self.__class_name(), self.__get_pid(),
' '.join(self.cmd))
def __repr__(self):
return str(self)
def get_pid(self):
"""Gets the PID of the process. Returns None if the PID cannot be determined"""
pid = self.__get_pid()
if pid:
LOG.info("Found PID %s for %s" % (pid, " ".join(self.cmd)))
else:
LOG.info("No PID found for process cmdline: %s. Process is dead?" %
" ".join(self.cmd))
return pid
def get_pids(self):
"""Gets the PIDs of the process. In some circumstances, a process can run multiple
times, e.g. when it forks in the Breakpad crash handler. Returns an empty list if no
PIDs can be determined."""
pids = self.__get_pids()
if pids:
LOG.info("Found PIDs %s for %s" % (", ".join(map(str, pids)), " ".join(self.cmd)))
else:
LOG.info("No PID found for process cmdline: %s. Process is dead?" %
" ".join(self.cmd))
return pids
def __get_pid(self):
pids = self.__get_pids()
assert len(pids) < 2, "Expected single pid but found %s" % ", ".join(map(str, pids))
return len(pids) == 1 and pids[0] or None
def __get_pids(self):
if self.container_id is not None:
container_info = self._get_container_info(self.container_id)
if container_info["State"]["Status"] != "running":
return []
return [container_info["State"]["Status"]["Pid"]]
# In non-containerised case, search for process based on matching command lines.
pids = []
for pid in psutil.get_pid_list():
try:
process = psutil.Process(pid)
if set(self.cmd) == set(process.cmdline):
pids.append(pid)
except psutil.NoSuchProcess:
# A process from get_pid_list() no longer exists, continue. We don't log this
# error since it can refer to arbitrary processes outside of our testing code.
pass
return pids
def kill(self, signal=SIGKILL):
"""
Kills the given processes.
"""
if self.container_id is None:
pid = self.__get_pid()
assert pid is not None, "No processes for %s" % self
LOG.info('Killing %s with signal %s' % (self, signal))
exec_process("kill -%d %d" % (signal, pid))
else:
LOG.info("Stopping container: {0}".format(self.container_id))
check_call(["docker", "container", "stop", self.container_id])
def start(self):
"""Start the process with the same arguments after it was stopped."""
if self.container_id is None:
binary = os.path.basename(self.cmd[0])
restart_args = self.cmd[1:]
LOG.info("Starting {0} with arguments".format(binary, restart_args))
run_daemon(binary, restart_args)
else:
LOG.info("Starting container: {0}".format(self.container_id))
check_call(["docker", "container", "start", self.container_id])
def restart(self):
"""Kills and restarts the process"""
self.kill()
self.wait_for_exit()
self.start()
def wait_for_exit(self):
"""Wait until the process exits (or return immediately if it already has exited."""
LOG.info('Waiting for exit: {0} (PID: {1})'.format(
' '.join(self.cmd), self.get_pid()))
while self.__get_pid() is not None:
sleep(0.01)
# Base class for all Impala processes
class BaseImpalaProcess(Process):
def __init__(self, cmd, container_id=None, port_map=None):
super(BaseImpalaProcess, self).__init__(cmd, container_id, port_map)
self.hostname = self._get_hostname()
def _get_webserver_port(self, default=None):
return int(self._get_port('webserver_port', default))
def _get_webserver_certificate_file(self):
# TODO: if this is containerised, the path will likely not be the same on the host.
return self._get_arg_value("webserver_certificate_file", "")
def _get_hostname(self):
return self._get_arg_value("hostname", socket.gethostname())
def _get_arg_value(self, arg_name, default=None):
"""Gets the argument value for given argument name"""
for arg in self.cmd:
if ('%s=' % arg_name) in arg.strip().lstrip('-'):
return arg.split('=')[1]
if default is None:
assert 0, "No command line argument '%s' found." % arg_name
return default
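  # Illustrative note (hypothetical flags, not from the Impala sources): given
  # cmd == ['impalad', '-webserver_port=25000', '--logbufsecs=5'],
  #   _get_arg_value('webserver_port')        -> '25000'
  #   _get_arg_value('logbufsecs')            -> '5'
  #   _get_arg_value('missing', default='x')  -> 'x'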
def _get_port(self, arg_name, default):
"""Return the host port for the specified by the command line argument 'arg_name'.
If 'self.port_map' is set, maps from container ports to host ports."""
port = int(self._get_arg_value(arg_name, default))
if self.port_map is not None:
port = self.port_map.get(port, port)
return port
# Represents an impalad process
class ImpaladProcess(BaseImpalaProcess):
def __init__(self, cmd, container_id=None, port_map=None):
super(ImpaladProcess, self).__init__(cmd, container_id, port_map)
self.service = ImpaladService(self.hostname,
self._get_webserver_port(
default=DEFAULT_IMPALAD_WEBSERVER_PORT),
self.__get_beeswax_port(),
self.__get_be_port(),
self.__get_hs2_port(),
self._get_webserver_certificate_file())
def __get_beeswax_port(self):
return int(self._get_port('beeswax_port', DEFAULT_BEESWAX_PORT))
def __get_be_port(self):
return int(self._get_port('be_port', DEFAULT_BE_PORT))
def __get_hs2_port(self):
return int(self._get_port('hs2_port', DEFAULT_HS2_PORT))
def start(self, wait_until_ready=True):
"""Starts the impalad and waits until the service is ready to accept connections."""
restart_args = self.cmd[1:]
LOG.info("Starting Impalad process with args: {0}".format(restart_args))
run_daemon("impalad", restart_args)
if wait_until_ready:
self.service.wait_for_metric_value('impala-server.ready',
expected_value=1, timeout=30)
def wait_for_catalog(self):
"""Waits for a catalog copy to be received by the impalad. When its received,
additionally waits for client ports to be opened."""
start_time = time.time()
beeswax_port_is_open = False
hs2_port_is_open = False
num_dbs = 0
num_tbls = 0
while ((time.time() - start_time < CLUSTER_WAIT_TIMEOUT_IN_SECONDS) and
not (beeswax_port_is_open and hs2_port_is_open)):
try:
num_dbs, num_tbls = self.service.get_metric_values(
["catalog.num-databases", "catalog.num-tables"])
beeswax_port_is_open = self.service.beeswax_port_is_open()
hs2_port_is_open = self.service.hs2_port_is_open()
except Exception:
LOG.exception(("Client services not ready. Waiting for catalog cache: "
"({num_dbs} DBs / {num_tbls} tables). Trying again ...").format(
num_dbs=num_dbs,
num_tbls=num_tbls))
sleep(0.5)
if not hs2_port_is_open or not beeswax_port_is_open:
raise RuntimeError(
"Unable to open client ports within {num_seconds} seconds.".format(
num_seconds=CLUSTER_WAIT_TIMEOUT_IN_SECONDS))
# Represents a statestored process
class StateStoreProcess(BaseImpalaProcess):
def __init__(self, cmd, container_id=None, port_map=None):
super(StateStoreProcess, self).__init__(cmd, container_id, port_map)
self.service = StateStoredService(self.hostname,
self._get_webserver_port(default=DEFAULT_STATESTORED_WEBSERVER_PORT),
self._get_webserver_certificate_file())
# Represents a catalogd process
class CatalogdProcess(BaseImpalaProcess):
def __init__(self, cmd, container_id=None, port_map=None):
super(CatalogdProcess, self).__init__(cmd, container_id, port_map)
self.service = CatalogdService(self.hostname,
self._get_webserver_port(default=DEFAULT_CATALOGD_WEBSERVER_PORT),
self._get_webserver_certificate_file(), self.__get_port())
def __get_port(self):
return int(self._get_port('catalog_service_port', DEFAULT_CATALOG_SERVICE_PORT))
def start(self, wait_until_ready=True):
"""Starts catalogd and waits until the service is ready to accept connections."""
restart_args = self.cmd[1:]
LOG.info("Starting Catalogd process: {0}".format(restart_args))
run_daemon("catalogd", restart_args)
if wait_until_ready:
self.service.wait_for_metric_value('statestore-subscriber.connected',
expected_value=1, timeout=30)
def find_user_processes(binaries):
"""Returns an iterator over all processes owned by the current user with a matching
binary name from the provided list."""
for pid in psutil.get_pid_list():
try:
process = psutil.Process(pid)
if process.username == getuser() and process.name in binaries: yield process
except KeyError, e:
if "uid not found" not in str(e):
raise
except psutil.NoSuchProcess, e:
# Ignore the case when a process no longer exists.
pass
def run_daemon(daemon_binary, args, build_type="latest", env_vars={}, output_file=None):
"""Starts up an impalad with the specified command line arguments. args must be a list
  of strings. An optional build_type parameter can be passed to determine the build type
  to use for the daemon instance. Any values in the env_vars override environment
variables inherited from this process. If output_file is specified, stdout and stderr
are redirected to that file.
"""
bin_path = os.path.join(IMPALA_HOME, "be", "build", build_type, "service",
daemon_binary)
redirect = ""
if output_file is not None:
redirect = "1>{0} 2>&1".format(output_file)
cmd = [START_DAEMON_PATH, bin_path] + args
# Use os.system() to start 'cmd' in the background via a shell so its parent will be
# init after the shell exits. Otherwise, the parent of 'cmd' will be py.test and we
# cannot cleanly kill it until py.test exits. In theory, Popen(shell=True) should
# achieve the same thing but it doesn't work on some platforms for some reasons.
sys_cmd = ("{set_cmds} {cmd} {redirect} &".format(
set_cmds=''.join(["export {0}={1};".format(k, pipes.quote(v))
for k, v in env_vars.iteritems()]),
cmd=' '.join([pipes.quote(tok) for tok in cmd]),
redirect=redirect))
os.system(sys_cmd)
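# Illustrative note (hypothetical values, not from the Impala sources): a call
# such as
#   run_daemon('impalad', ['-beeswax_port=21000'], env_vars={'FOO': 'bar'},
#              output_file='/tmp/impalad.log')
# composes roughly this shell command:
#   export FOO=bar; <START_DAEMON_PATH> <bin_path> -beeswax_port=21000 \
#       1>/tmp/impalad.log 2>&1 &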
|
cloudera/Impala
|
tests/common/impala_cluster.py
|
Python
|
apache-2.0
| 22,467
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import stix
import stix.utils
import stix.indicator.test_mechanism
from stix.common import EncodedCDATA
from stix.indicator.test_mechanism import _BaseTestMechanism
import stix.bindings.extensions.test_mechanism.yara as yara_tm_binding
class YaraTestMechanism(_BaseTestMechanism):
_namespace = "http://stix.mitre.org/extensions/TestMechanism#YARA-1"
_binding = yara_tm_binding
_binding_class = _binding.YaraTestMechanismType
_XSI_TYPE = "yaraTM:YaraTestMechanismType"
def __init__(self, id_=None, idref=None):
super(YaraTestMechanism, self).__init__(id_=id_, idref=idref)
self.version = None
self.rule = None
@property
def rule(self):
return self._rule
@rule.setter
def rule(self, value):
        if not value:
            self._rule = None
        elif isinstance(value, EncodedCDATA):
            self._rule = value
        else:
            self._rule = EncodedCDATA(value=value)
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
super(YaraTestMechanism, cls).from_obj(obj, return_obj)
return_obj.version = obj.Version
return_obj.rule = EncodedCDATA.from_obj(obj.Rule)
return return_obj
def to_obj(self, return_obj=None, ns_info=None):
if not return_obj:
return_obj = self._binding_class()
super(YaraTestMechanism, self).to_obj(return_obj=return_obj, ns_info=ns_info)
return_obj.Version = self.version
return_obj.Rule = self.rule.to_obj(ns_info=ns_info)
return return_obj
@classmethod
def from_dict(cls, d, return_obj=None):
if not d:
return None
if not return_obj:
return_obj = cls()
super(YaraTestMechanism, cls).from_dict(d, return_obj)
return_obj.version = d.get('version')
return_obj.rule = EncodedCDATA.from_dict(d.get('rule'))
return return_obj
def to_dict(self):
d = super(YaraTestMechanism, self).to_dict()
if self.version:
d['version'] = self.version
if self.rule:
d['rule'] = self.rule.to_dict()
return d
stix.indicator.test_mechanism.add_extension(YaraTestMechanism)
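# Usage sketch (hypothetical rule text, not from the python-stix docs):
#   tm = YaraTestMechanism()
#   tm.version = '2.0'
#   tm.rule = 'rule silent_banker { condition: true }'  # wrapped in EncodedCDATA
#   d = tm.to_dict()                      # {'version': '2.0', 'rule': {...}, ...}
#   tm2 = YaraTestMechanism.from_dict(d)  # round-trips back into an object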
|
benjamin9999/python-stix
|
stix/extensions/test_mechanism/yara_test_mechanism.py
|
Python
|
bsd-3-clause
| 2,517
|
'''
backends/gs.py - this file is part of S3QL.
Copyright © 2008 Nikolaus Rath <Nikolaus@rath.org>
This work can be distributed under the terms of the GNU GPLv3.
'''
from ..logging import logging, QuietError # Ensure use of custom logger class
from .common import (AbstractBackend, NoSuchObject, retry, AuthorizationError,
AuthenticationError, DanglingStorageURLError,
get_proxy, get_ssl_context, CorruptedObjectError,
checksum_basic_mapping)
from ..common import OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET
from .. import BUFSIZE
from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring,
ABCDocstMeta)
from dugong import (HTTPConnection, is_temp_network_error, BodyFollowing, CaseInsensitiveDict,
ConnectionClosed)
from base64 import b64encode, b64decode
from itertools import count
from ast import literal_eval
import hashlib
import urllib.parse
import re
import tempfile
import os
import dugong
import json
import threading
import ssl
from typing import Optional, Dict, Any
try:
import google.auth as g_auth
except ModuleNotFoundError:
g_auth = None
log = logging.getLogger(__name__)
# Used only by adm.py
UPGRADE_MODE = False
class ServerResponseError(Exception):
'''Raised if the server response cannot be parsed.
For HTTP errors (i.e., non 2xx response codes), RequestError should
always be used instead (since in that case the response body can
not be expected to have any specific format).
'''
def __init__(self, resp: dugong.HTTPResponse, error: str,
body: str):
self.resp = resp
self.error = error
self.body = body
def __str__(self):
return '<ServerResponseError: %s>' % self.error
class RequestError(Exception):
'''
An error returned by the server.
'''
    def __init__(self, code: int, reason: str, message: Optional[str] = None,
body: Optional[str] = None):
super().__init__()
self.code = code
self.reason = reason
self.message = message
self.body = body
def __str__(self) -> str:
if self.message:
return '<RequestError, code=%d, reason=%r, message=%r>' % (
self.code, self.reason, self.message)
elif self.body:
return '<RequestError, code=%d, reason=%r, with body data>' % (
self.code, self.reason)
else:
return '<RequestError, code=%d, reason=%r>' % (
self.code, self.reason)
class AccessTokenExpired(Exception):
'''
Raised if the access token has expired.
'''
class Backend(AbstractBackend, metaclass=ABCDocstMeta):
"""A backend to store data in Google Storage"""
known_options = {'ssl-ca-path', 'tcp-timeout'}
# We don't want to request an access token for each instance,
# because there is a limit on the total number of valid tokens.
# This class variable holds the mapping from refresh tokens to
# access tokens.
access_token = dict()
_refresh_lock = threading.Lock()
adc = None
def __init__(self, options):
super().__init__()
self.ssl_context = get_ssl_context(
            options.backend_options.get('ssl-ca-path', None)) # type: Optional[ssl.SSLContext]
self.options = options.backend_options # type: Dict[str, str]
self.proxy = get_proxy(ssl=True) # type: str
self.login = options.backend_login # type: str
self.refresh_token = options.backend_password # type: str
if self.login == 'adc':
if g_auth is None:
                raise QuietError('ADC authentication requires the google.auth module')
elif self.adc is None:
import google.auth.transport.urllib3
import urllib3
                # Deliberately ignore proxy and SSL context when attempting
# to connect to Compute Engine Metadata server.
requestor = google.auth.transport.urllib3.Request(
urllib3.PoolManager())
try:
credentials, _ = g_auth.default(
request=requestor,
scopes=['https://www.googleapis.com/auth/devstorage.full_control'])
except g_auth.exceptions.DefaultCredentialsError as exc:
raise QuietError('ADC found no valid credential sources: ' + str(exc))
type(self).adc = (credentials, requestor)
elif self.login != 'oauth2':
raise QuietError("Google Storage backend requires OAuth2 or ADC authentication")
# Special case for unit testing against local mock server
hit = re.match(r'^gs://!unittest!'
r'([^/:]+)' # Hostname
r':([0-9]+)' # Port
r'/([^/]+)' # Bucketname
r'(?:/(.*))?$', # Prefix
options.storage_url)
if hit:
self.hostname = hit.group(1)
self.port = int(hit.group(2))
self.bucket_name = hit.group(3)
self.prefix = hit.group(4) or ''
else:
hit = re.match(r'^gs://([^/]+)(?:/(.*))?$', options.storage_url)
if not hit:
raise QuietError('Invalid storage URL', exitcode=2)
self.bucket_name = hit.group(1)
self.hostname = 'www.googleapis.com'
self.prefix = hit.group(2) or ''
self.port = 443
self.conn = self._get_conn()
# Check if bucket exists and/or credentials are correct
path = '/storage/v1/b/' + urllib.parse.quote(self.bucket_name, safe='')
try:
resp = self._do_request('GET', path)
except RequestError as exc:
if exc.code == 404:
raise DanglingStorageURLError("Bucket '%s' does not exist" %
self.bucket_name)
exc = _map_request_error(exc, None)
if exc:
raise exc
raise
self._parse_json_response(resp)
@property
@copy_ancestor_docstring
def has_native_rename(self):
return False
@copy_ancestor_docstring
def reset(self):
if (self.conn is not None and
(self.conn.response_pending() or self.conn._out_remaining)):
log.debug('Resetting state of http connection %d', id(self.conn))
self.conn.disconnect()
def _get_conn(self):
'''Return connection to server'''
conn = HTTPConnection(self.hostname, self.port, proxy=self.proxy,
ssl_context=self.ssl_context)
conn.timeout = int(self.options.get('tcp-timeout', 20))
return conn
@copy_ancestor_docstring
def is_temp_failure(self, exc): #IGNORE:W0613
if is_temp_network_error(exc) or isinstance(exc, ssl.SSLError):
# We probably can't use the connection anymore, so use this
# opportunity to reset it
            self.conn.disconnect()
return True
elif isinstance(exc, RequestError) and (
500 <= exc.code <= 599 or exc.code == 408):
return True
elif isinstance(exc, AccessTokenExpired):
# Delete may fail if multiple threads encounter the same error
self.access_token.pop(self.refresh_token, None)
return True
# Not clear at all what is happening here, but in doubt we retry
elif isinstance(exc, ServerResponseError):
return True
if g_auth and isinstance(exc, g_auth.exceptions.TransportError):
return True
return False
def _assert_empty_response(self, resp):
'''Assert that current response body is empty'''
buf = self.conn.read(2048)
if not buf:
return # expected
body = '\n'.join('%s: %s' % x for x in resp.headers.items())
hit = re.search(r'; charset="(.+)"$',
resp.headers.get('Content-Type', ''),
re.IGNORECASE)
if hit:
charset = hit.group(1)
body += '\n' + buf.decode(charset, errors='backslashreplace')
log.warning('Expected empty response body, but got data - this is odd.')
raise ServerResponseError(resp, error='expected empty response',
body=body)
@retry
@copy_ancestor_docstring
def delete(self, key, force=False, is_retry=False):
log.debug('started with %s', key)
path = '/storage/v1/b/%s/o/%s' % (
urllib.parse.quote(self.bucket_name, safe=''),
urllib.parse.quote(self.prefix + key, safe=''))
try:
resp = self._do_request('DELETE', path)
self._assert_empty_response(resp)
except RequestError as exc:
exc = _map_request_error(exc, key)
if isinstance(exc, NoSuchObject) and (force or is_retry):
pass
elif exc:
raise exc
else:
raise
@copy_ancestor_docstring
def list(self, prefix=''):
prefix = self.prefix + prefix
strip = len(self.prefix)
page_token = None
while True:
(els, page_token) = self._list_page(prefix, page_token)
for el in els:
yield el[strip:]
if page_token is None:
break
@retry
def _list_page(self, prefix, page_token=None, batch_size=1000):
# Limit maximum number of results since we read everything
# into memory (because Python JSON doesn't have a streaming API)
query_string = { 'prefix': prefix, 'maxResults': str(batch_size) }
if page_token:
query_string['pageToken'] = page_token
path = '/storage/v1/b/%s/o' % (
urllib.parse.quote(self.bucket_name, safe=''),)
try:
resp = self._do_request('GET', path, query_string=query_string)
except RequestError as exc:
exc = _map_request_error(exc, None)
if exc:
raise exc
raise
json_resp = self._parse_json_response(resp)
page_token = json_resp.get('nextPageToken', None)
if 'items' not in json_resp:
assert page_token is None
return ((), None)
return ([ x['name'] for x in json_resp['items'] ], page_token)
@retry
@copy_ancestor_docstring
def lookup(self, key):
log.debug('started with %s', key)
return _unwrap_user_meta(self._get_gs_meta(key))
def _get_gs_meta(self, key):
path = '/storage/v1/b/%s/o/%s' % (
urllib.parse.quote(self.bucket_name, safe=''),
urllib.parse.quote(self.prefix + key, safe=''))
try:
resp = self._do_request('GET', path)
except RequestError as exc:
exc = _map_request_error(exc, key)
if exc:
raise exc
raise
return self._parse_json_response(resp)
@retry
@copy_ancestor_docstring
def get_size(self, key):
json_resp = self._get_gs_meta(key)
return json_resp['size']
@retry
@copy_ancestor_docstring
def open_read(self, key):
gs_meta = self._get_gs_meta(key)
path = '/storage/v1/b/%s/o/%s' % (
urllib.parse.quote(self.bucket_name, safe=''),
urllib.parse.quote(self.prefix + key, safe=''))
try:
resp = self._do_request('GET', path, query_string={'alt': 'media'})
except RequestError as exc:
exc = _map_request_error(exc, key)
if exc:
raise exc
raise
return ObjectR(key, resp, self, gs_meta)
@prepend_ancestor_docstring
def open_write(self, key, metadata=None, is_compressed=False):
"""
The returned object will buffer all data and only start the upload
when its `close` method is called.
"""
return ObjectW(key, self, metadata)
@retry
def write_fh(self, fh, key: str, md5: bytes,
metadata: Optional[Dict[str, Any]] = None,
size: Optional[int] = None):
'''Write data from byte stream *fh* into *key*.
*fh* must be seekable. If *size* is None, *fh* must also implement
`fh.fileno()` so that the size can be determined through `os.fstat`.
*md5* must be the (binary) md5 checksum of the data.
'''
metadata = json.dumps({
'metadata': _wrap_user_meta(metadata if metadata else {}),
'md5Hash': b64encode(md5).decode(),
'name': self.prefix + key,
})
# Google Storage uses Content-Length to read the object data, so we
# don't have to worry about the boundary occurring in the object data.
boundary = 'foo_bar_baz'
headers = CaseInsensitiveDict()
headers['Content-Type'] = 'multipart/related; boundary=%s' % boundary
body_prefix = '\n'.join(('--' + boundary,
'Content-Type: application/json; charset=UTF-8',
'', metadata,
'--' + boundary,
'Content-Type: application/octet-stream',
'', '')).encode()
body_suffix = ('\n--%s--\n' % boundary).encode()
body_size = len(body_prefix) + len(body_suffix)
if size is not None:
body_size += size
else:
body_size += os.fstat(fh.fileno()).st_size
path = '/upload/storage/v1/b/%s/o' % (
urllib.parse.quote(self.bucket_name, safe=''),)
query_string = {'uploadType': 'multipart'}
try:
resp = self._do_request('POST', path, query_string=query_string,
headers=headers, body=BodyFollowing(body_size))
except RequestError as exc:
exc = _map_request_error(exc, key)
if exc:
raise exc
raise
assert resp.status == 100
fh.seek(0)
md5_run = hashlib.md5()
try:
self.conn.write(body_prefix)
while True:
buf = fh.read(BUFSIZE)
if not buf:
break
self.conn.write(buf)
md5_run.update(buf)
self.conn.write(body_suffix)
except ConnectionClosed:
# Server closed connection while we were writing body data -
# but we may still be able to read an error response
try:
resp = self.conn.read_response()
except ConnectionClosed: # No server response available
pass
else:
log.warning('Server broke connection during upload, signaled '
'%d %s', resp.status, resp.reason)
# Re-raise first ConnectionClosed exception
raise
if md5_run.digest() != md5:
            raise ValueError('md5 passed to write_fh does not match fh data')
resp = self.conn.read_response()
# If we're really unlucky, then the token has expired while we were uploading data.
if resp.status == 401:
self.conn.discard()
raise AccessTokenExpired()
elif resp.status != 200:
exc = self._parse_error_response(resp)
raise _map_request_error(exc, key) or exc
self._parse_json_response(resp)
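    # For reference, the multipart/related body assembled by write_fh() above
    # has this layout (JSON abbreviated): the object metadata travels in the
    # first part, the raw object data in the second.
    #
    #   --foo_bar_baz
    #   Content-Type: application/json; charset=UTF-8
    #
    #   {"metadata": {...}, "md5Hash": "...", "name": "<prefix><key>"}
    #   --foo_bar_baz
    #   Content-Type: application/octet-stream
    #
    #   <raw object data>
    #   --foo_bar_baz--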
@retry
@copy_ancestor_docstring
def update_meta(self, key, metadata):
headers = CaseInsensitiveDict()
headers['Content-Type'] = 'application/json; charset="utf-8"'
body = json.dumps({ 'metadata': _wrap_user_meta(metadata),
'acl': [] }).encode()
path = '/storage/v1/b/%s/o/%s' % (
urllib.parse.quote(self.bucket_name, safe=''),
urllib.parse.quote(self.prefix + key, safe=''))
try:
resp = self._do_request('PUT', path, headers=headers, body=body)
except RequestError as exc:
exc = _map_request_error(exc, key)
if exc:
raise exc
raise
self._parse_json_response(resp)
@copy_ancestor_docstring
def close(self):
self.conn.disconnect()
def __str__(self):
return '<gs.Backend, name=%s, prefix=%s>' % (self.bucket_name, self.prefix)
# This method uses a different HTTP connection than its callers, but shares
# the same retry logic. It is therefore possible that errors with this
# connection cause the other connection to be reset - but this should not
# be a problem, because there can't be a pending request if we don't have
# a valid access token.
def _get_access_token(self):
log.info('Requesting new access token')
if self.adc:
try:
self.adc[0].refresh(self.adc[1])
except g_auth.exceptions.RefreshError as exc:
raise AuthenticationError(
'Failed to refresh credentials: ' + str(exc))
self.access_token[self.refresh_token] = self.adc[0].token
return
headers = CaseInsensitiveDict()
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'
body = urllib.parse.urlencode({
'client_id': OAUTH_CLIENT_ID,
'client_secret': OAUTH_CLIENT_SECRET,
'refresh_token': self.refresh_token,
'grant_type': 'refresh_token' })
conn = HTTPConnection('accounts.google.com', 443, proxy=self.proxy,
ssl_context=self.ssl_context)
try:
conn.send_request('POST', '/o/oauth2/token', headers=headers,
body=body.encode('utf-8'))
resp = conn.read_response()
json_resp = self._parse_json_response(resp, conn)
if resp.status > 299 or resp.status < 200:
assert 'error' in json_resp
if 'error' in json_resp:
raise AuthenticationError(json_resp['error'])
else:
self.access_token[self.refresh_token] = json_resp['access_token']
finally:
conn.disconnect()
def _parse_error_response(self, resp, conn=None):
'''Return exception corresponding to server response.'''
try:
json_resp = self._parse_json_response(resp, conn)
except ServerResponseError as exc:
# Error messages may come from intermediate proxies and thus may not
# be in JSON.
log.debug('Server response not JSON - intermediate proxy failure?')
return RequestError(code=resp.status, reason=resp.reason,
body=exc.body)
try:
message = json_resp['error']['message']
body = None
except KeyError:
log.warning('Did not find error.message element in JSON '
'error response. This is odd.')
message = None
body = str(json_resp)
return RequestError(code=resp.status, reason=resp.reason, message=message,
body=body)
def _parse_json_response(self, resp, conn=None):
if conn is None:
conn = self.conn
# Note that even though the final server backend may guarantee to always
# deliver a JSON document body with a detailed error message, we may
# also get errors from intermediate proxies.
content_type = resp.headers.get('Content-Type', None)
if content_type:
hit = re.match(r'application/json(?:; charset="(.+)")?$',
resp.headers['Content-Type'], re.IGNORECASE)
if not content_type or not hit:
raise ServerResponseError(resp, error='expected json, got %s' % content_type,
body=self._dump_body(resp))
charset = hit.group(1)
body = conn.readall()
try:
body_text = body.decode(charset)
except UnicodeDecodeError as exc:
log.warning('Unable to decode JSON response as Unicode (%s) '
'- this is odd.', str(exc))
raise ServerResponseError(resp, error=str(exc),
body=body.decode(charset, errors='backslashreplace'))
try:
resp_json = json.loads(body_text)
except json.JSONDecodeError as exc:
log.warning('Unable to decode JSON response (%s) - this is odd.', str(exc))
raise ServerResponseError(resp, error=str(exc), body=body_text)
return resp_json
def _dump_body(self, resp):
'''Return truncated string representation of response body.'''
is_truncated = False
try:
body = self.conn.read(2048)
if self.conn.read(1):
is_truncated = True
self.conn.discard()
except dugong.UnsupportedResponse:
log.warning('Unsupported response, trying to retrieve data from raw socket!')
body = self.conn.read_raw(2048)
self.conn.close()
hit = re.search(r'; charset="(.+)"$',
resp.headers.get('Content-Type', ''),
re.IGNORECASE)
if hit:
charset = hit.group(1)
else:
charset = 'utf-8'
body = body.decode(charset, errors='backslashreplace')
if is_truncated:
body += '... [truncated]'
return body
def _do_request(self, method, path, query_string=None, headers=None, body=None):
'''Send request, read and return response object'''
log.debug('started with %s %s, qs=%s', method, path, query_string)
if headers is None:
headers = CaseInsensitiveDict()
expect100 = isinstance(body, BodyFollowing)
headers['host'] = self.hostname
if query_string:
s = urllib.parse.urlencode(query_string, doseq=True)
path += '?%s' % s
# If we have an access token, try to use it.
token = self.access_token.get(self.refresh_token, None)
if token is not None:
headers['Authorization'] = 'Bearer ' + token
self.conn.send_request(method, path, body=body, headers=headers,
expect100=expect100)
resp = self.conn.read_response()
if ((expect100 and resp.status == 100) or
(not expect100 and 200 <= resp.status <= 299)):
return resp
elif resp.status != 401:
raise self._parse_error_response(resp)
self.conn.discard()
# If we reach this point, then the access token must have
# expired, so we try to get a new one. We use a lock to prevent
# multiple threads from refreshing the token simultaneously.
with self._refresh_lock:
# Don't refresh if another thread has already done so while
# we waited for the lock.
if token is None or self.access_token.get(self.refresh_token, None) == token:
self._get_access_token()
# Try request again. If this still fails, propagate the error
# (because we have just refreshed the access token).
# FIXME: We can't rely on this if e.g. the system hibernated
# after refreshing the token, but before reaching this line.
headers['Authorization'] = 'Bearer ' + self.access_token[self.refresh_token]
self.conn.send_request(method, path, body=body, headers=headers,
expect100=expect100)
resp = self.conn.read_response()
if ((expect100 and resp.status == 100) or
(not expect100 and 200 <= resp.status <= 299)):
return resp
else:
raise self._parse_error_response(resp)
@retry
@copy_ancestor_docstring
def copy(self, src, dest, metadata=None):
log.debug('started with %s, %s', src, dest)
if not (metadata is None or isinstance(metadata, dict)):
raise TypeError('*metadata*: expected dict or None, got %s' % type(metadata))
headers = CaseInsensitiveDict()
if metadata is not None:
headers['Content-Type'] = 'application/json; charset="utf-8"'
body = json.dumps({'metadata': _wrap_user_meta(metadata)}).encode()
else:
body = None
path = '/storage/v1/b/%s/o/%s/rewriteTo/b/%s/o/%s' % (
urllib.parse.quote(self.bucket_name, safe=''),
urllib.parse.quote(self.prefix + src, safe=''),
urllib.parse.quote(self.bucket_name, safe=''),
urllib.parse.quote(self.prefix + dest, safe=''))
try:
resp = self._do_request('POST', path, headers=headers, body=body)
except RequestError as exc:
exc = _map_request_error(exc, src)
if exc:
raise exc
raise
json_resp = self._parse_json_response(resp)
assert json_resp['done']
assert 'rewriteToken' not in json_resp
def _map_request_error(exc: RequestError, key: Optional[str]):
'''Map RequestError to more general exception if possible'''
if exc.code == 404 and key:
return NoSuchObject(key)
elif exc.message == 'Forbidden':
return AuthorizationError(exc.message)
elif exc.message in ('Login Required', 'Invalid Credentials'):
return AuthenticationError(exc.message)
return None
def _wrap_user_meta(user_meta):
obj_meta = dict()
for (key, val) in user_meta.items():
if not isinstance(key, str):
raise TypeError('metadata keys must be str, not %s' % type(key))
if (not isinstance(val, (str, bytes, int, float, complex, bool))
and val is not None):
raise TypeError('value for key %s (%s) is not elementary' % (key, val))
if isinstance(val, (bytes, bytearray)):
val = b64encode(val)
obj_meta[key] = repr(val)
return obj_meta
def _unwrap_user_meta(json_resp):
'''Extract user metadata from JSON object metadata'''
meta_raw = json_resp.get('metadata', None)
if meta_raw is None:
return {}
# Detect Legacy format.
if (meta_raw.get('format', None) == 'raw2' and
'md5' in meta_raw and
all(key in ('format', 'md5') or re.match(r'^\d\d\d$', key)
for key in meta_raw.keys())):
parts = []
for i in count():
part = meta_raw.get('%03d' % i, None)
if part is None:
break
parts.append(part)
buf = ''.join(parts)
meta = literal_eval('{ %s }' % buf)
for (k,v) in meta.items():
if isinstance(v, bytes):
meta[k] = b64decode(v)
# TODO: Remove next block on next file system revision bump.
# Metadata MD5 headers were created by old S3QL versions where the
# Google Storage backend shared code with the S3C backend (which
# supports plain HTTP connections). There's no need to validate them
# here since Google Storage always uses TLS. However, we retain the code
# for now since the metadata format was used to detect an old filesystem
# revision.
stored_md5 = meta_raw.get('md5', None)
new_md5 = b64encode(checksum_basic_mapping(meta)).decode('ascii')
if stored_md5 != new_md5:
if UPGRADE_MODE:
old_md5 = b64encode(UPGRADE_MODE(meta)).decode('ascii')
if stored_md5 == old_md5:
meta['needs_reupload'] = True
else:
raise CorruptedObjectError(
'Metadata MD5 mismatch for %s (%s vs %s (old) or %s (new))'
% (json_resp.get('name', None), stored_md5, old_md5, new_md5))
else:
raise CorruptedObjectError(
'Metadata MD5 mismatch for %s (%s vs %s)'
% (json_resp.get('name', None), stored_md5, new_md5))
elif UPGRADE_MODE:
meta['needs_reupload'] = False
return meta
meta = {}
for (k,v) in meta_raw.items():
try:
v2 = literal_eval(v)
except ValueError as exc:
raise CorruptedObjectError('Invalid metadata value: ' + str(exc))
if isinstance(v2, bytes):
meta[k] = b64decode(v2)
else:
meta[k] = v2
return meta
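# Illustrative sketch (not part of the original module): round-tripping user
# metadata through _wrap_user_meta() / _unwrap_user_meta(). Values are stored
# as repr() strings (bytes are base64-encoded first), so literal_eval() can
# recover them on the way back. The sample dict below is hypothetical.
def _demo_meta_roundtrip():
    meta = {'size': 42, 'etag': b'\x00\xff', 'name': 'hello'}
    wrapped = _wrap_user_meta(meta)                      # all values become str
    restored = _unwrap_user_meta({'metadata': wrapped})
    assert restored == meta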
class ObjectR(object):
'''A GS object open for reading'''
def __init__(self, key, resp, backend, gs_meta):
self.key = key
self.closed = False
self.md5_checked = False
self.backend = backend
self.resp = resp
self.metadata = _unwrap_user_meta(gs_meta)
self.md5_want = b64decode(gs_meta['md5Hash'])
self.md5 = hashlib.md5()
def read(self, size=None):
'''Read up to *size* bytes of object data
For integrity checking to work, this method has to be called until
it returns an empty string, indicating that all data has been read
(and verified).
'''
if size == 0:
return b''
# This may raise an exception, in which case we probably can't re-use
# the connection. However, we rely on the caller to still close the
# file-like object, so that we can do cleanup in close().
buf = self.backend.conn.read(size)
self.md5.update(buf)
# Check MD5 on EOF (size == None implies EOF)
if (not buf or size is None) and not self.md5_checked:
self.md5_checked = True
if self.md5_want != self.md5.digest():
log.warning('MD5 mismatch for %s: %s vs %s',
self.key, b64encode(self.md5_want),
b64encode(self.md5.digest()))
raise ServerResponseError(error='md5Hash mismatch',
                                          body='<binary blob>',
resp=self.resp)
return buf
def __enter__(self):
return self
def __exit__(self, *a):
self.close()
return False
def close(self, checksum_warning=True):
'''Close object
If *checksum_warning* is true, this will generate a warning message if
the object has not been fully read (because in that case the MD5
checksum cannot be checked).
'''
if self.closed:
return
self.closed = True
# If we have not read all the data, close the entire
        # connection (otherwise we lose synchronization)
if not self.md5_checked:
if checksum_warning:
log.warning("Object closed prematurely, can't check MD5, and have to "
"reset connection")
self.backend.conn.disconnect()
class ObjectW(object):
    '''A GS object open for writing
All data is first cached in memory, upload only starts when
the close() method is called.
'''
def __init__(self, key, backend, metadata):
self.key = key
self.backend = backend
self.metadata = metadata
self.closed = False
self.obj_size = 0
self.md5 = hashlib.md5()
# According to http://docs.python.org/3/library/functions.html#open
# the buffer size is typically ~8 kB. We process data in much
# larger chunks, so buffering would only hurt performance.
self.fh = tempfile.TemporaryFile(buffering=0)
def write(self, buf):
'''Write object data'''
self.fh.write(buf)
self.md5.update(buf)
self.obj_size += len(buf)
def close(self):
'''Close object and upload data'''
if self.closed:
return
self.backend.write_fh(self.fh, self.key, self.md5.digest(),
self.metadata, size=self.obj_size)
self.closed = True
self.fh.close()
def __enter__(self):
return self
def __exit__(self, *a):
self.close()
return False
def get_obj_size(self):
if not self.closed:
raise RuntimeError('Object must be closed first.')
return self.obj_size
def md5sum_b64(buf):
'''Return base64 encoded MD5 sum'''
return b64encode(hashlib.md5(buf).digest()).decode('ascii')
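# Illustrative check (not part of the original module): the helper returns the
# base64 encoding of the 16-byte MD5 digest, e.g.
#   md5sum_b64(b'hello') == 'XUFAKrxLKna5cZ2REBfFkg=='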
|
s3ql/s3ql
|
src/s3ql/backends/gs.py
|
Python
|
gpl-3.0
| 32,712
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AppliedReservationList
from ._models_py3 import AppliedReservations
from ._models_py3 import AvailableScopeProperties
from ._models_py3 import AvailableScopeRequest
from ._models_py3 import AvailableScopeRequestProperties
from ._models_py3 import BillingInformation
from ._models_py3 import CalculateExchangeOperationResultResponse
from ._models_py3 import CalculateExchangeRequest
from ._models_py3 import CalculateExchangeRequestProperties
from ._models_py3 import CalculateExchangeResponseProperties
from ._models_py3 import CalculatePriceResponse
from ._models_py3 import CalculatePriceResponseProperties
from ._models_py3 import CalculatePriceResponsePropertiesBillingCurrencyTotal
from ._models_py3 import CalculatePriceResponsePropertiesPricingCurrencyTotal
from ._models_py3 import Catalog
from ._models_py3 import CreateGenericQuotaRequestParameters
from ._models_py3 import CurrentQuotaLimit
from ._models_py3 import CurrentQuotaLimitBase
from ._models_py3 import Error
from ._models_py3 import ExceptionResponse
from ._models_py3 import ExchangeOperationResultResponse
from ._models_py3 import ExchangePolicyError
from ._models_py3 import ExchangePolicyErrors
from ._models_py3 import ExchangeRequest
from ._models_py3 import ExchangeRequestProperties
from ._models_py3 import ExchangeResponseProperties
from ._models_py3 import ExtendedErrorInfo
from ._models_py3 import ExtendedStatusInfo
from ._models_py3 import MergeRequest
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationList
from ._models_py3 import OperationResponse
from ._models_py3 import OperationResultError
from ._models_py3 import Patch
from ._models_py3 import PatchPropertiesRenewProperties
from ._models_py3 import PaymentDetail
from ._models_py3 import Price
from ._models_py3 import PurchaseRequest
from ._models_py3 import PurchaseRequestPropertiesReservedResourceProperties
from ._models_py3 import QuotaLimits
from ._models_py3 import QuotaLimitsResponse
from ._models_py3 import QuotaProperties
from ._models_py3 import QuotaRequestDetails
from ._models_py3 import QuotaRequestDetailsList
from ._models_py3 import QuotaRequestOneResourceSubmitResponse
from ._models_py3 import QuotaRequestProperties
from ._models_py3 import QuotaRequestSubmitResponse
from ._models_py3 import QuotaRequestSubmitResponse201
from ._models_py3 import RenewPropertiesResponse
from ._models_py3 import RenewPropertiesResponseBillingCurrencyTotal
from ._models_py3 import RenewPropertiesResponsePricingCurrencyTotal
from ._models_py3 import ReservationList
from ._models_py3 import ReservationMergeProperties
from ._models_py3 import ReservationOrderBillingPlanInformation
from ._models_py3 import ReservationOrderList
from ._models_py3 import ReservationOrderResponse
from ._models_py3 import ReservationProperties
from ._models_py3 import ReservationResponse
from ._models_py3 import ReservationSplitProperties
from ._models_py3 import ReservationToExchange
from ._models_py3 import ReservationToPurchaseCalculateExchange
from ._models_py3 import ReservationToPurchaseExchange
from ._models_py3 import ReservationToReturn
from ._models_py3 import ReservationToReturnForExchange
from ._models_py3 import ResourceName
from ._models_py3 import ScopeProperties
from ._models_py3 import ServiceError
from ._models_py3 import ServiceErrorDetail
from ._models_py3 import SkuName
from ._models_py3 import SkuProperty
from ._models_py3 import SkuRestriction
from ._models_py3 import SplitRequest
from ._models_py3 import SubRequest
from ._models_py3 import SubscriptionScopeProperties
except (SyntaxError, ImportError):
from ._models import AppliedReservationList # type: ignore
from ._models import AppliedReservations # type: ignore
from ._models import AvailableScopeProperties # type: ignore
from ._models import AvailableScopeRequest # type: ignore
from ._models import AvailableScopeRequestProperties # type: ignore
from ._models import BillingInformation # type: ignore
from ._models import CalculateExchangeOperationResultResponse # type: ignore
from ._models import CalculateExchangeRequest # type: ignore
from ._models import CalculateExchangeRequestProperties # type: ignore
from ._models import CalculateExchangeResponseProperties # type: ignore
from ._models import CalculatePriceResponse # type: ignore
from ._models import CalculatePriceResponseProperties # type: ignore
from ._models import CalculatePriceResponsePropertiesBillingCurrencyTotal # type: ignore
from ._models import CalculatePriceResponsePropertiesPricingCurrencyTotal # type: ignore
from ._models import Catalog # type: ignore
from ._models import CreateGenericQuotaRequestParameters # type: ignore
from ._models import CurrentQuotaLimit # type: ignore
from ._models import CurrentQuotaLimitBase # type: ignore
from ._models import Error # type: ignore
from ._models import ExceptionResponse # type: ignore
from ._models import ExchangeOperationResultResponse # type: ignore
from ._models import ExchangePolicyError # type: ignore
from ._models import ExchangePolicyErrors # type: ignore
from ._models import ExchangeRequest # type: ignore
from ._models import ExchangeRequestProperties # type: ignore
from ._models import ExchangeResponseProperties # type: ignore
from ._models import ExtendedErrorInfo # type: ignore
from ._models import ExtendedStatusInfo # type: ignore
from ._models import MergeRequest # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationList # type: ignore
from ._models import OperationResponse # type: ignore
from ._models import OperationResultError # type: ignore
from ._models import Patch # type: ignore
from ._models import PatchPropertiesRenewProperties # type: ignore
from ._models import PaymentDetail # type: ignore
from ._models import Price # type: ignore
from ._models import PurchaseRequest # type: ignore
from ._models import PurchaseRequestPropertiesReservedResourceProperties # type: ignore
from ._models import QuotaLimits # type: ignore
from ._models import QuotaLimitsResponse # type: ignore
from ._models import QuotaProperties # type: ignore
from ._models import QuotaRequestDetails # type: ignore
from ._models import QuotaRequestDetailsList # type: ignore
from ._models import QuotaRequestOneResourceSubmitResponse # type: ignore
from ._models import QuotaRequestProperties # type: ignore
from ._models import QuotaRequestSubmitResponse # type: ignore
from ._models import QuotaRequestSubmitResponse201 # type: ignore
from ._models import RenewPropertiesResponse # type: ignore
from ._models import RenewPropertiesResponseBillingCurrencyTotal # type: ignore
from ._models import RenewPropertiesResponsePricingCurrencyTotal # type: ignore
from ._models import ReservationList # type: ignore
from ._models import ReservationMergeProperties # type: ignore
from ._models import ReservationOrderBillingPlanInformation # type: ignore
from ._models import ReservationOrderList # type: ignore
from ._models import ReservationOrderResponse # type: ignore
from ._models import ReservationProperties # type: ignore
from ._models import ReservationResponse # type: ignore
from ._models import ReservationSplitProperties # type: ignore
from ._models import ReservationToExchange # type: ignore
from ._models import ReservationToPurchaseCalculateExchange # type: ignore
from ._models import ReservationToPurchaseExchange # type: ignore
from ._models import ReservationToReturn # type: ignore
from ._models import ReservationToReturnForExchange # type: ignore
from ._models import ResourceName # type: ignore
from ._models import ScopeProperties # type: ignore
from ._models import ServiceError # type: ignore
from ._models import ServiceErrorDetail # type: ignore
from ._models import SkuName # type: ignore
from ._models import SkuProperty # type: ignore
from ._models import SkuRestriction # type: ignore
from ._models import SplitRequest # type: ignore
from ._models import SubRequest # type: ignore
from ._models import SubscriptionScopeProperties # type: ignore
from ._azure_reservation_api_enums import (
AppliedScopeType,
CalculateExchangeOperationResultStatus,
ErrorResponseCode,
ExchangeOperationResultStatus,
InstanceFlexibility,
OperationStatus,
PaymentStatus,
QuotaRequestState,
ReservationBillingPlan,
ReservationStatusCode,
ReservationTerm,
ReservedResourceType,
ResourceType,
)
__all__ = [
'AppliedReservationList',
'AppliedReservations',
'AvailableScopeProperties',
'AvailableScopeRequest',
'AvailableScopeRequestProperties',
'BillingInformation',
'CalculateExchangeOperationResultResponse',
'CalculateExchangeRequest',
'CalculateExchangeRequestProperties',
'CalculateExchangeResponseProperties',
'CalculatePriceResponse',
'CalculatePriceResponseProperties',
'CalculatePriceResponsePropertiesBillingCurrencyTotal',
'CalculatePriceResponsePropertiesPricingCurrencyTotal',
'Catalog',
'CreateGenericQuotaRequestParameters',
'CurrentQuotaLimit',
'CurrentQuotaLimitBase',
'Error',
'ExceptionResponse',
'ExchangeOperationResultResponse',
'ExchangePolicyError',
'ExchangePolicyErrors',
'ExchangeRequest',
'ExchangeRequestProperties',
'ExchangeResponseProperties',
'ExtendedErrorInfo',
'ExtendedStatusInfo',
'MergeRequest',
'OperationDisplay',
'OperationList',
'OperationResponse',
'OperationResultError',
'Patch',
'PatchPropertiesRenewProperties',
'PaymentDetail',
'Price',
'PurchaseRequest',
'PurchaseRequestPropertiesReservedResourceProperties',
'QuotaLimits',
'QuotaLimitsResponse',
'QuotaProperties',
'QuotaRequestDetails',
'QuotaRequestDetailsList',
'QuotaRequestOneResourceSubmitResponse',
'QuotaRequestProperties',
'QuotaRequestSubmitResponse',
'QuotaRequestSubmitResponse201',
'RenewPropertiesResponse',
'RenewPropertiesResponseBillingCurrencyTotal',
'RenewPropertiesResponsePricingCurrencyTotal',
'ReservationList',
'ReservationMergeProperties',
'ReservationOrderBillingPlanInformation',
'ReservationOrderList',
'ReservationOrderResponse',
'ReservationProperties',
'ReservationResponse',
'ReservationSplitProperties',
'ReservationToExchange',
'ReservationToPurchaseCalculateExchange',
'ReservationToPurchaseExchange',
'ReservationToReturn',
'ReservationToReturnForExchange',
'ResourceName',
'ScopeProperties',
'ServiceError',
'ServiceErrorDetail',
'SkuName',
'SkuProperty',
'SkuRestriction',
'SplitRequest',
'SubRequest',
'SubscriptionScopeProperties',
'AppliedScopeType',
'CalculateExchangeOperationResultStatus',
'ErrorResponseCode',
'ExchangeOperationResultStatus',
'InstanceFlexibility',
'OperationStatus',
'PaymentStatus',
'QuotaRequestState',
'ReservationBillingPlan',
'ReservationStatusCode',
'ReservationTerm',
'ReservedResourceType',
'ResourceType',
]
|
Azure/azure-sdk-for-python
|
sdk/reservations/azure-mgmt-reservations/azure/mgmt/reservations/models/__init__.py
|
Python
|
mit
| 12,190
|
import logging
from .scoring_system import ScoringSystem
logger = logging.getLogger(__name__)
class SingleThread(ScoringSystem):
def _process_missing_scores(self, request, missing_model_set_revs,
root_caches, inprogress_results=None):
rev_scores = {}
errors = {}
for missing_models, rev_ids in missing_model_set_revs.items():
for rev_id in rev_ids:
if rev_id not in root_caches:
continue
root_cache = root_caches[rev_id]
try:
score_map = self._process_score_map(
request, rev_id, missing_models, root_cache)
rev_scores[rev_id] = score_map
except Exception as error:
errors[rev_id] = error
return rev_scores, errors
@classmethod
def from_config(cls, config, name, section_key="scoring_systems"):
logger.info("Loading SingleThread '{0}' from config.".format(name))
kwargs = cls._kwargs_from_config(
config, name, section_key=section_key)
return cls(**kwargs)
|
he7d3r/ores
|
ores/scoring_systems/single_thread.py
|
Python
|
mit
| 1,157
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
def index():
    print 'Welcome to the backend management system!'
|
zhangyage/Python-oldboy
|
day04/reflect/backend/admin.py
|
Python
|
apache-2.0
| 105
|
"""Websocket client implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import socket
import time
import urllib.parse
import websocket as ws_client
_LOGGER = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = 30
class WSConnectionError(Exception):
"""Error raised when connection attempts fail."""
CLI_WS_EXCEPTIONS = [
(WSConnectionError, 'Could not connect to the websocket API')
]
class _RetryError(Exception):
"""Error indicating that retry attempt should be made."""
def __init__(self, since):
Exception.__init__(self)
self.since = since
def _ws_events(ws_conn, message, snapshot, since, on_message, on_error):
"""Process websocket events."""
# Pylint complains too many nested blocks.
#
# pylint: disable=R0101
last_timestamp = since
subscription_msg = {'since': since,
'snapshot': snapshot}
subscription_msg.update(message)
try:
ws_conn.send(json.dumps(subscription_msg))
while True:
try:
reply = ws_conn.recv()
if not reply:
break
result = json.loads(reply)
if '_error' in result:
if on_error:
on_error(result)
break
last_timestamp = result.get('when', time.time())
if on_message:
if not on_message(result):
break
except ws_client.WebSocketTimeoutException:
ws_conn.ping()
except ws_client.WebSocketConnectionClosedException as err:
_LOGGER.debug('ws connection closed, will retry: %s.', str(err))
raise _RetryError(last_timestamp)
finally:
ws_conn.close()
def ws_loop(apis, message, snapshot, on_message, on_error=None,
timeout=_DEFAULT_TIMEOUT):
"""Instance trace loop."""
ws_conn = None
since = 0
while True:
for api in apis:
try:
_LOGGER.debug('Connecting to %s, [timeout: %s]', api, timeout)
parsed = urllib.parse.urlparse(api)
if ':' in parsed.netloc:
host, _port = parsed.netloc.split(':')
else:
host = parsed.netloc
# TODO: we never use proxy when connecting to websocket
# server. It is not clear if such behavior need to be
# optional.
#
# Need to set both http_proxy_host AND http_no_proxy.
# The code in websocket client only examines _no_proxy if
# http_proxy_host is set. If it is not, it ignores the
                # no_proxy setting, and later on initializes proxy from
# environment variables (BUG).
ws_conn = ws_client.create_connection(
api,
timeout=timeout,
http_proxy_host='__ignored__.yet.must.be.set',
http_no_proxy=[host]
)
_LOGGER.debug('Connected.')
_LOGGER.debug('Sending %s', json.dumps(message))
return _ws_events(ws_conn, message, snapshot, since,
on_message, on_error)
except ws_client.WebSocketTimeoutException as to_err:
_LOGGER.debug('Connection timeout: %s, %s', api, str(to_err))
continue
except ws_client.WebSocketProxyException as proxy_err:
_LOGGER.debug('Websocket connection error: %s, %s', api,
str(proxy_err))
continue
except socket.error:
_LOGGER.debug('Connection failed: %s', api)
continue
except _RetryError as retry_err:
since = retry_err.since
if not ws_conn:
raise WSConnectionError()
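# Illustrative usage sketch (not part of the original module). The endpoints
# and the subscription message below are hypothetical; ws_loop() tries each
# API in turn and, via _RetryError, resumes from the last seen timestamp after
# a dropped connection.
def _example_trace_loop():
    def on_message(result):
        _LOGGER.info('event: %r', result)
        return True  # returning False stops the loop

    def on_error(result):
        _LOGGER.error('server error: %r', result)

    ws_loop(['ws://server-a:8080', 'ws://server-b:8080'],  # hypothetical
            {'topic': '/trace/myapp'},                     # hypothetical
            True,  # request an initial snapshot
            on_message,
            on_error)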
|
Morgan-Stanley/treadmill
|
lib/python/treadmill/websocket/client.py
|
Python
|
apache-2.0
| 4,130
|
from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from .forms import LoginForm, EditForm
from .models import User
from datetime import datetime
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.route('/')
@app.route('/index')
@login_required
def index():
user = g.user
posts = [ # fake array of posts
{
'author': {'nickname': 'John'},
'body': 'Beautiful day in Portland!'
},
{
'author': {'nickname': 'Susan'},
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html',
title='Home',
user=user,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first()
if user is None:
nickname = resp.nickname
if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
nickname = User.make_unique_nickname(nickname=nickname)
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember = remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/user/<nickname>')
@login_required
def user(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash('User %s not found.' % nickname)
return redirect(url_for('index'))
posts = [
{'author': user, 'body': 'Test post #1'},
{'author': user, 'body': 'Test post #2'}
]
return render_template('user.html',
user=user,
posts=posts)
@app.route('/edit', methods=['GET', 'POST'])
@login_required
def edit():
form = EditForm(g.user.nickname)
if form.validate_on_submit():
g.user.nickname = form.nickname.data
g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
        flash('Your changes have been saved.')
return redirect(url_for('edit'))
else:
form.nickname.data = g.user.nickname
form.about_me.data = g.user.about_me
return render_template('edit.html', form=form)
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
|
jtara1/SimpleFlaskWebsite
|
app/views.py
|
Python
|
apache-2.0
| 3,783
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pyliferisk: A python library for simple actuarial calculations
# Version: 1.11 - Nov 2019
# Copyright (C) 2019 Francisco Garate
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Mortality table class ----------------
class MortalityTable:
def __init__(self, lx=[], qx=[], i=[], nt=None, perc=100):
self.lx = lx
self.qx = qx
self.dx = []
self.ex = []
self.w = 0
self.i = i
self.q = 0
self.perc = perc
self.nt = nt
self.Dx = []
self.Nx = []
self.Cx = []
self.Mx = []
self.nEx = []
if nt:
mt = nt
init = mt[0]
self.qx = [0.0] * init
end_val = 0
for val in mt[1:]:
if end_val < 1000.0:
end_val = val * perc / 100
self.qx.append(end_val)
if perc != 100:
self.qx.append(1000)
if self.lx == []:
self.lx = [100000.0]
for val in self.qx:
self.lx.append(self.lx[-1] * (1 - val / 1000))
if self.lx[-1] != 0.0 :
self.lx.append(0.0)
if self.w == 0 :
self.w = self.lx.index(0) - 1
if self.qx == []:
#self.qx = []
l_x = self.lx[0]
for l_x1 in self.lx[1:]:
self.qx.append((l_x - l_x1) * 1000 / l_x)
l_x = l_x1
if self.dx == []:
dx_0 = -1
end_x_lx = self.lx.index(0)
for lx0 in self.lx:
dx_0 += 1
lx1 = min(dx_0 + 1, end_x_lx)
self.dx.append(lx0 - self.lx[lx1])
if self.ex == []:
for g in range(0, len(self.lx[:-1])):
lx_g = self.lx[g]
self.ex.append(0.5 + sum(self.lx[g + 1:-1]) / lx_g) #[g+1:-2] according notes from ucm
def view(self, start=0, end=10, var='lx'):
column = {'qx': self.qx, 'lx': self.lx, 'dx': self.dx, 'ex': self.ex, 'nt': self.nt, \
'Dx': self.Dx, 'Nx': self.Nx, 'Cx': self.Cx, 'Mx': self.Mx, 'nEx': self.nEx}
table_str = ''
index = start
if var == 'nt':
subs = 'index'
else:
subs = 'x'
for i in column[var][start:end + 1]:
table_str += '[{}={}] {}={}\n'.format(subs, index, var, i)
index += 1
print(table_str + 'Total number of rows for {} = {}'.format(var, len(column[var])))
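# Illustrative sketch (not part of the original library): building a tiny
# synthetic mortality table. In the hypothetical nt tuple, element 0 is the
# starting age and the remaining elements are qx rates per thousand, ending
# at 1000 (certain death); lx, dx and ex are derived from them.
def _demo_mortality_table():
    mt = MortalityTable(nt=(0, 10.0, 500.0, 1000.0))
    assert mt.lx[0] == 100000.0        # default radix
    assert mt.qx[1] == 500.0           # per-mille convention
    mt.view(start=0, end=3, var='lx')  # print the survivor column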
class Actuarial:
def __init__(self, lx=[], qx=[], nt=None, i=None, perc=100):
self.lx = lx
self.qx = qx
self.dx = []
self.ex = []
self.w = 0
self.i = i
self.q = 0
self.perc = perc
self.nt = nt
self.Dx = []
self.Nx = []
self.Cx = []
self.Mx = []
self.nEx = []
if nt:
mt = nt
init = mt[0]
self.qx = [0.0] * init
end_val = 0
for val in mt[1:]:
if end_val < 1000.0:
end_val = val * perc / 100
self.qx.append(end_val)
if perc != 100:
self.qx.append(1000)
if self.lx == []:
self.lx = [100000.0]
for val in self.qx:
self.lx.append(self.lx[-1] * ( 1 - val / 1000))
if self.lx[-1] != 0.0 :
self.lx.append(0.0)
if self.w == 0 :
self.w = self.lx.index(0) - 1
if self.qx == []:
#self.qx = []
l_x = self.lx[0]
for l_x1 in self.lx[1:]:
self.qx.append((l_x - l_x1) * 1000 / l_x)
l_x = l_x1
if self.dx == []:
dx_0 = -1
end_x_lx = self.lx.index(0)
for lx0 in self.lx:
dx_0 += 1
lx1 = min(dx_0 + 1, end_x_lx)
self.dx.append(lx0 - self.lx[lx1])
if self.ex == []:
for g in range(0, len(self.lx[:-1])):
lx_g = self.lx[g]
self.ex.append(0.5 + sum(self.lx[g + 1:-1]) / lx_g) #[g+1:-2] according notes from ucm
if self.Dx == []:
#self.Dx = []
age = -1
for j in self.lx:
age+=1
self.Dx.append(((1 / (1 + i)) ** age) * j)
if self.Nx == []:
#self.Nx = []
for k in range(0, len(self.Dx)):
self.Nx.append(sum(self.Dx[k:-1])) #[k:-2] according notes from ucm
if self.Cx == []:
#self.Cx = []
age = -1
for l in self.dx: #[:-1]
age += 1
C_x = ((1 / (1 + i)) ** (age + 1)) * l
self.Cx.append(C_x)
if self.Mx == []:
#self.Mx = []
for m in range(0, len(self.Cx)):
self.Mx.append(sum(self.Cx[m:-1])) # [m:-2] according notes from ucm
def view(self, start=0, end=10, var='lx'):
column = {'qx': self.qx, 'lx': self.lx, 'dx': self.dx, 'ex': self.ex, 'nt': self.nt, \
'Dx': self.Dx, 'Nx': self.Nx, 'Cx': self.Cx, 'Mx': self.Mx, 'nEx': self.nEx}
table_str = ''
index = start
if var == 'nt':
subs = 'index'
else:
subs = 'x'
for i in column[var][start:end + 1]:
table_str += '[{}={}] {}={}\n'.format(subs, index, var, i)
index += 1
print(table_str + 'Total number of rows for {} = {}'.format(var, len(column[var])))
# Actuarial notation -------------------
def qx(mt, x):
""" qx: Returns the probability that a life aged x dies before 1 year
With the convention: the true probability is qx/1000
Args:
mt: the mortality table
x: the age as integer number.
"""
if x < len(mt.qx):
return mt.qx[x]
else:
return 0
def lx(mt, x):
""" lx : Returns the number of survivors at begining of age x """
if x < len(mt.lx):
return mt.lx[x]
else:
return 0
def w(mt):
""" w : ultimate age (lw = 0) """
return len(mt.lx)
def dx(mt, x):
""" Returns the number of dying at begining of age x """
end_x_val = mt.lx.index(0)
if x < end_x_val:
return mt.lx[x] - mt.lx[x + 1]
else:
return 0.0
def px(mt, x):
""" px : Returns the probability of surviving within 1 year """
return 1000 - mt.qx[x]
def tpx(mt, x, t):
""" tpx : Returns the probability that x will survive within t years """
""" npx : Returns n years survival probability at age x """
return mt.lx[x + t] / mt.lx[x]
def tqx(mt, x, t):
""" nqx : Returns the probability to die within n years at age x """
return (mt.lx[x] - mt.lx[x + t]) / mt.lx[x]
def tqxn(mt, x, n, t):
""" n/qx : Probability to die in n years being alive at age x.
Probability that x survives n year, and then dies in th subsequent t years """
return tpx(mt, x, t) * qx(mt, x + n)
def ex(mt, x):
""" ex : Returns the curtate expectation of life. Life expectancy """
sum1 = 0
for j in mt.lx[x + 1:-1]:
sum1 += j
try:
return sum1 / mt.lx[x] + 0.5
    except ZeroDivisionError:
return 0
def mx(mt, x):
""" mx : Returns the central mortality rate """
return dx(mt, x) / mt.lx[x]
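# Worked example (illustrative, not from the original library): on a
# hypothetical synthetic table, one-year death and survival probabilities are
# complementary (per-mille convention) and tpx chains multiplicatively.
def _demo_probabilities():
    mt = MortalityTable(nt=(0, 10.0, 500.0, 1000.0))
    assert qx(mt, 0) + px(mt, 0) == 1000
    # surviving two years == surviving year one, then year two
    assert abs(tpx(mt, 0, 2) - tpx(mt, 0, 1) * tpx(mt, 1, 1)) < 1e-12
    assert abs(tpx(mt, 0, 1) + tqx(mt, 0, 1) - 1.0) < 1e-12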
# Commutations ------------------
def Dx(mt, x):
""" Return the Dx """
return ((1 / (1 + mt.i)) ** x) * mt.lx[x]
def Nx(mt, x):
""" Return the Nx """
n = len(mt.Dx)
sum1 = 0
for j in range(x, n):
k = mt.Dx[j]
sum1 += k
return sum1
def Sx(mt, x):
""" Return the Sx """
n = len(mt.Nx)
sum1 = 0
for j in range(x, n):
k = mt.Nx[j]
sum1 += k
return sum1
def Cx(mt, x):
""" Return the Cx """
return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i))
def Mx(mt, x):
""" Return the Mx """
n = len(mt.Cx)
sum1 = 0
for j in range(x, n):
k = mt.Cx[j]
sum1 += k
return sum1
def Rx(mt, x):
""" Return the Rx """
n = len(mt.Mx)
sum1 = 0
for j in range(x, n):
k = mt.Mx[j]
sum1 += k
return sum1
# Pure endowment: Deferred capital ---
def nEx(mt, x, n):
""" nEx : Returns the EPV of a pure endowment (deferred capital).
Pure endowment benefits are conditional on the survival of the policyholder. (v^n * npx) """
return mt.Dx[x + n] / mt.Dx[x]
# Actuarial present value
# Whole life insurance ---
def Ax(mt, x):
""" Ax : Returns the Expected Present Value (EPV) of a whole life insurance (i.e. net single premium).
It is also commonly referred to as the Actuarial Value or Actuarial Present Value. """
return mt.Mx[x] / mt.Dx[x]
# Term insurance ---
def Axn(mt, x, n):
""" (A^1)x:n : Returns the EPV (net single premium) of a term insurance. """
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x]
# Endowment insurance ---
def AExn(mt, x, n):
""" AExn : Returns the EPV of a endowment insurance.
An endowment insurance provides a combination of a term insurance and a pure endowment
"""
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] + mt.Dx[x + n] / mt.Dx[x]
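# Worked check (illustrative, not from the original library): once commutation
# columns are available through the Actuarial class, an endowment insurance
# decomposes as AExn = Axn + nEx. The table and interest rate are hypothetical.
def _demo_endowment_identity():
    mt = Actuarial(nt=(0, 10.0, 500.0, 1000.0), i=0.03)
    assert abs(AExn(mt, 0, 2) - (Axn(mt, 0, 2) + nEx(mt, 0, 2))) < 1e-12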
# Deferred insurance benefits ---
def tAx(mt, x, t):
""" n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance. """
return mt.Mx[x + t] / mt.Dx[x]
def tAxn(mt, x, n, t):
pass
# IAx ---
def IAx(mt, x):
""" This function evaluates the APV of an increasing life insurance. """
pass
def IAxn(mt, x, n):
""" This function evaluates the APV of an increasing life insurance. """
pass
def qAx(mt, x, q):
""" This function evaluates the APV of a geometrically increasing annual annuity-due """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return Ax(mtj, x)
def qAxn(mt, x, n, q):
    """ This function evaluates the APV of a geometrically increasing term insurance """
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return Axn(mtj, x, n)
def qtAx(mt, x, t, q):
    """ This function evaluates the APV of a geometrically increasing deferred whole life insurance """
    q = float(q)
    j = (mt.i - q) / (1 + q)  # j = (i-q)/(1+q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return tAx(mtj, x, t)
def qtAxn(mt, x, t, q):
pass
# Discrete Life Annuities ------------------
def aaxn(mt, x, n, m=1):
""" äxn : Return the actuarial present value of a (immediate) temporal (term certain) annuity:
n-year temporary life annuity-anticipatory. Payable 'm' per year at the beginning of the period
"""
if m == 1:
return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x]
else:
return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, n)))
def axn(mt, x, n, m=1):
""" axn : Return the actuarial present value of a (immediate) temporal (term certain) annuity:
n-year temporary life annuity-late. Payable 'm' per year at the ends of the period
"""
if m == 1:
return (mt.Nx[x + 1] - mt.Nx[x + n + 1]) / mt.Dx[x]
else:
return (mt.Nx[x + 1] - mt.Nx[x + n + 1]) / mt.Dx[x] + ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, n)))
def aax(mt, x, m=1):
""" äx : Returns the actuarial present value of an (immediate) annuity of 1 per time period
(whole life annuity-anticipatory). Payable 'm' per year at the beginning of the period
"""
return mt.Nx[x] / mt.Dx[x] - (float(m - 1) / float(m * 2))
def ax(mt, x, m=1):
""" ax : Returns the actuarial present value of an (immediate) annuity of 1 per time period
(whole life annuity-late). Payable 'm' per year at the ends of the period
"""
return (mt.Nx[x] / mt.Dx[x] - 1) + (float(m - 1) / float(m * 2))
def taaxn(mt, x, n, m=1):
pass
def taxn(mt, x, n, m=1):
pass
def taax(mt, x, t, m=1):
""" n/äx : Return the actuarial present value of a deferred annuity (deferred n years):
n-year deferred whole life annuity-anticipatory. Payable 'm' per year at the beginning of the period
"""
return mt.Nx[x + t] / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
def tax(mt, x, t, m=1):
""" n/ax : Return the actuarial present value of a deferred annuity (deferred n years):
n-year deferred whole life annuity-late. Payable 'm' per year at the ends of the period
"""
return mt.Nx[x + t + 1] / mt.Dx[x] + ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
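# Worked check (illustrative, not from the original library): for annual
# payments (m=1) the whole life annuity-anticipatory exceeds the annuity-late
# by exactly the payment made at time 0, i.e. äx = ax + 1. The table and
# interest rate are hypothetical.
def _demo_annuity_relation():
    mt = Actuarial(nt=(0, 10.0, 500.0, 1000.0), i=0.03)
    assert abs(aax(mt, 0) - (ax(mt, 0) + 1)) < 1e-12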
# Arithmetically increasing annuities (unitary) -----------------
def Iaaxn(mt, x, n, *args):
""" during a term certain, IAn """
return (Sx(mt, x) - Sx(nt, x + n) - n * Nx(nt, x + n)) / Dx(nt, x)
def Iaxn(mt, x, n, *args):
""" during a term certain, IAn """
return (Sx(mt, x + 1) - Sx(mt, x + n + 1) - n * Nx(mt, x + n + 1)) / Dx(mt, x)
def Iaax(mt, x, *args):
""" (Iä)x : Returns the present value of annuity-certain at the beginning of the first year
    and increasing linearly. Arithmetically increasing annuity-anticipatory
"""
return Sx(mt, x) / Dx(mt, x)
def Iax(mt, x, *args):
""" (Ia)x : Returns the present value of annuity-certain at the end of the first year
    and increasing linearly. Arithmetically increasing annuity-late
"""
return Sx(mt, x + 1) / Dx(mt, x)
def Itaax(mt, x, t):
""" deffered t years """
return (Sx(mt, x) - Sx(mt, x + t)) / Dx(mt, x)
def Itax(mt, x, t):
""" deffered t years """
return (Sx(mt, x + 1) - Sx(mt, x + t + 1)) / Dx(mt, x)
# Geometrically increasing annuities ---------------
def qax(mt, x, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return ax(mtj, x, m)
def qaax(mt, x, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return aax(mtj, x, m)
def qaxn(mt, x, n, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return axn(mtj, x, n, m)
def qaaxn(mt, x, n, q, m = 1):
""" geometrica """
#i = float(nt[1])
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return aaxn(mtj, x, n, m)
def qtax(mt, x, t, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return tax(mtj, x, t) + ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
def qtaax(mt, x, t, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return taax(mtj, x, t) - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
# Annuity formula ------------
def annuity(mt, x, n, p, m=1 , *args):
"""Syntax: annuity(nt, x, n, p, m, ['a/g', q], -d)
Args:
mt = the mortality table
x = the age as integer number.
n = A integer number (term of insurance in years) or 'w' = whole-life.
(Also, 99 years is defined to be whole-life).
p = Moment of payment. Syntaxis: 0 = begining of each period (prepaid), 1 = end of each period (postpaid),
Optional variables:
m = Payable 'm' per year (frational payments). Default = 1 (annually)
a or g = a: Arithmetical / g: Geometrical
q = The increase rate. Syntax: ['g',q] or ['a',q]. For example, ['g',0.03]
Deferring period:
-d = The n-years deferring period as negative number.
"""
l = len(args)
post = False
incr = False
deff = False
arit = False
wh_l = False
if isinstance(n,str) or n == 99:
wh_l = True
else:
pass
if isinstance(m,int) and m >=0 and l == 0:
pass
elif l == 0 and isinstance(m,list):
args = (m,)
m = 1
incr = True
elif l == 0 and int(m) < 0:
args = False
deff = True
t = int(m) * -1
m = 1
elif l == 1:
if isinstance(args[0], list):
incr = True
elif isinstance(args[0], int):
if isinstance(m, list):
deff = True
incr = True
t = int(args[0]) * -1
args = (m, )
m = 1
else:
deff = True
t = int(args[0]) * -1
args = False
else:
pass
elif l == 2:
if isinstance(args[0], list):
deff = True
t = int(args[1]) * -1
incr = True
elif isinstance(args[0], int):
deff = True
t = int(args[0]) * -1
args = args[1]
else:
pass
else:
pass
if p == 1:
post = True
elif p == 0:
pass
else:
        print('Error: payment value must be 0 or 1')
if incr:
if 'a' in args[0]:
arit = True
incr = False
elif 'g' in args[0]:
incr = True
q = args[0][1]
else:
return "Error: increasing value is 'a' or 'g'"
else:
pass
if not incr and not deff and not wh_l and not post:
return aaxn(mt, x, n, m)
elif not incr and not deff and not wh_l and post:
return axn(mt, x, n, m)
elif not incr and not deff and wh_l and not post:
return aax(mt, x, m)
elif not incr and not deff and wh_l and post:
return ax(mt, x, m)
elif not incr and deff and not wh_l and not post:
return taaxn(mt, x, n, t, m)
elif not incr and deff and not wh_l and post:
return taxn(mt, x, n, t, m)
elif not incr and deff and wh_l and not post:
return taax(mt, x, t, m)
elif not incr and deff and wh_l and post:
return tax(mt, x, t, m)
elif incr and not deff and not wh_l and not post:
return qaaxn(mt, x, n, q, m)
elif incr and not deff and not wh_l and post:
return qaxn(mt, x, n, q, m)
elif incr and not deff and wh_l and not post:
return qaax(mt, x, q, m)
elif incr and not deff and wh_l and post:
return qax(mt, x, q, m)
elif incr and deff and not wh_l and not post:
return qtaaxn(mt, x, n, t, q, m)
elif incr and deff and not wh_l and post:
return qtaxn(mt, x, n, t, q, m)
elif incr and deff and wh_l and not post:
return qtaax(mt, x, t, q, m)
else:
#elif incr and deff and wh_l and post:
return Itax(mt, x, t)
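# Illustrative usage of the annuity() dispatcher (table and arguments are
# hypothetical): with p=0 and no optional arguments it reduces to a temporary
# life annuity-anticipatory, matching aaxn() called directly.
def _demo_annuity_dispatch():
    mt = Actuarial(nt=(0, 10.0, 500.0, 1000.0), i=0.03)
    assert annuity(mt, 0, 2, 0) == aaxn(mt, 0, 2)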
|
franciscogarate/pyliferisk
|
pyliferisk/__init__.py
|
Python
|
gpl-3.0
| 19,066
|
from django.utils.translation import ugettext as _
from corehq.apps.groups.models import Group
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.api.es import CaseES
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from dimagi.utils.decorators.memoized import memoized
from custom.bihar.reports.display import MCHMotherDisplay, MCHChildDisplay
from dimagi.utils.timezones import utils as tz_utils
import pytz
from custom.bihar.utils import get_all_owner_ids_from_group
class MCHBaseReport(CustomProjectReport, CaseListReport):
ajax_pagination = True
asynchronous = True
exportable = True
emailable = False
fix_left_col = True
report_template_path = "bihar/reports/report.html"
fields = [
'corehq.apps.reports.fields.GroupField',
'corehq.apps.reports.fields.SelectOpenCloseField',
]
@property
def case_filter(self):
group_id = self.request_params.get('group', '')
filters = []
if group_id:
group = Group.get(group_id)
users_in_group = get_all_owner_ids_from_group(group)
if users_in_group:
or_stm = []
for user_id in users_in_group:
or_stm.append({'term': {'owner_id': user_id}})
filters.append({"or": or_stm})
else:
filters.append({'term': {'owner_id': group_id}})
return {'and': filters} if filters else {}
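    # Shape of the elasticsearch filter emitted above for a group with two
    # owner ids (ids hypothetical):
    #   {'and': [{'or': [{'term': {'owner_id': 'owner-1'}},
    #                    {'term': {'owner_id': 'owner-2'}}]}]}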
@property
@memoized
def case_es(self):
return CaseES(self.domain)
@property
@memoized
def rendered_report_title(self):
return self.name
    def date_to_json(self, date):
        if not date:
            return ""
        return tz_utils.adjust_datetime_to_timezone(
            date, pytz.utc.zone, self.timezone.zone).strftime('%d/%m/%Y')
class MotherMCHRegister(MCHBaseReport):
name = "Mother MCH register"
slug = "mother_mch_register"
default_case_type = "cc_bihar_pregnancy"
@property
def headers(self):
headers = DataTablesHeader(DataTablesColumn(_("CHW Name")),
DataTablesColumnGroup(
_("Beneficiary Information"),
DataTablesColumn(_("Mother Name"), sortable=False),
DataTablesColumn(_("Husband Name"), sortable=False),
DataTablesColumn(_("City/ward/village"), sortable=False),
DataTablesColumn(_("Full address"), sortable=False),
DataTablesColumn(_("MCTS ID"), sortable=False),
DataTablesColumn(_("Mobile number"), sortable=False),
DataTablesColumn(_("Whose Mobile Number"), sortable=False),
DataTablesColumn(_("Mother DOB / AGE"), sortable=False),
DataTablesColumn(_("JSY beneficiary"), sortable=False),
DataTablesColumn(_("Caste"), sortable=False)),
DataTablesColumnGroup(
_("Provider Information"),
DataTablesColumn(_("ASHA Name"), sortable=False),
DataTablesColumn(_("Asha phone"), sortable=False),
DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
DataTablesColumn(_("AWW name"), sortable=False),
DataTablesColumn(_("AWW phone number"), sortable=False),
DataTablesColumn(_("LMP"), sortable=False),
DataTablesColumn(_("EDD"), sortable=False)),
DataTablesColumnGroup(
_("First ANC (within 12 weeks)"),
DataTablesColumn(_("ANC 1 Date"), sortable=False),
DataTablesColumn(_("ANC 1 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 1 Weight"), sortable=False),
DataTablesColumn(_("ANC Hb"), sortable=False),
DataTablesColumn(_("ANC1 completed within 12 weeks? "), sortable=False)),
DataTablesColumnGroup(
_("Second ANC (14-26 weeks)"),
DataTablesColumn(_("ANC 2 Date"), sortable=False),
DataTablesColumn(_("ANC 2 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 2 Weight"), sortable=False)),
DataTablesColumnGroup(
_("Third ANC (28-34 weeks)"),
DataTablesColumn(_("ANC 3 Date"), sortable=False),
DataTablesColumn(_("ANC 3 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 3 Weight"), sortable=False)),
DataTablesColumnGroup(
_("Fourth ANC (34 weeks to Delivery)"),
DataTablesColumn(_("ANC 4 Date"), sortable=False),
DataTablesColumn(_("ANC 4 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 4 Weight"), sortable=False),
DataTablesColumn(_("TT1 date"), sortable=False),
DataTablesColumn(_("TT2 date"), sortable=False),
DataTablesColumn(_("TT Booster"), sortable=False),
DataTablesColumn(_("Received date of 100 IFA tablets "), sortable=False),
DataTablesColumn(_("Anemia"), sortable=False),
DataTablesColumn(_("Any complications"), sortable=False),
DataTablesColumn(_("RTI /STI <yes/no>"), sortable=False)),
DataTablesColumnGroup(
_("Pregnancy Outcome"),
DataTablesColumn(_("Date of delivery"), sortable=False),
DataTablesColumn(
_("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
DataTablesColumn(_("Nature of delivery"), sortable=False),
DataTablesColumn(_("Complications"), sortable=False),
DataTablesColumn(_("Discharge date"), sortable=False),
DataTablesColumn(_("Received date of JSY benefits"), sortable=False),
DataTablesColumn(_("Abortion type"), sortable=False)),
DataTablesColumnGroup(
_("Post Delivery Details"),
DataTablesColumn(
_("First PNC visit (within 48 hours / within 7 days/ after 7 days)"), sortable=False),
DataTablesColumn(_("Complications after delivery"), sortable=False),
DataTablesColumn(_("Type of family planning adopted after delivery"), sortable=False),
DataTablesColumn(_("Checked mother and infant immediate after delivery?"), sortable=False),
DataTablesColumn(_("Infant outcome number code"), sortable=False)),
DataTablesColumnGroup(
_("Child 1 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
DataTablesColumnGroup(
_("Child 2 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
DataTablesColumnGroup(
_("Child 3 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
DataTablesColumnGroup(
_("Child 4 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False),
DataTablesColumn(_("Migrate status "), sortable=False))
)
return headers
@property
def rows(self):
case_displays = (MCHMotherDisplay(self, self.get_case(case))
for case in self.es_results['hits'].get('hits', []))
for disp in case_displays:
yield [
disp.chw_name,
disp.mother_name,
disp.husband_name,
disp.ward_number,
disp.village,
disp.mcts_id,
disp.mobile_number,
disp.mobile_number_whose,
disp.dob_age,
disp.jsy_beneficiary,
disp.caste,
disp.asha_name,
disp.asha_number,
disp.awc_code_name,
disp.aww_name,
disp.aww_number,
disp.lmp,
disp.edd,
disp.anc_date_1,
disp.blood_pressure_1,
disp.weight_1,
disp.hemoglobin,
disp.anc_completed,
disp.anc_date_2,
disp.blood_pressure_2,
disp.weight_2,
disp.anc_date_3,
disp.blood_pressure_3,
disp.weight_3,
disp.anc_date_4,
disp.blood_pressure_4,
disp.weight_4,
disp.tt1_date,
disp.tt2_date,
disp.tt_booster,
disp.ifa_tablets,
disp.anemia,
disp.complications,
disp.rti_sti,
disp.add,
disp.home_sba_assist,
disp.delivery_nature,
disp.complications,
disp.discharge_date,
disp.jsy_money_date,
disp.abortion_type,
disp.first_pnc_time,
disp.delivery_complications,
disp.family_planning_type,
disp.all_pnc_on_time,
disp.num_children,
disp.case_name_1,
disp.gender_1,
disp.first_weight_1,
disp.breastfed_hour_1,
disp.case_name_2,
disp.gender_2,
disp.first_weight_2,
disp.breastfed_hour_2,
disp.case_name_3,
disp.gender_3,
disp.first_weight_3,
disp.breastfed_hour_3,
disp.case_name_4,
disp.gender_4,
disp.first_weight_4,
disp.breastfed_hour_4,
disp.status
]
class ChildMCHRegister(MCHBaseReport):
name = "Child MCH register"
slug = "child_mch_register"
default_case_type = "cc_bihar_newborn"
@property
def headers(self):
headers = DataTablesHeader(DataTablesColumn(_("CHW Name")),
DataTablesColumnGroup(
_("Beneficiary Information"),
DataTablesColumn(_("Child Name"), sortable=False),
DataTablesColumn(_("Father and Mother Name"), sortable=False),
DataTablesColumn(_("Mother's MCTS ID"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("City/ward/village"), sortable=False),
DataTablesColumn(_("Address"), sortable=False),
DataTablesColumn(_("Mobile number"), sortable=False),
DataTablesColumn(_("Whose Mobile Number"), sortable=False),
DataTablesColumn(_("DOB / AGE"), sortable=False),
DataTablesColumn(_("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
DataTablesColumn(_("Caste"), sortable=False)),
DataTablesColumnGroup(
_("Provider Information"),
DataTablesColumn(_("ASHA Name"), sortable=False),
DataTablesColumn(_("Asha phone"), sortable=False),
DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
DataTablesColumn(_("AWW name"), sortable=False),
DataTablesColumn(_("AWW phone number"), sortable=False)),
DataTablesColumnGroup(
_("At Birth"),
DataTablesColumn(_("BCG"), sortable=False),
DataTablesColumn(_("OPV0"), sortable=False),
DataTablesColumn(_("Hepatitis-Birth dose "), sortable=False)),
DataTablesColumnGroup(
_("At 6 Weeks"),
DataTablesColumn(_("DPT1"), sortable=False),
DataTablesColumn(_("OPV1"), sortable=False),
DataTablesColumn(_("Hepatitis-B1"), sortable=False)),
DataTablesColumnGroup(
_("At 10 Weeks"),
DataTablesColumn(_("DPT2"), sortable=False),
DataTablesColumn(_("OPV2"), sortable=False),
DataTablesColumn(_("Hepatitis-B2"), sortable=False)),
DataTablesColumnGroup(
_("At 14 Weeks"),
DataTablesColumn(_("DPT3"), sortable=False),
DataTablesColumn(_("OPV3"), sortable=False),
DataTablesColumn(_("Hepatitis-B3"), sortable=False)),
DataTablesColumnGroup(
_("Between 9-12 Months"),
DataTablesColumn(_("Measles (1st dose)"), sortable=False)),
DataTablesColumnGroup(
_("Between 16-24 Months"),
DataTablesColumn(
_("Vitamin A dose-1 "), sortable=False),
DataTablesColumn(_("Measles (2nd dose)/ MR Vaccine"))),
DataTablesColumnGroup(
_("After 2 Years"),
DataTablesColumn(_("DPT Booster"), sortable=False),
DataTablesColumn(_("OPV Booster"), sortable=False),
DataTablesColumn(_("Vitamin A dose-2"), sortable=False),
DataTablesColumn(_("Vitamin A dose-3"), sortable=False),
DataTablesColumn(_("JE Vaccine"), sortable=False))
)
return headers
@property
def rows(self):
case_displays = (MCHChildDisplay(self, self.get_case(case))
for case in self.es_results['hits'].get('hits', []))
for disp in case_displays:
yield [
disp.chw_name,
disp.child_name,
disp.father_mother_name,
disp.mcts_id,
disp.gender,
disp.ward_number,
disp.village,
disp.mobile_number,
disp.mobile_number_whose,
disp.dob_age,
disp.home_sba_assist,
disp.caste,
disp.asha_name,
disp.asha_number,
disp.awc_code_name,
disp.aww_name,
disp.aww_number,
disp.bcg_date,
disp.opv_0_date,
disp.hep_b_0_date,
disp.dpt_1_date,
disp.opv_1_date,
disp.hep_b_1_date,
disp.dpt_2_date,
disp.opv_2_date,
disp.hep_b_2_date,
disp.dpt_3_date,
disp.opv_3_date,
disp.hep_b_3_date,
disp.measles_date,
disp.vit_a_1_date,
disp.date_measles_booster,
disp.dpt_booster_date,
disp.opv_booster_date,
disp.vit_a_2_date,
disp.vit_a_3_date,
disp.date_je
]
|
gmimano/commcaretest
|
custom/bihar/reports/mch_reports.py
|
Python
|
bsd-3-clause
| 19,214
|
from bingads.v13.bulk.entities import *
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.extensions import *
class BulkAdGroupNegativeLocationCriterion(_SingleRecordBulkEntity):
""" Represents an Ad Group Negative Location Criterion that can be read or written in a bulk file.
This class exposes the :attr:`negative_ad_group_criterion` property that can be read and written as fields of the
Ad Group Negative Location Criterion record in a bulk file.
For more information, see Ad Group Negative Location Criterion at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
negative_ad_group_criterion=None,
campaign_name=None,
ad_group_name=None, ):
super(BulkAdGroupNegativeLocationCriterion, self).__init__()
self._negative_ad_group_criterion = negative_ad_group_criterion
self._campaign_name = campaign_name
        self._ad_group_name = ad_group_name
_MAPPINGS = [
_SimpleBulkMapping(
_StringTable.Status,
field_to_csv=lambda c: bulk_str(c.negative_ad_group_criterion.Status),
csv_to_field=lambda c, v: setattr(c.negative_ad_group_criterion, 'Status', v if v else None)
),
_SimpleBulkMapping(
_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.negative_ad_group_criterion.Id),
csv_to_field=lambda c, v: setattr(c.negative_ad_group_criterion, 'Id', int(v) if v else None)
),
_SimpleBulkMapping(
_StringTable.ParentId,
field_to_csv=lambda c: bulk_str(c.negative_ad_group_criterion.AdGroupId),
csv_to_field=lambda c, v: setattr(c.negative_ad_group_criterion, 'AdGroupId', int(v) if v else None)
),
_SimpleBulkMapping(
_StringTable.Campaign,
field_to_csv=lambda c: c.campaign_name,
csv_to_field=lambda c, v: setattr(c, 'campaign_name', v)
),
_SimpleBulkMapping(
_StringTable.AdGroup,
field_to_csv=lambda c: c.ad_group_name,
csv_to_field=lambda c, v: setattr(c, 'ad_group_name', v)
),
_SimpleBulkMapping(
_StringTable.Target,
field_to_csv=lambda c: field_to_csv_LocationTarget(c.negative_ad_group_criterion),
csv_to_field=lambda c, v: csv_to_field_LocationTarget(c.negative_ad_group_criterion, int(v) if v else None)
),
_SimpleBulkMapping(
_StringTable.SubType,
field_to_csv=lambda c: field_to_csv_LocationType(c.negative_ad_group_criterion),
csv_to_field=lambda c, v: csv_to_field_LocationType(c.negative_ad_group_criterion, v)
),
_SimpleBulkMapping(
_StringTable.Name,
field_to_csv=lambda c: field_to_csv_LocationName(c.negative_ad_group_criterion),
csv_to_field=lambda c, v: csv_to_field_LocationName(c.negative_ad_group_criterion, v)
),
]
@property
def negative_ad_group_criterion(self):
""" Defines a Ad Group Criterion """
return self._negative_ad_group_criterion
@negative_ad_group_criterion.setter
def negative_ad_group_criterion(self, negative_ad_group_criterion):
self._negative_ad_group_criterion = negative_ad_group_criterion
@property
def campaign_name(self):
""" The name of the Campaign
:rtype: str
"""
return self._campaign_name
@campaign_name.setter
def campaign_name(self, campaign_name):
self._campaign_name = campaign_name
@property
def ad_group_name(self):
""" The name of the Ad Group
:rtype: str
"""
return self._ad_group_name
@ad_group_name.setter
def ad_group_name(self, ad_group_name):
self._ad_group_name = ad_group_name
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.negative_ad_group_criterion, 'negative_ad_group_criterion')
self.convert_to_values(row_values, BulkAdGroupNegativeLocationCriterion._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
self._negative_ad_group_criterion = _CAMPAIGN_OBJECT_FACTORY_V13.create('NegativeAdGroupCriterion')
self._negative_ad_group_criterion.Type = 'NegativeAdGroupCriterion'
self._negative_ad_group_criterion.Criterion = _CAMPAIGN_OBJECT_FACTORY_V13.create('LocationCriterion')
self._negative_ad_group_criterion.Criterion.Type = 'LocationCriterion'
row_values.convert_to_entity(self, BulkAdGroupNegativeLocationCriterion._MAPPINGS)
def read_additional_data(self, stream_reader):
super(BulkAdGroupNegativeLocationCriterion, self).read_additional_data(stream_reader)
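# Illustrative construction sketch (mirrors process_mappings_from_row_values
# above; the campaign and ad group names are placeholders):
#
#   criterion = _CAMPAIGN_OBJECT_FACTORY_V13.create('NegativeAdGroupCriterion')
#   criterion.Type = 'NegativeAdGroupCriterion'
#   criterion.Criterion = _CAMPAIGN_OBJECT_FACTORY_V13.create('LocationCriterion')
#   criterion.Criterion.Type = 'LocationCriterion'
#   entity = BulkAdGroupNegativeLocationCriterion(
#       negative_ad_group_criterion=criterion,
#       campaign_name='Example Campaign',
#       ad_group_name='Example Ad Group')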
|
bing-ads-sdk/BingAds-Python-SDK
|
bingads/v13/bulk/entities/target_criterions/bulk_ad_group_negative_location_criterion.py
|
Python
|
mit
| 5,218
|
import json, io, re, requests
from bs4 import BeautifulSoup
from datetime import datetime
def get_datasets(url):
r = requests.get(url.format(0))
    soup = BeautifulSoup(r.text, 'html.parser')
href = soup.select('#block-system-main a')[-1]['href']
last_page = int(re.match(r'.*page=(.*)', href).group(1))
for page in range(last_page + 1):
print( '[DEBUG] page:', page )
r = requests.get(url.format(page))
        soup = BeautifulSoup(r.text, 'html.parser')
for link in soup.select('h2 a'):
yield (link['href'], link.text)
def get_metadata(url):
r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    metadata = dict()
    # `url` arrives fully formatted from the caller, so use it as-is instead
    # of re-formatting it against the leaked global `d`.
    metadata['_url'] = url
metadata['_collection_date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
for elem in soup.select('.datasetview_container .datasetview_row'):
for field in elem.select('.field'):
label = field.select('.field-label')[0].text[:-2]
item_list = list()
item = field.select('.field-item')
if label == 'Website':
metadata[label] = item[0].select('a')[0]['href']
elif len(item) == 0:
items = elem.select('.tag_list a')
for i in items:
item_list.append(i.text.strip())
metadata[label] = item_list
else:
metadata[label] = item[0].text.strip()
tags = set()
for elem in soup.select('.tag_list a'):
tags.add(elem.text.strip())
metadata['tags'] = list(tags)
return metadata
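# Shape of the dict this returns (values hypothetical; '_type' and '_title'
# are added by the caller below):
#   {'_url': 'http://daten.berlin.de/datensaetze/example',
#    '_collection_date': '2015-01-01 12:00:00',
#    'Website': 'http://example.org',
#    'tags': ['umwelt', 'verkehr'],
#    ...}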
if __name__ == '__main__':
base_url = 'http://daten.berlin.de{}'
datasets_url = 'http://daten.berlin.de/datensaetze?page={}'
documents_url = 'http://daten.berlin.de/dokumente?page={}'
all_labels = set()
all_metadata = list()
done_datasets = set()
# iterate over all dataset urls
for d, t in get_datasets(datasets_url):
if d in done_datasets:
print('skip', d)
continue # skip datasets
m = get_metadata(base_url.format(d))
m['_type'] = 'dataset'
m['_title'] = t
all_metadata.append(m)
        all_labels.update(m)
print(json.dumps(m, sort_keys=1, ensure_ascii=False))
done_datasets.add(d)
# iterate over all document urls
for d, t in get_datasets(documents_url):
if d in done_datasets:
print('skip', d)
continue # skip datasets
m = get_metadata(base_url.format(d))
m['_type'] = 'document'
m['_title'] = t
all_metadata.append(m)
        all_labels.update(m)
print(json.dumps(m, sort_keys=1, ensure_ascii=False))
done_datasets.add(d)
# write json file
with io.open('daten-berlin_metadata.json', 'w', encoding='utf8') as json_file:
json_file.write((json.dumps(all_metadata, indent=2, sort_keys=True, ensure_ascii=False)))
# write csv
with open('daten-berlin_metadata.csv', 'wb') as csv_file:
for l in sorted(all_labels):
csv_file.write((l + ';').encode('utf8'))
csv_file.write('\n'.encode('utf8'))
for m in all_metadata:
for l in sorted(all_labels):
if l in m:
csv_file.write(str(m[l]).encode('utf8'))
csv_file.write(';'.encode('utf8'))
csv_file.write('\n'.encode('utf8'))
|
nbi-opendata/metadaten-scraper
|
get-metadata.py
|
Python
|
mit
| 3,412
|
# Copyright 2013 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo.config import cfg
from oslo import messaging
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova import rpc
CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('conductor',
help='Set a version cap for messages sent to conductor services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConductorAPI(object):
"""Client side of the conductor RPC API
API version history:
1.0 - Initial version.
1.1 - Added migration_update
1.2 - Added instance_get_by_uuid and instance_get_all_by_host
1.3 - Added aggregate_host_add and aggregate_host_delete
1.4 - Added migration_get
1.5 - Added bw_usage_update
1.6 - Added get_backdoor_port()
1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
1.9 - Added provider_fw_rule_get_all
1.10 - Added agent_build_get_by_triple
1.11 - Added aggregate_get
1.12 - Added block_device_mapping_update_or_create
1.13 - Added block_device_mapping_get_all_by_instance
1.14 - Added block_device_mapping_destroy
1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
1.16 - Added instance_destroy
1.17 - Added instance_info_cache_delete
1.18 - Added instance_type_get
1.19 - Added vol_get_usage_by_time and vol_usage_update
1.20 - Added migration_get_unconfirmed_by_dest_compute
1.21 - Added service_get_all_by
1.22 - Added ping
1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
1.26 - Added instance_info_cache_update
1.27 - Added service_create
1.28 - Added binary arg to service_get_all_by
1.29 - Added service_destroy
1.30 - Added migration_create
1.31 - Added migration_get_in_progress_by_host_and_node
1.32 - Added optional node to instance_get_all_by_host
1.33 - Added compute_node_create and compute_node_update
1.34 - Added service_update
1.35 - Added instance_get_active_by_window_joined
1.36 - Added instance_fault_create
1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
1.38 - Added service name to instance_update
1.39 - Added notify_usage_exists
1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
1.43 - Added compute_stop
1.44 - Added compute_node_delete
1.45 - Added project_id to quota_commit and quota_rollback
1.46 - Added compute_confirm_resize
1.47 - Added columns_to_join to instance_get_all_by_host and
instance_get_all_by_filters
1.48 - Added compute_unrescue
    ... Grizzly supports message version 1.48. So, any changes to existing
        methods in 1.x after that point should be done such that they can
        handle the version_cap being set to 1.48.
1.49 - Added columns_to_join to instance_get_by_uuid
1.50 - Added object_action() and object_class_action()
1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
1.52 - Pass instance objects for compute_confirm_resize
1.53 - Added compute_reboot
1.54 - Added 'update_cells' argument to bw_usage_update
1.55 - Pass instance objects for compute_stop
1.56 - Remove compute_confirm_resize and
migration_get_unconfirmed_by_dest_compute
1.57 - Remove migration_create()
1.58 - Remove migration_get()
... Havana supports message version 1.58. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.58.
1.59 - Remove instance_info_cache_update()
1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
... - Remove security_group_get_by_instance() and
security_group_rule_get_by_security_group()
1.61 - Return deleted instance from instance_destroy()
1.62 - Added object_backport()
1.63 - Changed the format of values['stats'] from a dict to a JSON string
in compute_node_update()
1.64 - Added use_slave to instance_get_all_filters()
... - Remove instance_type_get()
... - Remove aggregate_get()
... - Remove aggregate_get_by_host()
... - Remove instance_get()
... - Remove migration_update()
... - Remove block_device_mapping_destroy()
2.0 - Drop backwards compatibility
... - Remove quota_rollback() and quota_commit()
"""
VERSION_ALIASES = {
'grizzly': '1.48',
'havana': '1.58',
'icehouse': '2.0',
}
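    # Example resolution (values illustrative): ``[upgrade_levels]
    # conductor = havana`` in nova.conf caps messages at '1.58' via the
    # aliases above, while an explicit cap such as '1.55' passes through
    # the .get() below unchanged.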
def __init__(self):
super(ConductorAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic, version='2.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
CONF.upgrade_levels.conductor)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def instance_update(self, context, instance_uuid, updates,
service=None):
updates_p = jsonutils.to_primitive(updates)
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_update',
instance_uuid=instance_uuid,
updates=updates_p,
service=service)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
kwargs = {'instance_uuid': instance_uuid,
'columns_to_join': columns_to_join}
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_by_uuid', **kwargs)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
cctxt = self.client.prepare()
return cctxt.call(context,
'migration_get_in_progress_by_host_and_node',
host=host, node=node)
def aggregate_host_add(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare()
return cctxt.call(context, 'aggregate_host_add',
aggregate=aggregate_p,
host=host)
def aggregate_host_delete(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare()
return cctxt.call(context, 'aggregate_host_delete',
aggregate=aggregate_p,
host=host)
def aggregate_metadata_get_by_host(self, context, host, key):
cctxt = self.client.prepare()
return cctxt.call(context, 'aggregate_metadata_get_by_host',
host=host,
key=key)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
last_refreshed=None, update_cells=True):
msg_kwargs = dict(uuid=uuid, mac=mac, start_period=start_period,
bw_in=bw_in, bw_out=bw_out, last_ctr_in=last_ctr_in,
last_ctr_out=last_ctr_out,
last_refreshed=last_refreshed,
update_cells=update_cells)
cctxt = self.client.prepare()
return cctxt.call(context, 'bw_usage_update', **msg_kwargs)
def provider_fw_rule_get_all(self, context):
cctxt = self.client.prepare()
return cctxt.call(context, 'provider_fw_rule_get_all')
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
cctxt = self.client.prepare()
return cctxt.call(context, 'agent_build_get_by_triple',
hypervisor=hypervisor, os=os,
architecture=architecture)
def block_device_mapping_update_or_create(self, context, values,
create=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'block_device_mapping_update_or_create',
values=values, create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'block_device_mapping_get_all_by_instance',
instance=instance_p, legacy=legacy)
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join=None,
use_slave=False):
msg_kwargs = dict(filters=filters, sort_key=sort_key,
sort_dir=sort_dir, columns_to_join=columns_to_join,
use_slave=use_slave)
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_all_by_filters', **msg_kwargs)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_active_by_window_joined',
begin=begin, end=end, project_id=project_id,
host=host)
def instance_destroy(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_destroy', instance=instance_p)
def instance_info_cache_delete(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
cctxt.call(context, 'instance_info_cache_delete', instance=instance_p)
def vol_get_usage_by_time(self, context, start_time):
start_time_p = jsonutils.to_primitive(start_time)
cctxt = self.client.prepare()
return cctxt.call(context, 'vol_get_usage_by_time',
start_time=start_time_p)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'vol_usage_update',
vol_id=vol_id, rd_req=rd_req,
rd_bytes=rd_bytes, wr_req=wr_req,
wr_bytes=wr_bytes,
instance=instance_p, last_refreshed=last_refreshed,
update_totals=update_totals)
def service_get_all_by(self, context, topic=None, host=None, binary=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'service_get_all_by',
topic=topic, host=host, binary=binary)
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_all_by_host',
host=host, node=node,
columns_to_join=columns_to_join)
def instance_fault_create(self, context, values):
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_fault_create', values=values)
def action_event_start(self, context, values):
values_p = jsonutils.to_primitive(values)
cctxt = self.client.prepare()
return cctxt.call(context, 'action_event_start', values=values_p)
def action_event_finish(self, context, values):
values_p = jsonutils.to_primitive(values)
cctxt = self.client.prepare()
return cctxt.call(context, 'action_event_finish', values=values_p)
def service_create(self, context, values):
cctxt = self.client.prepare()
return cctxt.call(context, 'service_create', values=values)
def service_destroy(self, context, service_id):
cctxt = self.client.prepare()
return cctxt.call(context, 'service_destroy', service_id=service_id)
def compute_node_create(self, context, values):
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_node_create', values=values)
def compute_node_update(self, context, node, values):
node_p = jsonutils.to_primitive(node)
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_node_update',
node=node_p, values=values)
def compute_node_delete(self, context, node):
node_p = jsonutils.to_primitive(node)
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_node_delete', node=node_p)
def service_update(self, context, service, values):
service_p = jsonutils.to_primitive(service)
cctxt = self.client.prepare()
return cctxt.call(context, 'service_update',
service=service_p, values=values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'task_log_get',
task_name=task_name, begin=begin, end=end,
host=host, state=state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'task_log_begin_task',
task_name=task_name,
begin=begin, end=end, host=host,
task_items=task_items, message=message)
def task_log_end_task(self, context, task_name, begin, end, host, errors,
message=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'task_log_end_task',
task_name=task_name, begin=begin, end=end,
host=host, errors=errors, message=message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
instance_p = jsonutils.to_primitive(instance)
system_metadata_p = jsonutils.to_primitive(system_metadata)
extra_usage_info_p = jsonutils.to_primitive(extra_usage_info)
cctxt = self.client.prepare()
return cctxt.call(
context, 'notify_usage_exists',
instance=instance_p,
current_period=current_period,
ignore_missing_network_data=ignore_missing_network_data,
system_metadata=system_metadata_p,
extra_usage_info=extra_usage_info_p)
def security_groups_trigger_handler(self, context, event, args):
args_p = jsonutils.to_primitive(args)
cctxt = self.client.prepare()
return cctxt.call(context, 'security_groups_trigger_handler',
event=event, args=args_p)
def security_groups_trigger_members_refresh(self, context, group_ids):
cctxt = self.client.prepare()
return cctxt.call(context, 'security_groups_trigger_members_refresh',
group_ids=group_ids)
def network_migrate_instance_start(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
cctxt = self.client.prepare()
return cctxt.call(context, 'network_migrate_instance_start',
instance=instance_p, migration=migration_p)
def network_migrate_instance_finish(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
cctxt = self.client.prepare()
return cctxt.call(context, 'network_migrate_instance_finish',
instance=instance_p, migration=migration_p)
def get_ec2_ids(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'get_ec2_ids',
instance=instance_p)
def compute_unrescue(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_unrescue', instance=instance_p)
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_class_action',
objname=objname, objmethod=objmethod,
objver=objver, args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport(self, context, objinst, target_version):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_backport', objinst=objinst,
target_version=target_version)
class ComputeTaskAPI(object):
"""Client side of the conductor 'compute' namespaced RPC API
API version history:
1.0 - Initial version (empty).
1.1 - Added unified migrate_server call.
1.2 - Added build_instances
1.3 - Added unshelve_instance
1.4 - Added reservations to migrate_server.
    1.5 - Added the legacy_bdm parameter to build_instances
1.6 - Made migrate_server use instance objects
"""
def __init__(self):
super(ComputeTaskAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic,
namespace='compute_task',
version='1.0')
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, serializer=serializer)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
reservations=None):
if self.client.can_send_version('1.6'):
version = '1.6'
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.4'
flavor_p = jsonutils.to_primitive(flavor)
cctxt = self.client.prepare(version=version)
return cctxt.call(context, 'migrate_server',
instance=instance, scheduler_hint=scheduler_hint,
live=live, rebuild=rebuild, flavor=flavor_p,
block_migration=block_migration,
disk_over_commit=disk_over_commit,
reservations=reservations)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
image_p = jsonutils.to_primitive(image)
cctxt = self.client.prepare(version='1.5')
cctxt.cast(context, 'build_instances',
instances=instances, image=image_p,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
cctxt = self.client.prepare(version='1.3')
cctxt.cast(context, 'unshelve_instance', instance=instance)
|
eharney/nova
|
nova/conductor/rpcapi.py
|
Python
|
apache-2.0
| 21,580
|
#!/usr/bin/env python
import tempfile
import sys
import subprocess
import shutil
import os
import hashlib
import contextlib
import gzip
import fnmatch
import tarfile
import zipfile
def generate_file_list(directory):
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
yield os.path.join(dirpath, filename)
def sha1_file(name, checksum=None):
CHUNKSIZE = 1024
if checksum is None:
checksum = hashlib.sha1()
if fnmatch.fnmatch(name, "*.dat"):
opener = gzip.open
else:
opener = open
with contextlib.closing(opener(name, 'rb')) as data:
chunk = data.read(CHUNKSIZE)
while len(chunk) == CHUNKSIZE:
checksum.update(chunk)
chunk = data.read(CHUNKSIZE)
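        # while/else: the else clause runs once the loop condition fails
        # (there is no break here), folding the final short or empty chunk
        # into the checksum.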
else:
checksum.update(chunk)
return checksum
def calculate_result(directory):
checksum = hashlib.sha1()
for filename in sorted(generate_file_list(directory)):
if filename.endswith("session.lock"):
continue
sha1_file(filename, checksum)
return checksum.hexdigest()
@contextlib.contextmanager
def temporary_directory(prefix='regr'):
name = tempfile.mkdtemp(prefix)
try:
yield name
finally:
shutil.rmtree(name)
@contextlib.contextmanager
def directory_clone(src):
with temporary_directory('regr') as name:
subdir = os.path.join(name, "subdir")
shutil.copytree(src, subdir)
yield subdir
def launch_subprocess(directory, arguments, env=None):
    # An empty environ breaks Python on some platforms (PATH appears to be
    # required), so start from os.environ and overlay the overrides.
if env is None:
env = {}
newenv = {}
newenv.update(os.environ)
newenv.update(env)
    proc = subprocess.Popen((["python.exe"] if sys.platform == "win32" else []) + [
        "./mce.py",
        directory] + list(arguments), stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=newenv)
return proc
class RegressionError(Exception):
pass
def do_test(test_data, result_check, arguments=()):
"""Run a regression test on the given world.
result_check - sha1 of the recursive tree generated
arguments - arguments to give to mce.py on execution
"""
result_check = result_check.lower()
env = {
'MCE_RANDOM_SEED': '42',
'MCE_LAST_PLAYED': '42',
}
if 'MCE_PROFILE' in os.environ:
env['MCE_PROFILE'] = os.environ['MCE_PROFILE']
with directory_clone(test_data) as directory:
proc = launch_subprocess(directory, arguments, env)
proc.stdin.close()
proc.wait()
if proc.returncode:
raise RegressionError("Program execution failed!")
checksum = calculate_result(directory).lower()
if checksum != result_check.lower():
raise RegressionError("Checksum mismatch: {0!r} != {1!r}".format(checksum, result_check))
print "[OK] (sha1sum of result is {0!r}, as expected)".format(result_check)
def do_test_match_output(test_data, result_check, arguments=()):
result_check = result_check.lower()
env = {
'MCE_RANDOM_SEED': '42',
'MCE_LAST_PLAYED': '42'
}
with directory_clone(test_data) as directory:
proc = launch_subprocess(directory, arguments, env)
proc.stdin.close()
output = proc.stdout.read()
proc.wait()
if proc.returncode:
raise RegressionError("Program execution failed!")
print "Output\n{0}".format(output)
checksum = hashlib.sha1()
checksum.update(output)
checksum = checksum.hexdigest()
if checksum != result_check.lower():
raise RegressionError("Checksum mismatch: {0!r} != {1!r}".format(checksum, result_check))
print "[OK] (sha1sum of result is {0!r}, as expected)".format(result_check)
alpha_tests = [
(do_test, 'baseline', '2bf250ec4e5dd8bfd73b3ccd0a5ff749569763cf', []),
(do_test, 'degrief', '2b7eecd5e660f20415413707b4576b1234debfcb', ['degrief']),
(do_test_match_output, 'analyze', '9cb4aec2ed7a895c3a5d20d6e29e26459e00bd53', ['analyze']),
(do_test, 'relight', 'f3b3445b0abca1fe2b183bc48b24fb734dfca781', ['relight']),
(do_test, 'replace', '4e816038f9851817b0d75df948d058143708d2ec', ['replace', 'Water (active)', 'with', 'Lava (active)']),
(do_test, 'fill', '94566d069edece4ff0cc52ef2d8f877fbe9720ab', ['fill', 'Water (active)']),
(do_test, 'heightmap', '71c20e7d7e335cb64b3eb0e9f6f4c9abaa09b070', ['heightmap', 'regression_test/mars.png']),
]
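# Each alpha_tests entry is (test function, regression name, expected sha1
# of the result, arguments passed to mce.py).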
import optparse
parser = optparse.OptionParser()
parser.add_option("--profile", help="Perform profiling on regression tests", action="store_true")
def main(argv):
options, args = parser.parse_args(argv)
if len(args) <= 1:
do_these_regressions = ['*']
else:
do_these_regressions = args[1:]
with directory_clone("testfiles/AnvilWorld") as directory:
test_data = directory
passes = []
fails = []
for func, name, sha, args in alpha_tests:
print "Starting regression {0} ({1})".format(name, args)
if any(fnmatch.fnmatch(name, x) for x in do_these_regressions):
if options.profile:
print >> sys.stderr, "Starting to profile to %s.profile" % name
os.environ['MCE_PROFILE'] = '%s.profile' % name
try:
func(test_data, sha, args)
except RegressionError, e:
fails.append("Regression {0} failed: {1}".format(name, e))
print fails[-1]
else:
passes.append("Regression {0!r} complete.".format(name))
print passes[-1]
print "{0} tests passed.".format(len(passes))
for line in fails:
print line
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
ahh2131/mchisel
|
run_regression_test.py
|
Python
|
isc
| 5,935
|
#!/usr/bin/env python
import pytest
if pytest.PYVER[:2] == (3, 3):
pytest.skip("Broken on Python 3.3")
from circuits.web import Controller
from circuits.web.tools import check_auth, digest_auth
from .helpers import HTTPError, HTTPDigestAuthHandler
from .helpers import urlopen, build_opener, install_opener
class Root(Controller):
def index(self):
realm = "Test"
users = {"admin": "admin"}
if check_auth(self.request, self.response, realm, users):
return "Hello World!"
return digest_auth(self.request, self.response, realm, users)
def test(webapp):
try:
f = urlopen(webapp.server.http.base)
except HTTPError as e:
assert e.code == 401
assert e.msg == "Unauthorized"
else:
assert False
handler = HTTPDigestAuthHandler()
handler.add_password("Test", webapp.server.http.base, "admin", "admin")
opener = build_opener(handler)
install_opener(opener)
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b"Hello World!"
install_opener(None)
|
treemo/circuits
|
tests/web/test_digestauth.py
|
Python
|
mit
| 1,085
|
import control_grid
from os import system,popen
import random
import re
class nn_task(control_grid.specific_task):
def execute(self,iden):
iden_all = dict()
string_dir = "t_";
for i in sorted(iden):
s = (i.split(":"))[0]
iden_all[s] = (i.split(":"))[1]
string_dir += (i.split(":"))[1]
string_dir += "_"
string_dir += str(hash(iden))[-3:]
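        # e.g. iden ('h1:200', 'lr:0.1') yields 't_200_0.1_' plus the last
        # three digits of hash(iden) as a uniqueness suffix (values
        # illustrative).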
        # create the working directory for this configuration
system("mkdir ../%s" % string_dir)
#write conf file
conf_f = open("../%s/conf" % string_dir,"w")
        template_f = open(control_grid.file_template, "r")
for line in template_f:
x = line.split()
if(len(x)>0):
if x[0] in iden_all.keys():
conf_f.write("%s %s\n"%(x[0],iden_all[x[0]]))
else:
conf_f.write(line)
conf_f.close()
template_f.close()
#execute
system("cd ../%s;../ztrain conf >> log.test" % string_dir)
#system("cd ../%s;echo zzzzz %s %s %s %s %s %s > log.test;sleep %s" % string_dir,1,1,1,1,1,random.random(),random.randint(5,10))) #onlyfortesing
test_iter = 6 #iter6, which is the 7th iter
log_file = "../%s/log.test" % string_dir;
ret = popen("cat %s | grep '^zzzzz'" % log_file).read()
ret = ret.split()[1:]
print ret
res = [float(i) for i in ret]
#clean up
system("cd ../%s; rm *.mach.*; rm output.*" % string_dir)
##new test
'''
pattern_iter = re.compile(r"Start training for iter %s" % test_iter)
pattern_res = re.compile(r"Unlabeled Accuracy No Punc:[ \t]*([0-9]*.[0-9]*)")
res = -1
ff = open(log_file,"r")
in_iter = False
for line in ff:
if(in_iter):
x_res=pattern_res.search(line)
if(x_res):
res = (float)(x_res.group(1))
break
else:
if(pattern_iter.search(line)):
in_iter = True
ff.close()
print("Finish one of %s with %g" % (iden,res))
'''
return res
def high_order(self,s1,s2):
return max(s1)>max(s2)
if __name__ == "__main__":
control_grid.ct_init()
control_grid.ct_main(nn_task())
|
zzzsss/parsing2
|
others/tune_nn.py
|
Python
|
lgpl-3.0
| 2,399
|
"""
Tests for the mhlib module
Nick Mathewson
"""
### BUG: This suite doesn't currently test the mime functionality of
### mhlib. It should.
import unittest
from test.test_support import run_unittest, TESTFN, TestSkipped, import_module
import os, StringIO
import sys
mhlib = import_module('mhlib', deprecated=True)
if (sys.platform.startswith("win") or sys.platform=="riscos" or
sys.platform.startswith("atheos")):
# mhlib.updateline() renames a file to the name of a file that already
# exists. That causes a reasonable OS <wink> to complain in test_sequence
# here, like the "OSError: [Errno 17] File exists" raised on Windows.
# mhlib's listsubfolders() and listallfolders() do something with
# link counts, and that causes test_listfolders() here to get back
# an empty list from its call of listallfolders().
# The other tests here pass on Windows.
raise TestSkipped("skipped on %s -- " % sys.platform +
"too many Unix assumptions")
_mhroot = TESTFN+"_MH"
_mhpath = os.path.join(_mhroot, "MH")
_mhprofile = os.path.join(_mhroot, ".mh_profile")
def normF(f):
return os.path.join(*f.split('/'))
def writeFile(fname, contents):
dir = os.path.split(fname)[0]
if dir and not os.path.exists(dir):
mkdirs(dir)
f = open(fname, 'w')
f.write(contents)
f.close()
def readFile(fname):
f = open(fname)
r = f.read()
f.close()
return r
def writeProfile(dict):
contents = [ "%s: %s\n" % (k, v) for k, v in dict.iteritems() ]
writeFile(_mhprofile, "".join(contents))
def writeContext(folder):
folder = normF(folder)
writeFile(os.path.join(_mhpath, "context"),
"Current-Folder: %s\n" % folder)
def writeCurMessage(folder, cur):
folder = normF(folder)
writeFile(os.path.join(_mhpath, folder, ".mh_sequences"),
"cur: %s\n"%cur)
def writeMessage(folder, n, headers, body):
folder = normF(folder)
headers = "".join([ "%s: %s\n" % (k, v) for k, v in headers.iteritems() ])
contents = "%s\n%s\n" % (headers,body)
mkdirs(os.path.join(_mhpath, folder))
writeFile(os.path.join(_mhpath, folder, str(n)), contents)
def getMH():
return mhlib.MH(os.path.abspath(_mhpath), _mhprofile)
def sortLines(s):
lines = s.split("\n")
lines = [ line.strip() for line in lines if len(line) >= 2 ]
lines.sort()
return lines
# These next 2 functions are copied from test_glob.py.
def mkdirs(fname):
if os.path.exists(fname) or fname == '':
return
base, file = os.path.split(fname)
mkdirs(base)
os.mkdir(fname)
def deltree(fname):
if not os.path.exists(fname):
return
for f in os.listdir(fname):
fullname = os.path.join(fname, f)
if os.path.isdir(fullname):
deltree(fullname)
else:
try:
os.unlink(fullname)
except:
pass
try:
os.rmdir(fname)
except:
pass
class MhlibTests(unittest.TestCase):
def setUp(self):
deltree(_mhroot)
mkdirs(_mhpath)
writeProfile({'Path' : os.path.abspath(_mhpath),
'Editor': 'emacs',
'ignored-attribute': 'camping holiday'})
# Note: These headers aren't really conformant to RFC822, but
# mhlib shouldn't care about that.
# An inbox with a couple of messages.
writeMessage('inbox', 1,
{'From': 'Mrs. Premise',
'To': 'Mrs. Conclusion',
'Date': '18 July 2001'}, "Hullo, Mrs. Conclusion!\n")
writeMessage('inbox', 2,
{'From': 'Mrs. Conclusion',
'To': 'Mrs. Premise',
'Date': '29 July 2001'}, "Hullo, Mrs. Premise!\n")
# A folder with many messages
for i in range(5, 101)+range(101, 201, 2):
writeMessage('wide', i,
{'From': 'nowhere', 'Subject': 'message #%s' % i},
"This is message number %s\n" % i)
# A deeply nested folder
def deep(folder, n):
writeMessage(folder, n,
{'Subject': 'Message %s/%s' % (folder, n) },
"This is message number %s in %s\n" % (n, folder) )
deep('deep/f1', 1)
deep('deep/f1', 2)
deep('deep/f1', 3)
deep('deep/f2', 4)
deep('deep/f2', 6)
deep('deep', 3)
deep('deep/f2/f3', 1)
deep('deep/f2/f3', 2)
def tearDown(self):
deltree(_mhroot)
def test_basic(self):
writeContext('inbox')
writeCurMessage('inbox', 2)
mh = getMH()
eq = self.assertEquals
eq(mh.getprofile('Editor'), 'emacs')
eq(mh.getprofile('not-set'), None)
eq(mh.getpath(), os.path.abspath(_mhpath))
eq(mh.getcontext(), 'inbox')
mh.setcontext('wide')
eq(mh.getcontext(), 'wide')
eq(readFile(os.path.join(_mhpath, 'context')),
"Current-Folder: wide\n")
mh.setcontext('inbox')
inbox = mh.openfolder('inbox')
eq(inbox.getfullname(),
os.path.join(os.path.abspath(_mhpath), 'inbox'))
eq(inbox.getsequencesfilename(),
os.path.join(os.path.abspath(_mhpath), 'inbox', '.mh_sequences'))
eq(inbox.getmessagefilename(1),
os.path.join(os.path.abspath(_mhpath), 'inbox', '1'))
def test_listfolders(self):
mh = getMH()
eq = self.assertEquals
folders = mh.listfolders()
folders.sort()
eq(folders, ['deep', 'inbox', 'wide'])
folders = mh.listallfolders()
folders.sort()
tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3',
'inbox', 'wide'])
tfolders.sort()
eq(folders, tfolders)
folders = mh.listsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2']))
folders = mh.listallsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2', 'deep/f2/f3']))
eq(mh.listsubfolders(normF('deep/f2')), [normF('deep/f2/f3')])
eq(mh.listsubfolders('inbox'), [])
eq(mh.listallsubfolders('inbox'), [])
def test_sequence(self):
mh = getMH()
eq = self.assertEquals
writeCurMessage('wide', 55)
f = mh.openfolder('wide')
all = f.listmessages()
eq(all, range(5, 101)+range(101, 201, 2))
eq(f.getcurrent(), 55)
f.setcurrent(99)
eq(readFile(os.path.join(_mhpath, 'wide', '.mh_sequences')),
'cur: 99\n')
def seqeq(seq, val):
eq(f.parsesequence(seq), val)
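        # MH message-set notation exercised below: 'a-b' is an inclusive
        # range, 'a:n' takes n messages starting at a ('seq:-n' counts
        # backwards), 'cur'/'.' is the current message, and 'first', 'last',
        # 'prev', 'next' are positional anchors.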
seqeq('5-55', range(5, 56))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('10:10', range(10, 20))
seqeq('10:+10', range(10, 20))
seqeq('101:10', range(101, 121, 2))
seqeq('cur', [99])
seqeq('.', [99])
seqeq('prev', [98])
seqeq('next', [100])
seqeq('cur:-3', [97, 98, 99])
seqeq('first-cur', range(5, 100))
seqeq('150-last', range(151, 201, 2))
seqeq('prev-next', [98, 99, 100])
lowprimes = [5, 7, 11, 13, 17, 19, 23, 29]
lowcompos = [x for x in range(5, 31) if not x in lowprimes ]
f.putsequences({'cur': [5],
'lowprime': lowprimes,
'lowcompos': lowcompos})
seqs = readFile(os.path.join(_mhpath, 'wide', '.mh_sequences'))
seqs = sortLines(seqs)
eq(seqs, ["cur: 5",
"lowcompos: 6 8-10 12 14-16 18 20-22 24-28 30",
"lowprime: 5 7 11 13 17 19 23 29"])
seqeq('lowprime', lowprimes)
seqeq('lowprime:1', [5])
seqeq('lowprime:2', [5, 7])
seqeq('lowprime:-2', [23, 29])
## Not supported
#seqeq('lowprime:first', [5])
#seqeq('lowprime:last', [29])
#seqeq('lowprime:prev', [29])
#seqeq('lowprime:next', [29])
def test_modify(self):
mh = getMH()
eq = self.assertEquals
mh.makefolder("dummy1")
self.assert_("dummy1" in mh.listfolders())
path = os.path.join(_mhpath, "dummy1")
self.assert_(os.path.exists(path))
f = mh.openfolder('dummy1')
def create(n):
msg = "From: foo\nSubject: %s\n\nDummy Message %s\n" % (n,n)
f.createmessage(n, StringIO.StringIO(msg))
create(7)
create(8)
create(9)
eq(readFile(f.getmessagefilename(9)),
"From: foo\nSubject: 9\n\nDummy Message 9\n")
eq(f.listmessages(), [7, 8, 9])
files = os.listdir(path)
files.sort()
eq(files, ['7', '8', '9'])
f.removemessages(['7', '8'])
files = os.listdir(path)
files.sort()
eq(files, [',7', ',8', '9'])
eq(f.listmessages(), [9])
create(10)
create(11)
create(12)
mh.makefolder("dummy2")
f2 = mh.openfolder("dummy2")
eq(f2.listmessages(), [])
f.movemessage(10, f2, 3)
f.movemessage(11, f2, 5)
eq(f.listmessages(), [9, 12])
eq(f2.listmessages(), [3, 5])
eq(readFile(f2.getmessagefilename(3)),
"From: foo\nSubject: 10\n\nDummy Message 10\n")
f.copymessage(9, f2, 4)
eq(f.listmessages(), [9, 12])
eq(readFile(f2.getmessagefilename(4)),
"From: foo\nSubject: 9\n\nDummy Message 9\n")
f.refilemessages([9, 12], f2)
eq(f.listmessages(), [])
eq(f2.listmessages(), [3, 4, 5, 6, 7])
eq(readFile(f2.getmessagefilename(7)),
"From: foo\nSubject: 12\n\nDummy Message 12\n")
# XXX This should check that _copysequences does the right thing.
mh.deletefolder('dummy1')
mh.deletefolder('dummy2')
self.assert_('dummy1' not in mh.listfolders())
self.assert_(not os.path.exists(path))
def test_read(self):
mh = getMH()
eq = self.assertEquals
f = mh.openfolder('inbox')
msg = f.openmessage(1)
# Check some basic stuff from rfc822
eq(msg.getheader('From'), "Mrs. Premise")
eq(msg.getheader('To'), "Mrs. Conclusion")
# Okay, we have the right message. Let's check the stuff from
# mhlib.
lines = sortLines(msg.getheadertext())
eq(lines, ["Date: 18 July 2001",
"From: Mrs. Premise",
"To: Mrs. Conclusion"])
lines = sortLines(msg.getheadertext(lambda h: len(h)==4))
eq(lines, ["Date: 18 July 2001",
"From: Mrs. Premise"])
eq(msg.getbodytext(), "Hullo, Mrs. Conclusion!\n\n")
eq(msg.getbodytext(0), "Hullo, Mrs. Conclusion!\n\n")
# XXXX there should be a better way to reclaim the file handle
msg.fp.close()
del msg
def test_main():
run_unittest(MhlibTests)
if __name__ == "__main__":
test_main()
|
leighpauls/k2cro4
|
third_party/python_26/Lib/test/test_mhlib.py
|
Python
|
bsd-3-clause
| 11,145
|
#
# Copyright 2015-2016 Free Software Foundation, Inc.
#
# This file is part of PyBOMBS
#
# PyBOMBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# PyBOMBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyBOMBS; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Pseudo-Packager: Test command
"""
import re
import subprocess
from pybombs.packagers.extern import ExternCmdPackagerBase, ExternReadOnlyPackager
class ExternalTestCmd(ExternReadOnlyPackager):
" Wrapper around running a command "
def __init__(self, logger):
ExternReadOnlyPackager.__init__(self, logger)
def get_installed_version(self, command):
"""
Run command, see if it works. If the output has a version number in
x.y.z format, return that. If it doesn't, but the command ran, return
True. If it fails, return False. ezpz.
"""
try:
# If this fails, it almost always throws.
# NOTE: the split is to handle multi-argument commands. There's
# cases where this is not intended, e.g. it won't handle argument
# with spaces! But currently this is preferable to running the
# command in a shell.
            # Decode so the regex below also works on Python 3, where
            # check_output returns bytes.
            output = subprocess.check_output(
                command.split(), stderr=subprocess.STDOUT).decode('utf-8', 'ignore').strip()
ver = re.search(
r'(?P<ver>[0-9]+\.[0-9]+(\.[0-9]+)?)',
output,
re.MULTILINE
)
if ver is None:
self.log.debug("Could run, but couldn't find a version number.")
return True
ver = ver.group('ver')
self.log.debug("Found version number: {0}".format(ver))
return ver
except (subprocess.CalledProcessError, OSError):
# We'll assume it's not installed
return False
except Exception as e:
self.log.error("Running `{0}` failed.".format(command))
self.log.obnoxious(str(e))
return False
class TestCommand(ExternCmdPackagerBase):
"""
Checks if something is installed by running a command.
Can't really install stuff, but is useful for finding out if something is
already installed, e.g. from source.
"""
name = 'cmd'
pkgtype = 'cmd'
def __init__(self):
ExternCmdPackagerBase.__init__(self)
self.packager = ExternalTestCmd(self.log)
def supported(self):
" We can always run commands. "
return True
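# Sketch of how this is used (the recipe snippet is hypothetical, not a
# verified PyBOMBS recipe): a recipe that declares a cmd-type requirement
# such as `gcc --version` is considered satisfied when ExternalTestCmd runs
# the command and parses e.g. '4.9.2' out of its output.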
|
marcusmueller/pybombs
|
pybombs/packagers/cmd.py
|
Python
|
gpl-3.0
| 2,985
|
from django import forms
FIELD_CHOICES = (('ti', 'Title'), ('au', 'Author'),
                 ('ab', 'Abstract'), ('yr', 'Year'))
DIR_CHOICES = (('asc', 'Ascending'), ('desc', 'Descending'))
class SearchForm(forms.Form):
Keyword = forms.CharField(label='',max_length=20)
class SortForm(forms.Form):
SortField = forms.ChoiceField(label='',choices=FIELD_CHOICES)
SortDir = forms.ChoiceField(label='',choices=DIR_CHOICES)
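# Hypothetical view-side usage sketch (request handling names illustrative):
#
#   form = SearchForm(request.GET)
#   if form.is_valid():
#       keyword = form.cleaned_data['Keyword']
#
#   sort_form = SortForm(request.GET)
#   if sort_form.is_valid():
#       field = sort_form.cleaned_data['SortField']
#       direction = sort_form.cleaned_data['SortDir']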
|
jianmingtang/django-app-publication
|
mypub/forms.py
|
Python
|
gpl-3.0
| 403
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
class V3TokenClient(rest_client.RestClient):
def __init__(self, auth_url, disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None, **kwargs):
"""Initialises the Token client
:param auth_url: URL to which the token request is sent
:param disable_ssl_certificate_validation: pass-through to rest client
:param ca_certs: pass-through to rest client
:param trace_requests: pass-through to rest client
:param kwargs: any extra parameter to pass through the rest client.
Three kwargs are forbidden: region, service and auth_provider
as they are not meaningful for token client
"""
dscv = disable_ssl_certificate_validation
for unwanted_kwargs in ['region', 'service', 'auth_provider']:
kwargs.pop(unwanted_kwargs, None)
super(V3TokenClient, self).__init__(
None, None, None, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests, **kwargs)
if auth_url is None:
raise exceptions.IdentityError("Couldn't determine auth_url")
if 'auth/tokens' not in auth_url:
auth_url = auth_url.rstrip('/') + '/auth/tokens'
self.auth_url = auth_url
def auth(self, user_id=None, username=None, password=None, project_id=None,
project_name=None, user_domain_id=None, user_domain_name=None,
project_domain_id=None, project_domain_name=None, domain_id=None,
domain_name=None, token=None, app_cred_id=None,
app_cred_secret=None):
"""Obtains a token from the authentication service
:param user_id: user id
:param username: user name
:param user_domain_id: the user domain id
:param user_domain_name: the user domain name
:param project_domain_id: the project domain id
:param project_domain_name: the project domain name
:param domain_id: a domain id to scope to
:param domain_name: a domain name to scope to
:param project_id: a project id to scope to
:param project_name: a project name to scope to
:param token: a token to re-scope.
Accepts different combinations of credentials.
        Sample valid combinations:
- token
- token, project_name, project_domain_id
- user_id, password
- username, password, user_domain_id
- username, password, project_name, user_domain_id, project_domain_id
Validation is left to the server side.
"""
creds = {
'auth': {
'identity': {
'methods': [],
}
}
}
id_obj = creds['auth']['identity']
if token:
id_obj['methods'].append('token')
id_obj['token'] = {
'id': token
}
if (user_id or username) and password:
id_obj['methods'].append('password')
id_obj['password'] = {
'user': {
'password': password,
}
}
if user_id:
id_obj['password']['user']['id'] = user_id
else:
id_obj['password']['user']['name'] = username
_domain = None
if user_domain_id is not None:
_domain = dict(id=user_domain_id)
elif user_domain_name is not None:
_domain = dict(name=user_domain_name)
if _domain:
id_obj['password']['user']['domain'] = _domain
if app_cred_id and app_cred_secret:
id_obj['methods'].append('application_credential')
id_obj['application_credential'] = {
'id': app_cred_id,
'secret': app_cred_secret,
}
if (project_id or project_name):
_project = dict()
if project_id:
_project['id'] = project_id
elif project_name:
_project['name'] = project_name
if project_domain_id is not None:
_project['domain'] = {'id': project_domain_id}
elif project_domain_name is not None:
_project['domain'] = {'name': project_domain_name}
creds['auth']['scope'] = dict(project=_project)
elif domain_id:
creds['auth']['scope'] = dict(domain={'id': domain_id})
elif domain_name:
creds['auth']['scope'] = dict(domain={'name': domain_name})
body = json.dumps(creds, sort_keys=True)
resp, body = self.post(self.auth_url, body=body)
self.expected_success(201, resp.status)
return rest_client.ResponseBody(resp, body)
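    # Illustration (not part of the original file): for the documented
    # combination (username, password, project_name, user_domain_id,
    # project_domain_id), auth() above builds a request body shaped like
    # this (sample values are made up):
    #
    #   {"auth": {"identity": {"methods": ["password"],
    #                          "password": {"user": {"name": "demo",
    #                                                "password": "secret",
    #                                                "domain": {"id": "default"}}}},
    #             "scope": {"project": {"name": "demo-project",
    #                                   "domain": {"id": "default"}}}}}
    #
    # and POSTs it to self.auth_url, expecting HTTP 201.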
def request(self, method, url, extra_headers=False, headers=None,
body=None, chunked=False):
"""A simple HTTP request interface.
Note: this overloads the `request` method from the parent class and
thus must implement the same method signature.
"""
if headers is None:
            # Always accept 'json', even for the XML token client, because
            # an XML response is not easily converted to the
            # corresponding JSON one.
headers = self.get_headers(accept_type="json")
elif extra_headers:
try:
headers.update(self.get_headers(accept_type="json"))
except (ValueError, TypeError):
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp, req_headers=headers,
req_body='<omitted>', resp_body=resp_body)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise exceptions.Unauthorized(resp_body['error']['message'])
elif resp.status not in [200, 201, 204]:
raise exceptions.IdentityError(
'Unexpected status code {0}'.format(resp.status))
return resp, json.loads(resp_body)
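    # Illustration (not part of the original file): with the override above,
    # a 401/403 reply whose body looks like
    #   {"error": {"code": 401, "message": "The request you have made ..."}}
    # is raised as exceptions.Unauthorized carrying that message, while any
    # status outside {200, 201, 204} becomes an IdentityError.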
def get_token(self, **kwargs):
"""Returns (token id, token data) for supplied credentials"""
auth_data = kwargs.pop('auth_data', False)
if not (kwargs.get('user_domain_id') or
kwargs.get('user_domain_name')):
kwargs['user_domain_name'] = 'Default'
if not (kwargs.get('project_domain_id') or
kwargs.get('project_domain_name')):
kwargs['project_domain_name'] = 'Default'
body = self.auth(**kwargs)
token = body.response.get('x-subject-token')
if auth_data:
return token, body['token']
else:
return token
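# Hypothetical usage sketch (not part of the original file): get_token()
# defaults both domains to 'Default' when none are supplied and returns the
# X-Subject-Token header (plus the token body when auth_data=True). The URL
# and credentials below are made up.
#
#   client = V3TokenClient('https://keystone.example.com/v3')
#   token, data = client.get_token(username='demo', password='secret',
#                                  project_name='demo-project',
#                                  auth_data=True)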
class V3TokenClientJSON(V3TokenClient):
LOG = logging.getLogger(__name__)
def _warn(self):
self.LOG.warning("%s class was deprecated and renamed to %s",
self.__class__.__name__, 'V3TokenClient')
def __init__(self, *args, **kwargs):
self._warn()
super(V3TokenClientJSON, self).__init__(*args, **kwargs)
|
masayukig/tempest
|
tempest/lib/services/identity/v3/token_client.py
|
Python
|
apache-2.0
| 8,017
|
"""Data normalizer API.
The purpose of data normalizers is to process output
from the `mercator-go` command-line tool and normalize it.
Ecosystem-specific implementations live in their respective modules,
e.g.: NPM data normalizer can be found in `javascript` module.
All normalizers inherit from `f8a_worker.data_normalizer.AbstractDataNormalizer` class.
"""
import argparse
import json
import sys
from f8a_worker.data_normalizer.abstract import AbstractDataNormalizer
from f8a_worker.data_normalizer.csharp import NugetDataNormalizer
from f8a_worker.data_normalizer.go import (
GoGlideDataNormalizer, GoFedlibDataNormalizer, GodepsDataNormalizer
)
from f8a_worker.data_normalizer.java import MavenDataNormalizer, GradleDataNormalizer
from f8a_worker.data_normalizer.javascript import NpmDataNormalizer
from f8a_worker.data_normalizer.python import (
PythonDistDataNormalizer, PythonDataNormalizer, PythonRequirementsTxtDataNormalizer
)
assert AbstractDataNormalizer # Make linters happy
def normalize(mercator_output):
"""Normalize mercator output.
    :param mercator_output: dict, output from mercator-go
    :return: dict, normalized data, with the 'ecosystem' key set
    """
normalizers = {
'python': PythonDataNormalizer,
'python-dist': PythonDistDataNormalizer,
'python-requirementstxt': PythonRequirementsTxtDataNormalizer,
'npm': NpmDataNormalizer,
'java-pom': MavenDataNormalizer,
'dotnetsolution': NugetDataNormalizer,
'gofedlib': GoFedlibDataNormalizer,
'go-glide': GoGlideDataNormalizer,
'go-godeps': GodepsDataNormalizer,
'gradlebuild': GradleDataNormalizer
}
ecosystem = mercator_output.get('ecosystem', '').lower()
normalizer = normalizers.get(ecosystem)
if not normalizer:
raise ValueError('Unsupported ecosystem: {e}'.format(e=ecosystem))
result = normalizer(mercator_output.get('result', {})).normalize() or {}
result['ecosystem'] = ecosystem
return result
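# Hypothetical usage sketch (not part of the original file): dispatching one
# mercator-go record through normalize(). The payload below is only
# illustrative; real mercator output carries ecosystem-specific fields.
#
#   record = {'ecosystem': 'npm',
#             'result': {'name': 'left-pad', 'version': '1.3.0'}}
#   normalized = normalize(record)   # routed to NpmDataNormalizer
#   assert normalized['ecosystem'] == 'npm'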
def _dict2json(o, pretty=True):
"""Serialize dictionary to json."""
kwargs = {}
if pretty:
        kwargs['sort_keys'] = True
kwargs['separators'] = (',', ': ')
kwargs['indent'] = 4
return json.dumps(o, **kwargs)
def _main():
"""Read Mercator produced data from stdin and process."""
parser = argparse.ArgumentParser(sys.argv[0],
description='Data normalizer for mercator')
parser.add_argument('--no-pretty', dest='no_pretty', action='store_true',
help='do not print nicely formatted JSON')
args = parser.parse_args()
content = json.load(sys.stdin)
if content:
items = content.get('items') or []
for item in items:
item['result'] = normalize(item)
print(_dict2json(content, pretty=not args.no_pretty))
return 0
if __name__ == "__main__":
sys.exit(_main())
|
fabric8-analytics/fabric8-analytics-worker
|
f8a_worker/data_normalizer/__init__.py
|
Python
|
gpl-3.0
| 2,878
|
import time
import shutil
from nxdrive.tests.common import OS_STAT_MTIME_RESOLUTION
from nxdrive.tests.common_unit_test import UnitTestCase
from nxdrive.osi import AbstractOSIntegration
from nose.plugins.skip import SkipTest
class TestConflicts(UnitTestCase):
def setUp(self):
super(TestConflicts, self).setUp()
self.workspace_id = ('defaultSyncRootFolderItemFactory#default#' + self.workspace)
self.file_id = self.remote_file_system_client_1.make_file(self.workspace_id, 'test.txt', 'Some content').uid
self.engine_1.start()
self.wait_sync(wait_for_async=True)
self.assertTrue(self.local_client_1.exists('/test.txt'))
def test_self_conflict(self):
remote = self.remote_file_system_client_1
local = self.local_client_1
# Update content on both sides by the same user, remote last
remote.update_content(self.file_id, 'Remote update')
local.update_content('/test.txt', 'Local update')
self.wait_sync(wait_for_async=True)
self.assertEquals(len(local.get_children_info('/')), 1)
self.assertTrue(local.exists('/test.txt'))
self.assertEquals(local.get_content('/test.txt'), 'Local update')
remote_children = remote.get_children_info(self.workspace_id)
self.assertEquals(len(remote_children), 1)
self.assertEquals(remote_children[0].uid, self.file_id)
self.assertEquals(remote_children[0].name, 'test.txt')
self.assertEquals(remote.get_content(remote_children[0].uid), 'Remote update')
self.assertEquals(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
# Update content on both sides by the same user, local last
remote.update_content(self.file_id, 'Remote update 2')
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/test.txt', 'Local update 2')
self.wait_sync(wait_for_async=True)
self.assertEquals(len(local.get_children_info('/')), 1)
self.assertTrue(local.exists('/test.txt'))
self.assertEquals(local.get_content('/test.txt'), 'Local update 2')
remote_children = remote.get_children_info(self.workspace_id)
self.assertEquals(len(remote_children), 1)
self.assertEquals(remote_children[0].uid, self.file_id)
self.assertEquals(remote_children[0].name, 'test.txt')
self.assertEquals(remote.get_content(remote_children[0].uid), 'Remote update 2')
self.assertEquals(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
def test_real_conflict(self):
local = self.local_client_1
remote = self.remote_file_system_client_2
# Update content on both sides by different users, remote last
time.sleep(OS_STAT_MTIME_RESOLUTION)
# Race condition is still possible
remote.update_content(self.file_id, 'Remote update')
local.update_content('/test.txt', 'Local update')
self.wait_sync(wait_for_async=True)
self.assertEquals(remote.get_content(self.file_id), 'Remote update')
self.assertEquals(local.get_content('/test.txt'), 'Local update')
self.assertEquals(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
# Update content on both sides by different users, local last
remote.update_content(self.file_id, 'Remote update 2')
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/test.txt', 'Local update 2')
self.wait_sync(wait_for_async=True)
self.assertEquals(remote.get_content(self.file_id), 'Remote update 2')
self.assertEquals(local.get_content('/test.txt'), 'Local update 2')
self.assertEquals(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
def test_conflict_on_lock(self):
doc_uid = self.file_id.split("#")[-1]
local = self.local_client_1
remote = self.remote_file_system_client_2
self.remote_document_client_2.lock(doc_uid)
local.update_content('/test.txt', 'Local update')
self.wait_sync(wait_for_async=True)
self.assertEquals(local.get_content('/test.txt'), 'Local update')
self.assertEquals(remote.get_content(self.file_id), 'Some content')
remote.update_content(self.file_id, 'Remote update')
self.wait_sync(wait_for_async=True)
self.assertEquals(local.get_content('/test.txt'), 'Local update')
self.assertEquals(remote.get_content(self.file_id), 'Remote update')
self.assertEquals(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
self.remote_document_client_2.unlock(doc_uid)
self.wait_sync(wait_for_async=True)
self.assertEquals(local.get_content('/test.txt'), 'Local update')
self.assertEquals(remote.get_content(self.file_id), 'Remote update')
self.assertEquals(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
def test_XLS_conflict_on_locked_document(self):
if not AbstractOSIntegration.is_windows():
raise SkipTest("Only makes sense under Windows")
self._XLS_local_update_on_locked_document(locked_from_start=False)
def test_XLS_conflict_on_locked_document_from_start(self):
if not AbstractOSIntegration.is_windows():
raise SkipTest("Only makes sense under Windows")
self._XLS_local_update_on_locked_document()
def _XLS_local_update_on_locked_document(self, locked_from_start=True):
remote = self.remote_file_system_client_2
local = self.local_client_1
# user2: create remote XLS file
fs_item_id = remote.make_file(self.workspace_id, 'Excel 97 file.xls',
b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00').uid
doc_uid = fs_item_id.split("#")[-1]
self.wait_sync(wait_for_async=True)
self.assertTrue(local.exists('/Excel 97 file.xls'))
if locked_from_start:
# user2: lock document before user1 opening it
self.remote_document_client_2.lock(doc_uid)
        # user1: simulate opening XLS file with MS Office (i.e. update its content)
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01')
self.wait_sync(wait_for_async=locked_from_start)
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
if locked_from_start:
# remote content hasn't changed, pair state is conflicted and remote_can_update flag is False
self.assertEquals(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
self.assertEquals(pair_state.pair_state, 'conflicted')
self.assertFalse(pair_state.remote_can_update)
else:
# remote content has changed, pair state is synchronized and remote_can_update flag is True
self.assertEquals(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01')
self.assertEquals(pair_state.pair_state, 'synchronized')
self.assertTrue(pair_state.remote_can_update)
if not locked_from_start:
# user2: lock document after user1 opening it
self.remote_document_client_2.lock(doc_uid)
# user1: simulate updating XLS file with MS Office
# 1. Create empty file 787D3000
# 2. Update 787D3000
# 3. Update Excel 97 file.xls
# 4. Update 787D3000
# 5. Move Excel 97 file.xls to 1743B25F.tmp
# 6. Move 787D3000 to Excel 97 file.xls
# 7. Update Excel 97 file.xls
# 8. Update 1743B25F.tmp
# 9. Update Excel 97 file.xls
# 10. Delete 1743B25F.tmp
local.make_file('/', '787D3000')
local.update_content('/787D3000', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02')
local.update_content('/787D3000', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
shutil.move(local._abspath('/Excel 97 file.xls'), local._abspath('/1743B25F.tmp'))
shutil.move(local._abspath('/787D3000'), local._abspath('/Excel 97 file.xls'))
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03\x04')
local.update_content('/1743B25F.tmp', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
local.delete_final('/1743B25F.tmp')
self.wait_sync(wait_for_async=not locked_from_start)
self.assertEquals(len(local.get_children_info('/')), 2)
self.assertEquals(local.get_content('/Excel 97 file.xls'), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
# remote content hasn't changed, pair state is conflicted and remote_can_update flag is False
if locked_from_start:
self.assertEquals(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
else:
self.assertEquals(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01')
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
self.assertEquals(pair_state.pair_state, 'conflicted')
self.assertFalse(pair_state.remote_can_update)
# user2: remote update, conflict is detected once again and remote_can_update flag is still False
remote.update_content(fs_item_id, b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02', 'New Excel 97 file.xls')
self.wait_sync(wait_for_async=True)
self.assertEquals(len(local.get_children_info('/')), 2)
self.assertTrue(local.exists('/Excel 97 file.xls'))
self.assertEquals(local.get_content('/Excel 97 file.xls'), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
self.assertEquals(len(remote.get_children_info(self.workspace_id)), 2)
self.assertEquals(remote.get_info(fs_item_id).name, 'New Excel 97 file.xls')
self.assertEquals(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02')
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
self.assertEquals(pair_state.pair_state, 'conflicted')
self.assertFalse(pair_state.remote_can_update)
# user2: unlock document, conflict is detected once again and remote_can_update flag is now True
self.remote_document_client_2.unlock(doc_uid)
self.wait_sync(wait_for_async=True)
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
self.assertEquals(pair_state.pair_state, 'conflicted')
self.assertTrue(pair_state.remote_can_update)
|
rsoumyassdi/nuxeo-drive
|
nuxeo-drive-client/nxdrive/tests/test_conflicts.py
|
Python
|
lgpl-2.1
| 11,058
|